Merge tag 'batadv-next-for-davem-20180717' of git://git.open-mesh.org/linux-merge
author David S. Miller <davem@davemloft.net>
Wed, 18 Jul 2018 05:46:57 +0000 (14:46 +0900)
committer David S. Miller <davem@davemloft.net>
Wed, 18 Jul 2018 05:46:57 +0000 (14:46 +0900)
Simon Wunderlich says:

====================
This feature/cleanup patchset includes the following patches:

 - Don't call BATMAN_V experimental in Kconfig anymore, by Sven Eckelmann

 - Enable DAT by default at compile time, by Antonio Quartulli

 - Remove obsolete default n in Kconfig, by Sven Eckelmann

 - Fix checkpatch spelling errors, by Sven Eckelmann

 - Unify header guards style, by Sven Eckelmann

 - Consolidate batadv_purge_orig functions, by Sven Eckelmann

 - Replace type define with proper typedef, by Sven Eckelmann
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
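The last cleanup listed above, "Replace type define with proper typedef", swaps a
preprocessor define that was standing in for a type name for a real C typedef.
A minimal sketch of the pattern — the alias name is invented for illustration,
not the actual batman-adv identifier:

	/* Before: a preprocessor define standing in for a type name.
	 * The alias exists only in the preprocessor; the compiler never
	 * sees a distinct type called batadv_example_addr_t.
	 */
	#define batadv_example_addr_t u16

	/* After: a proper typedef (u16 being the kernel's 16-bit type
	 * from <linux/types.h>) gives the alias real type identity,
	 * visible to the compiler, to debuggers, and to tools such as
	 * sparse.
	 */
	typedef u16 batadv_example_addr_t;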
1455 files changed:
Documentation/ABI/stable/sysfs-class-rfkill
Documentation/ABI/testing/sysfs-class-net-queues
Documentation/admin-guide/pm/intel_pstate.rst
Documentation/core-api/kernel-api.rst
Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/dsa/realtek-smi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/fsl-fman.txt
Documentation/devicetree/bindings/net/rockchip-dwmac.txt
Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/driver-api/infrastructure.rst
Documentation/filesystems/Locking
Documentation/filesystems/cifs/AUTHORS
Documentation/filesystems/cifs/CHANGES
Documentation/filesystems/cifs/TODO
Documentation/filesystems/vfs.txt
Documentation/kbuild/kconfig-language.txt
Documentation/networking/e100.rst
Documentation/networking/e1000.rst
Documentation/networking/index.rst
Documentation/networking/ip-sysctl.txt
Documentation/networking/net_failover.rst
Documentation/networking/scaling.txt
Documentation/networking/strparser.txt
Documentation/rfkill.txt
Documentation/trace/histogram.txt
Documentation/usb/gadget_configfs.txt
Documentation/virtual/kvm/api.txt
MAINTAINERS
Makefile
arch/alpha/Kconfig
arch/alpha/include/uapi/asm/socket.h
arch/alpha/lib/Makefile
arch/alpha/lib/dec_and_lock.c [deleted file]
arch/arm/Kconfig
arch/arm/boot/dts/armada-385-synology-ds116.dts
arch/arm/boot/dts/bcm-cygnus.dtsi
arch/arm/boot/dts/bcm-hr2.dtsi
arch/arm/boot/dts/bcm-nsp.dtsi
arch/arm/boot/dts/bcm5301x.dtsi
arch/arm/boot/dts/da850.dtsi
arch/arm/boot/dts/gemini-dlink-dir-685.dts
arch/arm/boot/dts/imx6q.dtsi
arch/arm/boot/dts/imx6sx.dtsi
arch/arm/boot/dts/socfpga.dtsi
arch/arm/boot/dts/socfpga_arria10.dtsi
arch/arm/common/Makefile
arch/arm/configs/multi_v7_defconfig
arch/arm/kernel/signal.c
arch/arm/mach-bcm/Kconfig
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-socfpga/Kconfig
arch/arm/net/bpf_jit_32.c
arch/arm/net/bpf_jit_32.h
arch/arm/xen/enlighten.c
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
arch/arm64/boot/dts/amlogic/meson-axg.dtsi
arch/arm64/boot/dts/amlogic/meson-gx.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts
arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts
arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
arch/arm64/boot/dts/marvell/armada-cp110.dtsi
arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
arch/arm64/boot/dts/qcom/msm8916.dtsi
arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
arch/arm64/configs/defconfig
arch/arm64/crypto/aes-glue.c
arch/arm64/include/asm/alternative.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/alternative.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/module.c
arch/arm64/kernel/smp.c
arch/arm64/kvm/fpsimd.c
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/proc.S
arch/ia64/include/uapi/asm/socket.h
arch/microblaze/Kconfig.debug
arch/microblaze/include/asm/setup.h
arch/microblaze/include/asm/unistd.h
arch/microblaze/include/uapi/asm/unistd.h
arch/microblaze/kernel/Makefile
arch/microblaze/kernel/heartbeat.c [deleted file]
arch/microblaze/kernel/platform.c [deleted file]
arch/microblaze/kernel/reset.c
arch/microblaze/kernel/syscall_table.S
arch/microblaze/kernel/timer.c
arch/mips/Kconfig
arch/mips/ath79/mach-pb44.c
arch/mips/bcm47xx/setup.c
arch/mips/include/asm/io.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/uapi/asm/socket.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/kernel/entry.S
arch/mips/kernel/mcount.S
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/signal.c
arch/openrisc/include/asm/pgalloc.h
arch/openrisc/kernel/entry.S
arch/openrisc/kernel/head.S
arch/openrisc/kernel/traps.c
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/include/asm/signal.h
arch/parisc/include/uapi/asm/socket.h
arch/parisc/include/uapi/asm/unistd.h
arch/parisc/kernel/drivers.c
arch/parisc/kernel/syscall_table.S
arch/parisc/kernel/unwind.c
arch/powerpc/Makefile
arch/powerpc/boot/dts/fsl/qoriq-fman-0.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman-1.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi
arch/powerpc/include/asm/book3s/32/pgalloc.h
arch/powerpc/include/asm/book3s/64/pgtable-4k.h
arch/powerpc/include/asm/book3s/64/pgtable-64k.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/nmi.h
arch/powerpc/include/asm/nohash/32/pgalloc.h
arch/powerpc/include/asm/nohash/64/pgalloc.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/pci_32.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/signal.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/stacktrace.c
arch/powerpc/kernel/syscalls.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/pgtable-book3s64.c
arch/powerpc/mm/subpage-prot.c
arch/powerpc/mm/tlb-radix.c
arch/powerpc/platforms/powermac/time.c
arch/s390/include/asm/css_chars.h
arch/s390/include/uapi/asm/socket.h
arch/s390/net/bpf_jit_comp.c
arch/sparc/include/uapi/asm/socket.h
arch/x86/Makefile
arch/x86/boot/compressed/eboot.c
arch/x86/entry/common.c
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64_compat.S
arch/x86/include/asm/barrier.h
arch/x86/include/asm/pgalloc.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/vmx.h
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/cacheinfo.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mcheck/mce-severity.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/e820.c
arch/x86/kernel/head64.c
arch/x86/kernel/quirks.c
arch/x86/kernel/signal.c
arch/x86/kernel/traps.c
arch/x86/kernel/uprobes.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.h
arch/x86/mm/fault.c
arch/x86/mm/init_64.c
arch/x86/platform/efi/efi_64.c
arch/x86/xen/enlighten.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/enlighten_pvh.c
arch/x86/xen/smp_pv.c
arch/xtensa/include/uapi/asm/socket.h
block/bio.c
block/blk-core.c
block/blk-mq-debugfs.c
block/blk-mq.c
block/blk-softirq.c
block/blk-timeout.c
block/sed-opal.c
certs/blacklist.h
crypto/af_alg.c
crypto/algif_aead.c
crypto/algif_skcipher.c
crypto/asymmetric_keys/x509_cert_parser.c
crypto/morus640.c
crypto/sha3_generic.c
drivers/acpi/acpi_lpss.c
drivers/acpi/ec.c
drivers/acpi/osl.c
drivers/atm/iphase.c
drivers/atm/zatm.c
drivers/base/Makefile
drivers/base/core.c
drivers/base/power/domain.c
drivers/block/drbd/drbd_req.c
drivers/block/nbd.c
drivers/block/null_blk.c
drivers/bluetooth/hci_nokia.c
drivers/char/hw_random/core.c
drivers/char/random.c
drivers/clocksource/timer-stm32.c
drivers/connector/connector.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/qcom-cpufreq-kryo.c
drivers/crypto/chelsio/chtls/chtls_cm.c
drivers/crypto/chelsio/chtls/chtls_io.c
drivers/dax/super.c
drivers/firmware/dmi-id.c
drivers/firmware/dmi_scan.c
drivers/firmware/efi/libstub/tpm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/arm/malidp_hw.c
drivers/gpu/drm/arm/malidp_planes.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
drivers/gpu/drm/bridge/sil-sii8620.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
drivers/gpu/drm/nouveau/dispnv50/wndw.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/hid/hid-google-hammer.c
drivers/hid/hid-ids.h
drivers/hid/hid-steam.c
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/wacom_sys.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/hwmon.c
drivers/hwmon/nct6775.c
drivers/i2c/algos/i2c-algo-bit.c
drivers/i2c/busses/i2c-gpio.c
drivers/i2c/i2c-core-smbus.c
drivers/iio/accel/mma8452.c
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/light/tsl2772.c
drivers/iio/pressure/bmp280-core.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/hfi1/vnic_main.c
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/sw/rxe/rxe_req.c
drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
drivers/input/input-mt.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/goldfish_events.c
drivers/input/misc/Kconfig
drivers/input/misc/Makefile
drivers/input/misc/sc27xx-vibra.c [new file with mode: 0644]
drivers/input/mouse/elan_i2c.h
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/elan_i2c_smbus.c
drivers/input/mouse/elantech.c
drivers/input/mouse/psmouse-base.c
drivers/input/rmi4/Kconfig
drivers/input/rmi4/rmi_2d_sensor.c
drivers/input/rmi4/rmi_bus.c
drivers/input/rmi4/rmi_bus.h
drivers/input/rmi4/rmi_driver.c
drivers/input/rmi4/rmi_f01.c
drivers/input/rmi4/rmi_f03.c
drivers/input/rmi4/rmi_f11.c
drivers/input/rmi4/rmi_f12.c
drivers/input/rmi4/rmi_f30.c
drivers/input/rmi4/rmi_f34.c
drivers/input/rmi4/rmi_f54.c
drivers/input/touchscreen/silead.c
drivers/irqchip/irq-gic-v2m.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-ls-scfg-msi.c
drivers/isdn/capi/capi.c
drivers/isdn/capi/capidrv.c
drivers/isdn/gigaset/bas-gigaset.c
drivers/isdn/hardware/mISDN/avmfritz.c
drivers/isdn/hardware/mISDN/hfcpci.c
drivers/isdn/hardware/mISDN/hfcsusb.c
drivers/isdn/hardware/mISDN/mISDNinfineon.c
drivers/isdn/hardware/mISDN/mISDNisar.c
drivers/isdn/hisax/avm_pci.c
drivers/isdn/hisax/callc.c
drivers/isdn/hisax/config.c
drivers/isdn/hisax/gazel.c
drivers/isdn/hisax/hfc_usb.c
drivers/isdn/hisax/isar.c
drivers/isdn/hisax/l3_1tr6.c
drivers/isdn/hisax/l3dss1.c
drivers/isdn/hisax/st5481_usb.c
drivers/isdn/hysdn/hysdn_boot.c
drivers/isdn/i4l/isdn_tty.c
drivers/isdn/i4l/isdn_v110.c
drivers/isdn/mISDN/socket.c
drivers/isdn/mISDN/stack.c
drivers/lightnvm/Kconfig
drivers/md/dm-raid.c
drivers/md/dm-table.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin.c
drivers/md/dm-writecache.c
drivers/md/dm-zoned-target.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/raid10.c
drivers/media/rc/bpf-lirc.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/mtd/devices/mtd_dataflash.c
drivers/mtd/nand/raw/denali_dt.c
drivers/mtd/nand/raw/mxc_nand.c
drivers/mtd/nand/raw/nand_base.c
drivers/mtd/nand/raw/nand_macronix.c
drivers/mtd/nand/raw/nand_micron.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/Kconfig
drivers/net/dsa/Makefile
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/realtek-smi.c [new file with mode: 0644]
drivers/net/dsa/realtek-smi.h [new file with mode: 0644]
drivers/net/dsa/rtl8366.c [new file with mode: 0644]
drivers/net/dsa/rtl8366rb.c [new file with mode: 0644]
drivers/net/dsa/vitesse-vsc73xx.c [new file with mode: 0644]
drivers/net/ethernet/Makefile
drivers/net/ethernet/alteon/acenic.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amd/Kconfig
drivers/net/ethernet/apm/xgene-v2/Kconfig
drivers/net/ethernet/apm/xgene/Kconfig
drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
drivers/net/ethernet/aquantia/atlantic/ver.h
drivers/net/ethernet/arc/Kconfig
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/aurora/Kconfig
drivers/net/ethernet/aurora/nb8800.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cadence/Kconfig
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cadence/macb_ptp.c
drivers/net/ethernet/calxeda/Kconfig
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
drivers/net/ethernet/cavium/liquidio/octeon_console.c
drivers/net/ethernet/cavium/liquidio/octeon_device.h
drivers/net/ethernet/cavium/liquidio/request_manager.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/chelsio/cxgb4/sched.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/cisco/enic/enic_clsf.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cortina/gemini.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/freescale/fman/fman.c
drivers/net/ethernet/freescale/fman/fman.h
drivers/net/ethernet/freescale/fman/fman_dtsec.c
drivers/net/ethernet/freescale/fman/fman_dtsec.h
drivers/net/ethernet/freescale/fman/fman_memac.c
drivers/net/ethernet/freescale/fman/fman_memac.h
drivers/net/ethernet/freescale/fman/fman_port.c
drivers/net/ethernet/freescale/fman/fman_port.h
drivers/net/ethernet/freescale/fman/fman_tgec.c
drivers/net/ethernet/freescale/fman/fman_tgec.h
drivers/net/ethernet/freescale/fman/mac.c
drivers/net/ethernet/freescale/fman/mac.h
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/hisilicon/Kconfig
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns3/hnae3.c
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
drivers/net/ethernet/huawei/hinic/hinic_rx.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/lantiq_etop.c
drivers/net/ethernet/marvell/Kconfig
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/Makefile
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c [new file with mode: 0644]
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
drivers/net/ethernet/mellanox/mlx4/Makefile
drivers/net/ethernet/mellanox/mlx4/catas.c
drivers/net/ethernet/mellanox/mlx4/crdump.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/Makefile
drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/sriov.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlx5/core/wq.h
drivers/net/ethernet/mellanox/mlxsw/Kconfig
drivers/net/ethernet/mellanox/mlxsw/Makefile
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/resources.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c [moved from drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h with 59% similarity]
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/mellanox/mlxsw/trap.h
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/microchip/lan743x_main.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot.h
drivers/net/ethernet/mscc/ocelot_board.c
drivers/net/ethernet/netronome/nfp/bpf/jit.c
drivers/net/ethernet/netronome/nfp/bpf/main.c
drivers/net/ethernet/netronome/nfp/bpf/main.h
drivers/net/ethernet/netronome/nfp/bpf/offload.c
drivers/net/ethernet/netronome/nfp/bpf/verifier.c
drivers/net/ethernet/netronome/nfp/flower/action.c
drivers/net/ethernet/netronome/nfp/flower/cmsg.h
drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/match.c
drivers/net/ethernet/netronome/nfp/flower/metadata.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/netronome/nfp/nfp_asm.h
drivers/net/ethernet/netronome/nfp/nfp_net.h
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
drivers/net/ethernet/oki-semi/pch_gbe/Makefile
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c [deleted file]
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h [deleted file]
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
drivers/net/ethernet/packetengines/Kconfig
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qede/qede_filter.c
drivers/net/ethernet/qlogic/qede/qede_ptp.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
drivers/net/ethernet/realtek/Kconfig
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/Kconfig
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/sfc/Makefile
drivers/net/ethernet/sfc/ef10_sriov.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/smsc/epic100.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
drivers/net/ethernet/stmicro/stmmac/hwif.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/ethernet/sun/ldmvsw.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpts.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/ti/netcp_core.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/fjes/fjes_main.c
drivers/net/geneve.c
drivers/net/hamradio/6pack.c
drivers/net/hamradio/bpqether.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/macvlan.c
drivers/net/net_failover.c
drivers/net/netdevsim/Makefile
drivers/net/netdevsim/bpf.c
drivers/net/netdevsim/ipsec.c [new file with mode: 0644]
drivers/net/netdevsim/netdev.c
drivers/net/netdevsim/netdevsim.h
drivers/net/ntb_netdev.c
drivers/net/phy/Kconfig
drivers/net/phy/dp83tc811.c
drivers/net/phy/fixed_phy.c
drivers/net/phy/mdio-mux-gpio.c
drivers/net/phy/phy.c
drivers/net/phy/realtek.c
drivers/net/phy/sfp.c
drivers/net/phy/vitesse.c
drivers/net/phy/xilinx_gmii2rgmii.c
drivers/net/ppp/pppoe.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/asix_devices.c
drivers/net/usb/catc.c
drivers/net/usb/cdc-phonet.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/hso.c
drivers/net/usb/kaweth.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/rtl8150.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wan/farsync.c
drivers/net/wan/lmc/lmc_main.c
drivers/net/wimax/i2400m/control.c
drivers/net/wimax/i2400m/fw.c
drivers/net/wimax/i2400m/netdev.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/broadcom/brcm80211/Kconfig
drivers/net/wireless/marvell/mwifiex/main.c
drivers/net/wireless/quantenna/qtnfmac/Kconfig
drivers/net/wireless/realtek/rtlwifi/base.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netfront.c
drivers/nfc/pn533/usb.c
drivers/nvdimm/pmem.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/core.c
drivers/of/of_mdio.c
drivers/opp/core.c
drivers/pci/Makefile
drivers/pci/controller/Kconfig
drivers/pci/hotplug/acpi_pcihp.c
drivers/perf/xgene_pmu.c
drivers/pinctrl/actions/pinctrl-owl.c
drivers/pinctrl/devicetree.c
drivers/pinctrl/mediatek/pinctrl-mt7622.c
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
drivers/pinctrl/pinctrl-single.c
drivers/ptp/Kconfig
drivers/ptp/ptp_chardev.c
drivers/ptp/ptp_qoriq.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_alias.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_eer.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dasd_int.h
drivers/s390/cio/Makefile
drivers/s390/cio/vfio_ccw_cp.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/cio/vfio_ccw_fsm.c
drivers/s390/cio/vfio_ccw_trace.h [new file with mode: 0644]
drivers/s390/net/Kconfig
drivers/s390/net/Makefile
drivers/s390/net/ism.h [new file with mode: 0644]
drivers/s390/net/ism_drv.c [new file with mode: 0644]
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.h
drivers/s390/net/qeth_l2.h
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/ipr.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/xen-scsifront.c
drivers/soc/imx/gpcv2.c
drivers/soc/qcom/Kconfig
drivers/soc/renesas/rcar-sysc.c
drivers/staging/android/ion/ion_heap.c
drivers/staging/comedi/drivers/quatech_daqp_cs.c
drivers/staging/netlogic/xlr_net.c
drivers/staging/rtl8188eu/include/wifi.h
drivers/staging/rtl8188eu/os_dep/os_intfs.c
drivers/staging/rtl8712/wifi.h
drivers/staging/rtl8723bs/include/wifi.h
drivers/staging/rtl8723bs/os_dep/os_intfs.c
drivers/staging/rtlwifi/base.c
drivers/staging/typec/Kconfig
drivers/target/target_core_user.c
drivers/tty/n_tty.c
drivers/tty/serdev/core.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/vt/vt.c
drivers/usb/chipidea/host.c
drivers/usb/class/cdc-acm.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/hcd.h
drivers/usb/dwc2/hcd_intr.c
drivers/usb/dwc2/hcd_queue.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/dwc3-of-simple.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/dwc3-qcom.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-tegra.c
drivers/usb/host/xhci-trace.h
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/serial/cp210x.c
drivers/usb/typec/tcpm.c
drivers/usb/typec/ucsi/ucsi.c
drivers/usb/typec/ucsi/ucsi_acpi.c
drivers/vhost/net.c
drivers/xen/Makefile
drivers/xen/events/events_base.c
drivers/xen/grant-table.c
drivers/xen/manage.c
drivers/xen/privcmd-buf.c [new file with mode: 0644]
drivers/xen/privcmd.c
drivers/xen/privcmd.h
drivers/xen/xen-scsiback.c
fs/aio.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/ceph/inode.c
fs/cifs/cifs_debug.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smb2proto.h
fs/cifs/smb2transport.c
fs/cifs/smbdirect.c
fs/cifs/trace.h
fs/cifs/transport.c
fs/eventfd.c
fs/eventpoll.c
fs/ext2/ext2.h
fs/ext2/super.c
fs/jfs/xattr.c
fs/nfs/delegation.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/nfs4proc.c
fs/nfs/pnfs.h
fs/pipe.c
fs/proc/base.c
fs/proc/generic.c
fs/quota/dquot.c
fs/select.c
fs/timerfd.c
fs/udf/balloc.c
fs/udf/directory.c
fs/udf/inode.c
fs/udf/namei.c
fs/udf/udfdecl.h
fs/xfs/libxfs/xfs_ag_resv.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/libxfs/xfs_format.h
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/libxfs/xfs_rtbitmap.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_fsmap.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_trans.c
include/acpi/processor.h
include/asm-generic/qspinlock_types.h
include/crypto/if_alg.h
include/linux/acpi.h
include/linux/atmdev.h
include/linux/backing-dev-defs.h
include/linux/blkdev.h
include/linux/bpf-cgroup.h
include/linux/bpf.h
include/linux/bpf_lirc.h
include/linux/compat.h
include/linux/compiler-gcc.h
include/linux/compiler_types.h
include/linux/cpumask.h
include/linux/dax.h
include/linux/dma-contiguous.h
include/linux/etherdevice.h
include/linux/filter.h
include/linux/fs.h
include/linux/fsl/ptp_qoriq.h
include/linux/hwmon.h
include/linux/ieee80211.h
include/linux/if_team.h
include/linux/iio/buffer-dma.h
include/linux/input/mt.h
include/linux/ipc.h
include/linux/ipc_namespace.h
include/linux/irq.h
include/linux/irqdesc.h
include/linux/kernel.h
include/linux/list.h
include/linux/memory.h
include/linux/mlx4/device.h
include/linux/mlx5/eswitch.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/mlx5_ifc_fpga.h
include/linux/mod_devicetable.h
include/linux/mroute_base.h
include/linux/net.h
include/linux/netdev_features.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/nfs_xdr.h
include/linux/openvswitch.h
include/linux/phy.h
include/linux/pm_domain.h
include/linux/poll.h
include/linux/reciprocal_div.h
include/linux/refcount.h
include/linux/rfkill.h
include/linux/rhashtable-types.h [new file with mode: 0644]
include/linux/rhashtable.h
include/linux/rmi.h
include/linux/scatterlist.h
include/linux/sched.h
include/linux/sctp.h
include/linux/sfp.h
include/linux/skbuff.h
include/linux/slub_def.h
include/linux/spinlock.h
include/linux/syscalls.h
include/linux/tcp.h
include/linux/udp.h
include/net/act_api.h
include/net/bluetooth/bluetooth.h
include/net/bonding.h
include/net/busy_poll.h
include/net/cfg80211.h
include/net/devlink.h
include/net/flow_dissector.h
include/net/ieee80211_radiotap.h
include/net/inet_common.h
include/net/inet_frag.h
include/net/inet_sock.h
include/net/ip.h
include/net/ip6_fib.h
include/net/ip_tunnels.h
include/net/ipv6.h
include/net/iucv/af_iucv.h
include/net/lag.h [new file with mode: 0644]
include/net/mac80211.h
include/net/net_namespace.h
include/net/netfilter/nf_flow_table.h
include/net/netfilter/nf_log.h
include/net/netns/hash.h
include/net/netns/ipv6.h
include/net/pkt_cls.h
include/net/pkt_sched.h
include/net/sch_generic.h
include/net/sctp/sctp.h
include/net/sctp/structs.h
include/net/seg6.h
include/net/seg6_hmac.h
include/net/smc.h
include/net/sock.h
include/net/tc_act/tc_pedit.h
include/net/tc_act/tc_skbedit.h
include/net/tcp.h
include/net/tls.h
include/net/transp_v6.h
include/net/udp.h
include/net/udp_tunnel.h
include/net/xdp.h
include/rdma/ib_verbs.h
include/trace/events/net.h
include/trace/events/sock.h
include/uapi/asm-generic/socket.h
include/uapi/linux/aio_abi.h
include/uapi/linux/bpf.h
include/uapi/linux/devlink.h
include/uapi/linux/errqueue.h
include/uapi/linux/if_link.h
include/uapi/linux/ila.h
include/uapi/linux/mroute.h
include/uapi/linux/nbd.h
include/uapi/linux/net_tstamp.h
include/uapi/linux/nl80211.h
include/uapi/linux/openvswitch.h
include/uapi/linux/pkt_cls.h
include/uapi/linux/pkt_sched.h
include/uapi/linux/sctp.h
include/uapi/linux/smc_diag.h
include/uapi/linux/snmp.h
include/uapi/linux/target_core_user.h
include/uapi/linux/tc_act/tc_pedit.h
include/uapi/linux/tc_act/tc_skbedit.h
include/uapi/linux/tc_act/tc_tunnel_key.h
include/uapi/linux/tipc_netlink.h
include/xen/xen.h
init/Kconfig
ipc/msg.c
ipc/sem.c
ipc/shm.c
ipc/util.c
kernel/Makefile
kernel/bpf/cgroup.c
kernel/bpf/core.c
kernel/bpf/devmap.c
kernel/bpf/sockmap.c
kernel/bpf/syscall.c
kernel/dma/Kconfig [new file with mode: 0644]
kernel/dma/Makefile [new file with mode: 0644]
kernel/dma/coherent.c [moved from drivers/base/dma-coherent.c with 100% similarity]
kernel/dma/contiguous.c [moved from drivers/base/dma-contiguous.c with 100% similarity]
kernel/dma/debug.c [moved from lib/dma-debug.c with 100% similarity]
kernel/dma/direct.c [moved from lib/dma-direct.c with 100% similarity]
kernel/dma/mapping.c [moved from drivers/base/dma-mapping.c with 99% similarity]
kernel/dma/noncoherent.c [moved from lib/dma-noncoherent.c with 100% similarity]
kernel/dma/swiotlb.c [moved from lib/swiotlb.c with 99% similarity]
kernel/dma/virt.c [moved from lib/dma-virt.c with 98% similarity]
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/irq/debugfs.c
kernel/locking/lockdep.c
kernel/locking/rwsem.c
kernel/rseq.c
kernel/softirq.c
kernel/time/hrtimer.c
kernel/time/posix-cpu-timers.c
kernel/time/time.c
kernel/trace/trace.c
kernel/trace/trace_events_filter.c
lib/Kconfig
lib/Kconfig.kasan
lib/Makefile
lib/dec_and_lock.c
lib/nlattr.c
lib/percpu_ida.c
lib/reciprocal_div.c
lib/refcount.c
lib/rhashtable.c
lib/scatterlist.c
lib/test_bpf.c
lib/test_printf.c
lib/test_rhashtable.c
mm/backing-dev.c
mm/memblock.c
mm/slab_common.c
mm/slub.c
mm/vmstat.c
net/8021q/vlan.c
net/Makefile
net/appletalk/ddp.c
net/atm/br2684.c
net/atm/clip.c
net/atm/common.c
net/atm/common.h
net/atm/lec.c
net/atm/mpc.c
net/atm/pppoatm.c
net/atm/pvc.c
net/atm/raw.c
net/atm/svc.c
net/ax25/af_ax25.c
net/batman-adv/bridge_loop_avoidance.c
net/bluetooth/af_bluetooth.c
net/bluetooth/hci_sock.c
net/bluetooth/l2cap_sock.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bpfilter/.gitignore [new file with mode: 0644]
net/bpfilter/Kconfig
net/bpfilter/Makefile
net/bpfilter/bpfilter_kern.c
net/bpfilter/bpfilter_umh_blob.S [new file with mode: 0644]
net/caif/caif_socket.c
net/can/bcm.c
net/can/raw.c
net/core/datagram.c
net/core/dev.c
net/core/dev_ioctl.c
net/core/devlink.c
net/core/ethtool.c
net/core/fib_rules.c
net/core/filter.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/core/xdp.c
net/dccp/ccids/ccid3.c
net/dccp/dccp.h
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/proto.c
net/decnet/af_decnet.c
net/decnet/dn_nsp_in.c
net/dsa/slave.c
net/ethernet/eth.c
net/ieee802154/socket.c
net/ipv4/af_inet.c
net/ipv4/esp4_offload.c
net/ipv4/fou.c
net/ipv4/gre_offload.c
net/ipv4/icmp.c
net/ipv4/inet_fragment.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_output.c
net/ipv4/ipmr.c
net/ipv4/ipmr_base.c
net/ipv4/netfilter/nf_log_ipv4.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/raw.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_bbr.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_offload.c
net/ipv4/tcp_output.c
net/ipv4/tcp_rate.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/datagram.c
net/ipv6/esp6_offload.c
net/ipv6/icmp.c
net/ipv6/ila/Makefile
net/ipv6/ila/ila.h
net/ipv6/ila/ila_common.c
net/ipv6/ila/ila_main.c [new file with mode: 0644]
net/ipv6/ila/ila_xlat.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_input.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/mcast.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/netfilter/nf_log_ipv6.c
net/ipv6/ping.c
net/ipv6/raw.c
net/ipv6/seg6.c
net/ipv6/seg6_hmac.c
net/ipv6/tcpv6_offload.c
net/ipv6/udp.c
net/ipv6/udp_offload.c
net/iucv/af_iucv.c
net/kcm/kcmsock.c
net/key/af_key.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_debugfs.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_ppp.c
net/llc/af_llc.c
net/mac80211/Makefile
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/ethtool.c
net/mac80211/he.c [new file with mode: 0644]
net/mac80211/ht.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/ncsi/ncsi-aen.c
net/ncsi/ncsi-manage.c
net/netfilter/core.c
net/netfilter/nf_conncount.c
net/netfilter/nf_conntrack_broadcast.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_log.c
net/netfilter/nf_log_common.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_meta.c
net/netfilter/nft_socket.c
net/netfilter/xt_cgroup.c
net/netfilter/xt_owner.c
net/netfilter/xt_recent.c
net/netfilter/xt_socket.c
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/nfc/llcp_sock.c
net/nfc/rawsock.c
net/openvswitch/actions.c
net/openvswitch/flow_netlink.c
net/packet/af_packet.c
net/phonet/socket.c
net/qrtr/qrtr.c
net/rds/connection.c
net/rds/ib_recv.c
net/rds/loop.c
net/rds/loop.h
net/rose/af_rose.c
net/rxrpc/af_rxrpc.c
net/sched/Kconfig
net/sched/Makefile
net/sched/act_api.c
net/sched/act_bpf.c
net/sched/act_connmark.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ife.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_sample.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/act_skbmod.c
net/sched/act_tunnel_key.c
net/sched/act_vlan.c
net/sched/cls_api.c
net/sched/cls_bpf.c
net/sched/cls_flower.c
net/sched/cls_matchall.c
net/sched/cls_u32.c
net/sched/sch_api.c
net/sched/sch_blackhole.c
net/sched/sch_cake.c [new file with mode: 0644]
net/sched/sch_etf.c [new file with mode: 0644]
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_netem.c
net/sctp/associola.c
net/sctp/chunk.c
net/sctp/input.c
net/sctp/ipv6.c
net/sctp/protocol.c
net/sctp/socket.c
net/smc/Makefile
net/smc/af_smc.c
net/smc/smc.h
net/smc/smc_cdc.c
net/smc/smc_cdc.h
net/smc/smc_clc.c
net/smc/smc_clc.h
net/smc/smc_core.c
net/smc/smc_core.h
net/smc/smc_diag.c
net/smc/smc_ib.c
net/smc/smc_ib.h
net/smc/smc_ism.c [new file with mode: 0644]
net/smc/smc_ism.h [new file with mode: 0644]
net/smc/smc_pnet.c
net/smc/smc_pnet.h
net/smc/smc_rx.c
net/smc/smc_tx.c
net/smc/smc_tx.h
net/socket.c
net/strparser/strparser.c
net/sunrpc/xprt.c
net/tipc/bearer.c
net/tipc/group.c
net/tipc/group.h
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/node.c
net/tipc/node.h
net/tipc/socket.c
net/tls/tls_device.c
net/tls/tls_device_fallback.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/virtio_transport.c
net/wireless/core.c
net/wireless/core.h
net/wireless/nl80211.c
net/wireless/sysfs.c
net/wireless/util.c
net/wireless/wext-compat.c
net/x25/af_x25.c
net/xdp/xsk.c
samples/bpf/Makefile
samples/bpf/xdp_fwd_kern.c
samples/bpf/xdp_redirect_cpu_kern.c
samples/bpf/xdp_rxq_info_kern.c
samples/bpf/xdp_rxq_info_user.c
samples/bpf/xdp_sample_pkts_kern.c [new file with mode: 0644]
samples/bpf/xdp_sample_pkts_user.c [new file with mode: 0644]
scripts/Makefile.build
scripts/cc-can-link.sh
scripts/checkpatch.pl
scripts/gcc-x86_64-has-stack-protector.sh
scripts/kconfig/expr.h
scripts/kconfig/preprocess.c
scripts/kconfig/zconf.y
security/keys/dh.c
security/selinux/selinuxfs.c
security/smack/smack_lsm.c
sound/core/seq/seq_clientmgr.c
sound/core/timer.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_codec.h
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/lx6464es/lx6464es.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/powerpc/include/uapi/asm/kvm.h
tools/arch/powerpc/include/uapi/asm/unistd.h
tools/arch/x86/include/asm/cpufeatures.h
tools/bpf/Makefile.helpers [new file with mode: 0644]
tools/bpf/bpftool/Documentation/Makefile
tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
tools/bpf/bpftool/Documentation/bpftool-prog.rst
tools/bpf/bpftool/Makefile
tools/bpf/bpftool/bash-completion/bpftool
tools/bpf/bpftool/btf_dumper.c [new file with mode: 0644]
tools/bpf/bpftool/cgroup.c
tools/bpf/bpftool/common.c
tools/bpf/bpftool/main.c
tools/bpf/bpftool/main.h
tools/bpf/bpftool/map.c
tools/bpf/bpftool/perf.c
tools/bpf/bpftool/prog.c
tools/bpf/bpftool/xlated_dumper.c
tools/build/Makefile.feature
tools/build/feature/Makefile
tools/build/feature/test-reallocarray.c [new file with mode: 0644]
tools/include/linux/compiler-gcc.h
tools/include/linux/overflow.h [new file with mode: 0644]
tools/include/tools/libc_compat.h [new file with mode: 0644]
tools/include/uapi/drm/drm.h
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/if_link.h
tools/include/uapi/linux/kvm.h
tools/lib/bpf/Build
tools/lib/bpf/Makefile
tools/lib/bpf/btf.c
tools/lib/bpf/btf.h
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf.h
tools/lib/bpf/libbpf_errno.c [new file with mode: 0644]
tools/objtool/check.c
tools/perf/Documentation/perf-stat.txt
tools/perf/arch/powerpc/util/skip-callchain-idx.c
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
tools/perf/bench/numa.c
tools/perf/builtin-annotate.c
tools/perf/builtin-c2c.c
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/tests/parse-events.c
tools/perf/tests/topology.c
tools/perf/ui/gtk/hists.c
tools/perf/util/c++/clang.cpp
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
tools/perf/util/parse-events.y
tools/perf/util/pmu.c
tools/perf/util/sort.h
tools/power/x86/turbostat/turbostat.8
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/cgroup_helpers.c
tools/testing/selftests/bpf/cgroup_helpers.h
tools/testing/selftests/bpf/config
tools/testing/selftests/bpf/test_kmod.sh
tools/testing/selftests/bpf/test_lirc_mode2.sh
tools/testing/selftests/bpf/test_lwt_seg6local.sh
tools/testing/selftests/bpf/test_offload.py
tools/testing/selftests/bpf/test_sock_addr.c
tools/testing/selftests/bpf/test_sockmap.c
tools/testing/selftests/bpf/test_tcpbpf.h
tools/testing/selftests/bpf/test_tcpbpf_kern.c
tools/testing/selftests/bpf/test_tcpbpf_user.c
tools/testing/selftests/bpf/test_tunnel.sh
tools/testing/selftests/bpf/trace_helpers.c
tools/testing/selftests/bpf/trace_helpers.h
tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh [new file with mode: 0755]
tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh [new file with mode: 0644]
tools/testing/selftests/drivers/net/mlxsw/router_scale.sh [new file with mode: 0644]
tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh [new file with mode: 0644]
tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh [new file with mode: 0755]
tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh [new file with mode: 0644]
tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh [new file with mode: 0755]
tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh [new file with mode: 0644]
tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh [new file with mode: 0644]
tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh [new file with mode: 0644]
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/config
tools/testing/selftests/net/fib_tests.sh [changed mode: 0644->0755]
tools/testing/selftests/net/forwarding/README
tools/testing/selftests/net/forwarding/bridge_port_isolation.sh [new file with mode: 0755]
tools/testing/selftests/net/forwarding/devlink_lib.sh [new file with mode: 0644]
tools/testing/selftests/net/forwarding/gre_multipath.sh [new file with mode: 0755]
tools/testing/selftests/net/forwarding/lib.sh
tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh [new file with mode: 0755]
tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh
tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh [new file with mode: 0755]
tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
tools/testing/selftests/net/forwarding/mirror_gre_nh.sh
tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh
tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
tools/testing/selftests/net/forwarding/mirror_lib.sh
tools/testing/selftests/net/forwarding/router_bridge.sh [new file with mode: 0755]
tools/testing/selftests/net/forwarding/router_bridge_vlan.sh [new file with mode: 0755]
tools/testing/selftests/net/forwarding/router_multipath.sh
tools/testing/selftests/net/ip6_gre_headroom.sh [new file with mode: 0755]
tools/testing/selftests/net/rtnetlink.sh
tools/testing/selftests/net/tls.c [new file with mode: 0644]
tools/testing/selftests/pstore/pstore_post_reboot_tests
tools/testing/selftests/rseq/param_test.c
tools/testing/selftests/rseq/rseq-arm.h
tools/testing/selftests/rseq/rseq-mips.h [new file with mode: 0644]
tools/testing/selftests/rseq/rseq.h
tools/testing/selftests/rseq/run_param_test.sh [changed mode: 0644->0755]
tools/testing/selftests/sparc64/Makefile
tools/testing/selftests/sparc64/drivers/Makefile
tools/testing/selftests/static_keys/test_static_keys.sh
tools/testing/selftests/sync/config [new file with mode: 0644]
tools/testing/selftests/sysctl/sysctl.sh
tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json [new file with mode: 0644]
tools/testing/selftests/user/test_user_copy.sh
tools/testing/selftests/vm/compaction_test.c
tools/testing/selftests/vm/mlock2-tests.c
tools/testing/selftests/vm/run_vmtests
tools/testing/selftests/vm/userfaultfd.c
tools/testing/selftests/x86/sigreturn.c
tools/testing/selftests/zram/zram.sh
tools/testing/selftests/zram/zram_lib.sh
tools/virtio/linux/scatterlist.h
virt/kvm/Kconfig
virt/kvm/arm/mmu.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/kvm_main.c

index e1ba4a10475364cd8fcf201b323c26de5ee8b62b..80151a409d67ce02775e0fc249d09b075ac8ec6a 100644 (file)
@@ -11,7 +11,7 @@ KernelVersion:        v2.6.22
 Contact:       linux-wireless@vger.kernel.org,
 Description:   The rfkill class subsystem folder.
                Each registered rfkill driver is represented by an rfkillX
-               subfolder (X being an integer > 0).
+               subfolder (X being an integer >= 0).
 
 
 What:          /sys/class/rfkill/rfkill[0-9]+/name
@@ -48,8 +48,8 @@ Contact:      linux-wireless@vger.kernel.org
 Description:   Current state of the transmitter.
                This file was scheduled to be removed in 2014, but due to its
                large number of users it will be sticking around for a bit
-               longer. Despite it being marked as stabe, the newer "hard" and
-               "soft" interfaces should be preffered, since it is not possible
+               longer. Despite it being marked as stable, the newer "hard" and
+               "soft" interfaces should be preferred, since it is not possible
                to express the 'soft and hard block' state of the rfkill driver
                through this interface. There will likely be another attempt to
                remove it in the future.
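Besides the spelling fixes, the hunk above steers users toward the newer
per-device "hard" and "soft" attributes rather than the legacy "state" file.
A hypothetical userspace sketch of reading them — the rfkill0 index is an
assumption; any registered device works:

	#include <stdio.h>

	int main(void)
	{
		int hard, soft;
		FILE *fh = fopen("/sys/class/rfkill/rfkill0/hard", "r");
		FILE *fs = fopen("/sys/class/rfkill/rfkill0/soft", "r");

		if (!fh || !fs) {
			perror("fopen");
			return 1;
		}
		if (fscanf(fh, "%d", &hard) != 1 ||  /* 1 = blocked by hardware switch */
		    fscanf(fs, "%d", &soft) != 1) {  /* 1 = blocked by software request */
			fprintf(stderr, "unexpected attribute format\n");
			return 1;
		}
		printf("hard=%d soft=%d\n", hard, soft);
		fclose(fh);
		fclose(fs);
		return 0;
	}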
index 0c0df91b1516fb087ae22f82a43dafa3fb44e858..978b76358661a91470436e73842abde367d7d70e 100644 (file)
@@ -42,6 +42,17 @@ Description:
                network device transmit queue. Possible vaules depend on the
                number of available CPU(s) in the system.
 
+What:          /sys/class/<iface>/queues/tx-<queue>/xps_rxqs
+Date:          June 2018
+KernelVersion: 4.18.0
+Contact:       netdev@vger.kernel.org
+Description:
+               Mask of the receive queue(s) currently enabled to participate
+               into the Transmit Packet Steering packet processing flow for this
+               network device transmit queue. Possible values depend on the
+               number of available receive queue(s) in the network device.
+               Default is disabled.
+
 What:          /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
 Date:          November 2011
 KernelVersion: 3.3
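The new xps_rxqs attribute documented above takes a hexadecimal bitmask of
receive queues. A hypothetical sketch of enabling rx queues 0 and 1 as the
steering sources for a device's first transmit queue — the interface name,
queue numbers, and the /sys/class/net prefix are assumptions for illustration:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/net/eth0/queues/tx-0/xps_rxqs", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* bitmask 0x3: receive queues 0 and 1 participate */
		fputs("3\n", f);
		fclose(f);
		return 0;
	}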
index ab2fe0eda1d7c317faefab52363ce96755ac64d5..8f1d3de449b53fedcc78d1aee506e6882f2be90c 100644 (file)
@@ -324,8 +324,7 @@ Global Attributes
 
 ``intel_pstate`` exposes several global attributes (files) in ``sysfs`` to
 control its functionality at the system level.  They are located in the
-``/sys/devices/system/cpu/cpufreq/intel_pstate/`` directory and affect all
-CPUs.
+``/sys/devices/system/cpu/intel_pstate/`` directory and affect all CPUs.
 
 Some of them are not present if the ``intel_pstate=per_cpu_perf_limits``
 argument is passed to the kernel in the command line.
@@ -379,6 +378,17 @@ argument is passed to the kernel in the command line.
        but it affects the maximum possible value of per-policy P-state limits
        (see `Interpretation of Policy Attributes`_ below for details).
 
+``hwp_dynamic_boost``
+       This attribute is only present if ``intel_pstate`` works in the
+       `active mode with the HWP feature enabled <Active Mode With HWP_>`_ in
+       the processor.  If set (equal to 1), it causes the minimum P-state limit
+       to be increased dynamically for a short time whenever a task previously
+       waiting on I/O is selected to run on a given logical CPU (the purpose
+       of this mechanism is to improve performance).
+
+       This setting has no effect on logical CPUs whose minimum P-state limit
+       is directly set to the highest non-turbo P-state or above it.
+
 .. _status_attr:
 
 ``status``
@@ -410,7 +420,7 @@ argument is passed to the kernel in the command line.
        That only is supported in some configurations, though (for example, if
        the `HWP feature is enabled in the processor <Active Mode With HWP_>`_,
        the operation mode of the driver cannot be changed), and if it is not
-       supported in the current configuration, writes to this attribute with
+       supported in the current configuration, writes to this attribute will
        fail with an appropriate error.
 
 Interpretation of Policy Attributes
index 8e44aea366c262068900cddaabd240d8615ac552..76fe2d0f5e7d7db307bfa4ead890ead2d8840bdd 100644 (file)
@@ -284,7 +284,7 @@ Resources Management
 MTRR Handling
 -------------
 
-.. kernel-doc:: arch/x86/kernel/cpu/mtrr/main.c
+.. kernel-doc:: arch/x86/kernel/cpu/mtrr/mtrr.c
    :export:
 
 Security Framework
diff --git a/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt b/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt
new file mode 100644 (file)
index 0000000..f2ec0d4
--- /dev/null
@@ -0,0 +1,23 @@
+Spreadtrum SC27xx PMIC Vibrator
+
+Required properties:
+- compatible: should be "sprd,sc2731-vibrator".
+- reg: address of vibrator control register.
+
+Example :
+
+       sc2731_pmic: pmic@0 {
+               compatible = "sprd,sc2731";
+               reg = <0>;
+               spi-max-frequency = <26000000>;
+               interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               vibrator@eb4 {
+                       compatible = "sprd,sc2731-vibrator";
+                       reg = <0xeb4>;
+               };
+       };
diff --git a/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt b/Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
new file mode 100644 (file)
index 0000000..b6ae854
--- /dev/null
@@ -0,0 +1,153 @@
+Realtek SMI-based Switches
+==========================
+
+The SMI "Simple Management Interface" is a two-wire protocol using
+bit-banged GPIO that while it reuses the MDIO lines MCK and MDIO does
+not use the MDIO protocol. This binding defines how to specify the
+SMI-based Realtek devices.
+
+Required properties:
+
+- compatible: must be exactly one of:
+      "realtek,rtl8366"
+      "realtek,rtl8366rb" (4+1 ports)
+      "realtek,rtl8366s"  (4+1 ports)
+      "realtek,rtl8367"
+      "realtek,rtl8367b"
+      "realtek,rtl8368s"  (8 port)
+      "realtek,rtl8369"
+      "realtek,rtl8370"   (8 port)
+
+Required properties:
+- mdc-gpios: GPIO line for the MDC clock line.
+- mdio-gpios: GPIO line for the MDIO data line.
+- reset-gpios: GPIO line for the reset signal.
+
+Optional properties:
+- realtek,disable-leds: if the LED drivers are not used in the
+  hardware design this will disable them so they are not turned on
+  and wasting power.
+
+Required subnodes:
+
+- interrupt-controller
+
+  This defines an interrupt controller with an IRQ line (typically
+  a GPIO) that will demultiplex and handle the interrupt from the single
+  interrupt line coming out of one of the SMI-based chips. It most
+  importantly provides link up/down interrupts to the PHY blocks inside
+  the ASIC.
+
+Required properties of interrupt-controller:
+
+- interrupt: parent interrupt, see interrupt-controller/interrupts.txt
+- interrupt-controller: see interrupt-controller/interrupts.txt
+- #address-cells: should be <0>
+- #interrupt-cells: should be <1>
+
+- mdio
+
+  This defines the internal MDIO bus of the SMI device, mostly for the
+  purpose of being able to hook the interrupts to the right PHY and
+  the right PHY to the corresponding port.
+
+Required properties of mdio:
+
+- compatible: should be set to "realtek,smi-mdio" for all SMI devices
+
+See net/mdio.txt for additional MDIO bus properties.
+
+See net/dsa/dsa.txt for a list of additional required and optional properties
+and subnodes of DSA switches.
+
+Examples:
+
+switch {
+       compatible = "realtek,rtl8366rb";
+       /* 22 = MDIO (has input reads), 21 = MDC (clock, output only) */
+       mdc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>;
+       mdio-gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>;
+       reset-gpios = <&gpio0 14 GPIO_ACTIVE_LOW>;
+
+       switch_intc: interrupt-controller {
+               /* GPIO 15 provides the interrupt */
+               interrupt-parent = <&gpio0>;
+               interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+               interrupt-controller;
+               #address-cells = <0>;
+               #interrupt-cells = <1>;
+       };
+
+       ports {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               reg = <0>;
+               port@0 {
+                       reg = <0>;
+                       label = "lan0";
+                       phy-handle = <&phy0>;
+               };
+               port@1 {
+                       reg = <1>;
+                       label = "lan1";
+                       phy-handle = <&phy1>;
+               };
+               port@2 {
+                       reg = <2>;
+                       label = "lan2";
+                       phy-handle = <&phy2>;
+               };
+               port@3 {
+                       reg = <3>;
+                       label = "lan3";
+                       phy-handle = <&phy3>;
+               };
+               port@4 {
+                       reg = <4>;
+                       label = "wan";
+                       phy-handle = <&phy4>;
+               };
+               port@5 {
+                       reg = <5>;
+                       label = "cpu";
+                       ethernet = <&gmac0>;
+                       phy-mode = "rgmii";
+                       fixed-link {
+                               speed = <1000>;
+                               full-duplex;
+                       };
+               };
+       };
+
+       mdio {
+               compatible = "realtek,smi-mdio", "dsa-mdio";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               phy0: phy@0 {
+                       reg = <0>;
+                       interrupt-parent = <&switch_intc>;
+                       interrupts = <0>;
+               };
+               phy1: phy@1 {
+                       reg = <1>;
+                       interrupt-parent = <&switch_intc>;
+                       interrupts = <1>;
+               };
+               phy2: phy@2 {
+                       reg = <2>;
+                       interrupt-parent = <&switch_intc>;
+                       interrupts = <2>;
+               };
+               phy3: phy@3 {
+                       reg = <3>;
+                       interrupt-parent = <&switch_intc>;
+                       interrupts = <3>;
+               };
+               phy4: phy@4 {
+                       reg = <4>;
+                       interrupt-parent = <&switch_intc>;
+                       interrupts = <12>;
+               };
+       };
+};
diff --git a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt
new file mode 100644 (file)
index 0000000..ed4710c
--- /dev/null
@@ -0,0 +1,81 @@
+Vitesse VSC73xx Switches
+========================
+
+This defines device tree bindings for the Vitesse VSC73xx switch chips.
+The Vitesse company was acquired by Microsemi, and Microsemi was in
+turn acquired by Microchip, but the chips retain this vendor branding.
+
+The currently supported switch chips are:
+Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
+Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
+Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
+Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
+
+The device tree node is an SPI device, so it must reside inside an SPI
+bus device tree node; see spi/spi-bus.txt.
+
+Required properties:
+
+- compatible: must be exactly one of:
+       "vitesse,vsc7385"
+       "vitesse,vsc7388"
+       "vitesse,vsc7395"
+       "vitesse,vsc7398"
+- gpio-controller: indicates that this switch is also a GPIO controller,
+  see gpio/gpio.txt
+- #gpio-cells: this must be set to <2> and indicates that we are a
+  two-cell GPIO controller, see gpio/gpio.txt
+
+Optional properties:
+
+- reset-gpios: a handle to a GPIO line that can issue a reset of the
+  chip.  It should be tagged as active low.
+
+Required subnodes:
+
+See net/dsa/dsa.txt for a list of additional required and optional properties
+and subnodes of DSA switches.
+
+Examples:
+
+switch@0 {
+       compatible = "vitesse,vsc7395";
+       reg = <0>;
+       /* Specified for 2.5 MHz or below */
+       spi-max-frequency = <2500000>;
+       gpio-controller;
+       #gpio-cells = <2>;
+
+       ports {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               port@0 {
+                       reg = <0>;
+                       label = "lan1";
+               };
+               port@1 {
+                       reg = <1>;
+                       label = "lan2";
+               };
+               port@2 {
+                       reg = <2>;
+                       label = "lan3";
+               };
+               port@3 {
+                       reg = <3>;
+                       label = "lan4";
+               };
+               vsc: port@6 {
+                       reg = <6>;
+                       label = "cpu";
+                       ethernet = <&gmac1>;
+                       phy-mode = "rgmii";
+                       fixed-link {
+                               speed = <1000>;
+                               full-duplex;
+                               pause;
+                       };
+               };
+       };
+};
index df873d1f3b7c598b6c30721d3eec915a20ea8621..74603dd0789e7c378f9b86caa61c08611ede3bf4 100644 (file)
@@ -356,30 +356,7 @@ ethernet@e0000 {
 ============================================================================
 FMan IEEE 1588 Node
 
-DESCRIPTION
-
-The FMan interface to support IEEE 1588
-
-
-PROPERTIES
-
-- compatible
-               Usage: required
-               Value type: <stringlist>
-               Definition: A standard property.
-               Must include "fsl,fman-ptp-timer".
-
-- reg
-               Usage: required
-               Value type: <prop-encoded-array>
-               Definition: A standard property.
-
-EXAMPLE
-
-ptp-timer@fe000 {
-       compatible = "fsl,fman-ptp-timer";
-       reg = <0xfe000 0x1000>;
-};
+Refer to Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
 
 =============================================================================
 FMan MDIO Node
index 9c16ee2965a2ce756acc23d6956fedc38157144b..3b71da7e87427759729fc7874d048c4c22f5eee0 100644 (file)
@@ -4,6 +4,7 @@ The device node has following properties.
 
 Required properties:
  - compatible: should be "rockchip,<name>-gmac"
+   "rockchip,px30-gmac":   found on PX30 SoCs
    "rockchip,rk3128-gmac": found on RK312x SoCs
    "rockchip,rk3228-gmac": found on RK322x SoCs
    "rockchip,rk3288-gmac": found on RK3288 SoCs
index 0f569d8e73a3cb09fa1fb9d9dcf27375e3aad6ed..c5d0e7998e2b0a09546ab068dade2176b194d864 100644 (file)
@@ -2,7 +2,8 @@
 
 General Properties:
 
-  - compatible   Should be "fsl,etsec-ptp"
+  - compatible   Should be "fsl,etsec-ptp" for eTSEC
+                 Should be "fsl,fman-ptp-timer" for DPAA FMan
   - reg          Offset and length of the register set for the device
   - interrupts   There should be at least two interrupts. Some devices
                  have as many as four PTP related interrupts.
@@ -43,14 +44,22 @@ Clock Properties:
  value, which will be written directly into those bits; that is why,
  according to the reference manual, the following clock sources can be used:
 
+  For eTSEC,
   <0> - external high precision timer reference clock (TSEC_TMR_CLK
         input is used for this purpose);
   <1> - eTSEC system clock;
   <2> - eTSEC1 transmit clock;
   <3> - RTC clock input.
 
-  When this attribute is not used, eTSEC system clock will serve as
-  IEEE 1588 timer reference clock.
+  For DPAA FMan,
+  <0> - external high precision timer reference clock (TMR_1588_CLK)
+  <1> - MAC system clock (1/2 FMan clock)
+  <2> - reserved
+  <3> - RTC clock oscillator
+
+  When this attribute is not used, the IEEE 1588 timer reference clock
+  will use the eTSEC system clock (for Gianfar) or the MAC system
+  clock (for DPAA).
 
 Example:
 
index 7cad066191eeb8e6c9711cb81fd50283362fcf4d..3e5398f87eac443b8d6c544c85b8691911f37bd1 100644 (file)
@@ -395,6 +395,7 @@ v3  V3 Semiconductor
 variscite      Variscite Ltd.
 via    VIA Technologies, Inc.
 virtio Virtual I/O Device Specification, developed by the OASIS consortium
+vitesse        Vitesse Semiconductor Corporation
 vivante        Vivante Corporation
 vocore VoCore Studio
 voipac Voipac Technologies s.r.o.
index bee1b9a1702f1cc6c89811aff6b8bdbc1eefb0b0..6172f3cc3d0b2109916cfccda2da1836065f8766 100644 (file)
@@ -49,10 +49,10 @@ Device Drivers Base
 Device Drivers DMA Management
 -----------------------------
 
-.. kernel-doc:: drivers/base/dma-coherent.c
+.. kernel-doc:: kernel/dma/coherent.c
    :export:
 
-.. kernel-doc:: drivers/base/dma-mapping.c
+.. kernel-doc:: kernel/dma/mapping.c
    :export:
 
 Device drivers PnP support
index 2c391338c6757f505eac6dfcbe98a169452ad305..37bf0a9de75cbe79794e653ff161e4a5eb37a97a 100644 (file)
@@ -441,8 +441,6 @@ prototypes:
        int (*iterate) (struct file *, struct dir_context *);
        int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
-       struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
-       __poll_t (*poll_mask) (struct file *, __poll_t);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
@@ -473,7 +471,7 @@ prototypes:
 };
 
 locking rules:
-       All except for ->poll_mask may block.
+       All may block.
 
 ->llseek() locking has moved from llseek to the individual llseek
 implementations.  If your fs is not using generic_file_llseek, you
@@ -505,9 +503,6 @@ in sys_read() and friends.
 the lease within the individual filesystem to record the result of the
 operation
 
-->poll_mask can be called with or without the waitqueue lock for the waitqueue
-returned from ->get_poll_head.
-
 --------------------------- dquot_operations -------------------------------
 prototypes:
        int (*write_dquot) (struct dquot *);
index 9f4f87e1624036349533adf9534bfd3c4b08535d..75865da2ce1475c27bea1050b3e80ff7160f6d6f 100644 (file)
@@ -42,9 +42,11 @@ Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code)
 Scott Lovenberg
 Pavel Shilovsky (for great work adding SMB2 support, and various SMB3 features)
 Aurelien Aptel (for DFS SMB3 work and some key bug fixes)
-Ronnie Sahlberg (for SMB3 xattr work and bug fixes)
+Ronnie Sahlberg (for SMB3 xattr work, bug fixes, and lots of great work on compounding)
 Shirish Pargaonkar (for many ACL patches over the years)
 Sachin Prabhu (many bug fixes, including for reconnect, copy offload and security)
+Paulo Alcantara
+Long Li (some great work on RDMA, SMB Direct)
 
 
 Test case and Bug Report contributors
@@ -58,5 +60,4 @@ mention to the Stanford Checker (SWAT) which pointed out many minor
 bugs in error paths.  Valuable suggestions also have come from Al Viro
 and Dave Miller.
 
-And thanks to the IBM LTC and Power test teams and SuSE testers for
-finding multiple bugs during excellent stress test runs.
+And thanks to the IBM LTC and Power test teams and the SuSE, Citrix and RedHat testers for finding multiple bugs during excellent stress test runs.
index bc0025cdd1c9c0d285c32e8d7656103868126ca8..455e1cc494a9f2e78ee1d45b89bfbe5ee55048eb 100644 (file)
@@ -1,3 +1,6 @@
+See https://wiki.samba.org/index.php/LinuxCIFSKernel for
+more current information.
+
 Version 1.62
 ------------
 Add sockopt=TCP_NODELAY mount option. EA (xattr) routines hardened
index c5adf149b57f7f8f6e2d0b104d5b74f6bc7f5f84..852499aed64b52bb321c0b9656b0b606a4710772 100644 (file)
@@ -9,14 +9,14 @@ is a partial list of the known problems and missing features:
 
 a) SMB3 (and SMB3.02) missing optional features:
    - multichannel (started), integration with RDMA
-   - directory leases (improved metadata caching)
-   - T10 copy offload (copy chunk, and "Duplicate Extents" ioctl
+   - directory leases (improved metadata caching), started (root dir only)
+   - T10 copy offload, i.e. "ODX" (copy chunk and "Duplicate Extents" ioctl
+     are currently the only two server side copy mechanisms supported)
 
 b) improved sparse file support
 
 c) Directory entry caching relies on a 1 second timer, rather than
-using Directory Leases
+using Directory Leases; currently only the root file handle is cached longer
 
 d) quota support (needs minor kernel change since quota calls
 to make it to network filesystems or deviceless filesystems)
@@ -42,6 +42,8 @@ mount or a per server basis to client UIDs or nobody if no mapping
 exists. Also better integration with winbind for resolving SID owners
 
 k) Add tools to take advantage of more smb3 specific ioctls and features
+(passthrough ioctl/fsctl for sending various SMB3 fsctls to the server
+is in progress)
 
 l) encrypted file support
 
@@ -71,9 +73,8 @@ t) split cifs and smb3 support into separate modules so legacy (and less
 secure) CIFS dialect can be disabled in environments that don't need it
 and simplify the code.
 
-u) Finish up SMB3.1.1 dialect support
-
-v) POSIX Extensions for SMB3.1.1
+v) POSIX Extensions for SMB3.1.1 (started, create and mkdir support added
+so far).
 
 KNOWN BUGS
 ====================================
@@ -92,8 +93,8 @@ Misc testing to do
 1) check out max path names and max path name components against various server
 types. Try nested symlinks (8 deep). Return max path name in stat -f information
 
-2) Improve xfstest's cifs enablement and adapt xfstests where needed to test
-cifs better
+2) Improve xfstest's cifs/smb3 enablement and adapt xfstests where needed to test
+cifs/smb3 better
 
 3) Additional performance testing and optimization using iozone and similar - 
 there are some easy changes that can be done to parallelize sequential writes,
index 829a7b7857a46904cfb7f02646212504a3a7f259..f608180ad59d71ab2bcc2d2d818699bfaaee1470 100644 (file)
@@ -857,8 +857,6 @@ struct file_operations {
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
        int (*iterate) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
-       struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
-       __poll_t (*poll_mask) (struct file *, __poll_t);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
@@ -903,17 +901,6 @@ otherwise noted.
        activity on this file and (optionally) go to sleep until there
        is activity. Called by the select(2) and poll(2) system calls
 
-  get_poll_head: Returns the struct wait_queue_head that callers can
-  wait on.  Callers need to check the returned events using ->poll_mask
-  once woken.  Can return NULL to indicate polling is not supported,
-  or any error code using the ERR_PTR convention to indicate that a
-  grave error occured and ->poll_mask shall not be called.
-
-  poll_mask: return the mask of EPOLL* values describing the file descriptor
-  state.  Called either before going to sleep on the waitqueue returned by
-  get_poll_head, or after it has been woken.  If ->get_poll_head and
-  ->poll_mask are implemented ->poll does not need to be implement.
-
   unlocked_ioctl: called by the ioctl(2) system call.
 
   compat_ioctl: called by the ioctl(2) system call when 32 bit system calls
index 3534a84d206caf324423a9422eb985b48c97813b..64e0775a62d4475ec378d033332b5099987954ff 100644 (file)
@@ -430,6 +430,12 @@ This sets the config program's title bar if the config program chooses
 to use it. It should be placed at the top of the configuration, before any
 other statement.
 
+'#' Kconfig source file comment:
+
+An unquoted '#' character anywhere in a source file line indicates
+the beginning of a source file comment.  The remainder of that line
+is a comment.
+
 
 Kconfig hints
 -------------
index d4d8370279254472ed812488ea61d4e3bd64651b..9708f5fa76de2deae7ddf562e757c3d9c4e90079 100644 (file)
@@ -1,3 +1,4 @@
+==============================================================
 Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
 ==============================================================
 
@@ -86,83 +87,84 @@ Event Log Message Level:  The driver uses the message level flag to log events
 Additional Configurations
 =========================
 
-  Configuring the Driver on Different Distributions
-  -------------------------------------------------
+Configuring the Driver on Different Distributions
+-------------------------------------------------
 
-  Configuring a network driver to load properly when the system is started is
-  distribution dependent. Typically, the configuration process involves adding
-  an alias line to /etc/modprobe.d/*.conf as well as editing other system
-  startup scripts and/or configuration files.  Many popular Linux
-  distributions ship with tools to make these changes for you. To learn the
-  proper way to configure a network device for your system, refer to your
-  distribution documentation.  If during this process you are asked for the
-  driver or module name, the name for the Linux Base Driver for the Intel
-  PRO/100 Family of Adapters is e100.
+Configuring a network driver to load properly when the system is started
+is distribution dependent.  Typically, the configuration process involves
+adding an alias line to /etc/modprobe.d/*.conf as well as editing other
+system startup scripts and/or configuration files.  Many popular Linux
+distributions ship with tools to make these changes for you.  To learn
+the proper way to configure a network device for your system, refer to
+your distribution documentation.  If during this process you are asked
+for the driver or module name, the name for the Linux Base Driver for
+the Intel PRO/100 Family of Adapters is e100.
 
-  As an example, if you install the e100 driver for two PRO/100 adapters
-  (eth0 and eth1), add the following to a configuration file in /etc/modprobe.d/
+As an example, if you install the e100 driver for two PRO/100 adapters
+(eth0 and eth1), add the following to a configuration file in
+/etc/modprobe.d/::
 
        alias eth0 e100
        alias eth1 e100
 
-  Viewing Link Messages
-  ---------------------
-  In order to see link messages and other Intel driver information on your
-  console, you must set the dmesg level up to six. This can be done by
-  entering the following on the command line before loading the e100 driver::
-
-       dmesg -n 6
+Viewing Link Messages
+---------------------
 
-  If you wish to see all messages issued by the driver, including debug
-  messages, set the dmesg level to eight.
+In order to see link messages and other Intel driver information on your
+console, you must set the dmesg level up to six.  This can be done by
+entering the following on the command line before loading the e100
+driver::
 
-  NOTE: This setting is not saved across reboots.
+       dmesg -n 6
 
+If you wish to see all messages issued by the driver, including debug
+messages, set the dmesg level to eight.
 
-  ethtool
-  -------
+NOTE: This setting is not saved across reboots.
 
-  The driver utilizes the ethtool interface for driver configuration and
-  diagnostics, as well as displaying statistical information.  The ethtool
-  version 1.6 or later is required for this functionality.
+ethtool
+-------
 
-  The latest release of ethtool can be found from
-  https://www.kernel.org/pub/software/network/ethtool/
+The driver utilizes the ethtool interface for driver configuration and
+diagnostics, as well as displaying statistical information.  The ethtool
+version 1.6 or later is required for this functionality.
 
-  Enabling Wake on LAN* (WoL)
-  ---------------------------
-  WoL is provided through the ethtool* utility.  For instructions on enabling
-  WoL with ethtool, refer to the ethtool man page.
+The latest release of ethtool can be found from
+https://www.kernel.org/pub/software/network/ethtool/
 
-  WoL will be enabled on the system during the next shut down or reboot. For
-  this driver version, in order to enable WoL, the e100 driver must be
-  loaded when shutting down or rebooting the system.
+Enabling Wake on LAN* (WoL)
+---------------------------
+WoL is provided through the ethtool* utility.  For instructions on
+enabling WoL with ethtool, refer to the ethtool man page.  WoL will be
+enabled on the system during the next shut down or reboot.  For this
+driver version, in order to enable WoL, the e100 driver must be loaded
+when shutting down or rebooting the system.
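+
+For example, wake on magic packet can be enabled like this (a sketch;
+replace eth<x> with the actual interface name, and check the supported
+wake options reported by ethtool first)::
+
+       # Show supported and current Wake-on settings.
+       ethtool eth<x>
+       # Enable wake on magic packet.
+       ethtool -s eth<x> wol g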
 
-  NAPI
-  ----
+NAPI
+----
 
-  NAPI (Rx polling mode) is supported in the e100 driver.
+NAPI (Rx polling mode) is supported in the e100 driver.
 
-  See https://wiki.linuxfoundation.org/networking/napi for more information
-  on NAPI.
+See https://wiki.linuxfoundation.org/networking/napi for more
+information on NAPI.
 
-  Multiple Interfaces on Same Ethernet Broadcast Network
-  ------------------------------------------------------
+Multiple Interfaces on Same Ethernet Broadcast Network
+------------------------------------------------------
 
-  Due to the default ARP behavior on Linux, it is not possible to have
-  one system on two IP networks in the same Ethernet broadcast domain
-  (non-partitioned switch) behave as expected. All Ethernet interfaces
-  will respond to IP traffic for any IP address assigned to the system.
-  This results in unbalanced receive traffic.
+Due to the default ARP behavior on Linux, it is not possible to have one
+system on two IP networks in the same Ethernet broadcast domain
+(non-partitioned switch) behave as expected.  All Ethernet interfaces
+will respond to IP traffic for any IP address assigned to the system.
+This results in unbalanced receive traffic.
 
-  If you have multiple interfaces in a server, either turn on ARP
-  filtering by
+If you have multiple interfaces in a server, either turn on ARP
+filtering by
 
-  (1) entering:: echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
-      (this only works if your kernel's version is higher than 2.4.5), or
+(1) entering::
+
+        echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
+
+    (this only works if your kernel's version is higher than 2.4.5), or
 
-  (2) installing the interfaces in separate broadcast domains (either
-      in different switches or in a switch partitioned to VLANs).
+(2) installing the interfaces in separate broadcast domains (either
+    in different switches or in a switch partitioned to VLANs).
 
 
 Support
index 616848940e63f7303633e0be67febc86bee6ac6f..144b87eef15341059b06655dcc1b0b65c5822b2a 100644 (file)
@@ -1,3 +1,4 @@
+===========================================================
 Linux* Base Driver for Intel(R) Ethernet Network Connection
 ===========================================================
 
@@ -354,57 +355,58 @@ previously mentioned to force the adapter to the same speed and duplex.
 Additional Configurations
 =========================
 
-  Jumbo Frames
-  ------------
-  Jumbo Frames support is enabled by changing the MTU to a value larger than
-  the default of 1500.  Use the ifconfig command to increase the MTU size.
-  For example::
+Jumbo Frames
+------------
+Jumbo Frames support is enabled by changing the MTU to a value larger
+than the default of 1500.  Use the ifconfig command to increase the MTU
+size.  For example::
 
        ifconfig eth<x> mtu 9000 up
 
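+Equivalently, with the iproute2 tools (a sketch; substitute the actual
+interface name for eth<x>)::
+
+       ip link set dev eth<x> mtu 9000
+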
-  This setting is not saved across reboots.  It can be made permanent if
-  you add::
+This setting is not saved across reboots.  It can be made permanent if
+you add::
 
        MTU=9000
 
-   to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>.  This example
-   applies to the Red Hat distributions; other distributions may store this
-   setting in a different location.
+to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>.  This example
+applies to the Red Hat distributions; other distributions may store this
+setting in a different location.
+
+Notes: Degradation in throughput performance may be observed in some
+Jumbo frames environments.  If this is observed, increasing the
+application's socket buffer size and/or increasing the
+/proc/sys/net/ipv4/tcp_*mem entry values may help.  See the specific
+application manual and
+/usr/src/linux*/Documentation/networking/ip-sysctl.txt for more details.
 
-  Notes:
-  Degradation in throughput performance may be observed in some Jumbo frames
-  environments. If this is observed, increasing the application's socket buffer
-  size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help.
-  See the specific application manual and /usr/src/linux*/Documentation/
-  networking/ip-sysctl.txt for more details.
+- The maximum MTU setting for Jumbo Frames is 16110.  This value
+  coincides with the maximum Jumbo Frames size of 16128.
 
-  - The maximum MTU setting for Jumbo Frames is 16110.  This value coincides
-    with the maximum Jumbo Frames size of 16128.
+- Using Jumbo frames at 10 or 100 Mbps is not supported and may result
+  in poor performance or loss of link.
 
-  - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
-    poor performance or loss of link.
+- Adapters based on the Intel(R) 82542 and 82573V/E controller do not
+  support Jumbo Frames.  These correspond to the following product names:
+  Intel(R) PRO/1000 Gigabit Server Adapter
+  Intel(R) PRO/1000 PM Network Connection
 
-  - Adapters based on the Intel(R) 82542 and 82573V/E controller do not
-    support Jumbo Frames. These correspond to the following product names:
-     Intel(R) PRO/1000 Gigabit Server Adapter
-     Intel(R) PRO/1000 PM Network Connection
+ethtool
+-------
+The driver utilizes the ethtool interface for driver configuration and
+diagnostics, as well as displaying statistical information.  The ethtool
+version 1.6 or later is required for this functionality.
 
-  ethtool
-  -------
-  The driver utilizes the ethtool interface for driver configuration and
-  diagnostics, as well as displaying statistical information.  The ethtool
-  version 1.6 or later is required for this functionality.
+The latest release of ethtool can be found from
+https://www.kernel.org/pub/software/network/ethtool/
 
-  The latest release of ethtool can be found from
-  https://www.kernel.org/pub/software/network/ethtool/
+Enabling Wake on LAN* (WoL)
+---------------------------
+WoL is configured through the ethtool* utility.
 
-  Enabling Wake on LAN* (WoL)
-  ---------------------------
-  WoL is configured through the ethtool* utility.
+WoL will be enabled on the system during the next shut down or reboot.
+For this driver version, in order to enable WoL, the e1000 driver must be
+loaded when shutting down or rebooting the system.
 
-  WoL will be enabled on the system during the next shut down or reboot.
-  For this driver version, in order to enable WoL, the e1000 driver must be
-  loaded when shutting down or rebooting the system.
 
 Support
 =======
index fec8588a588ee2ffb93ef0bc7cf52e3bb8f98397..6123a7e9e1dae0f87422bc090fd7e1ee98faaef9 100644 (file)
@@ -15,6 +15,8 @@ Contents:
    kapi
    z8530book
    msg_zerocopy
+   failover
+   net_failover
 
 .. only::  subproject
 
index ce8fbf5aa63ca36c8cd26b355818662e5c44376f..77c37fb0b6a676ee72261e923a8cc785b78adebf 100644 (file)
@@ -733,11 +733,11 @@ tcp_limit_output_bytes - INTEGER
        Controls TCP Small Queue limit per tcp socket.
        TCP bulk sender tends to increase packets in flight until it
        gets losses notifications. With SNDBUF autotuning, this can
-       result in a large amount of packets queued in qdisc/device
-       on the local machine, hurting latency of other flows, for
-       typical pfifo_fast qdiscs.
-       tcp_limit_output_bytes limits the number of bytes on qdisc
-       or device to reduce artificial RTT/cwnd and reduce bufferbloat.
+       result in a large amount of packets queued on the local machine
+       (e.g.: qdiscs, CPU backlog, or device) hurting latency of other
+       flows, for typical pfifo_fast qdiscs.  tcp_limit_output_bytes
+       limits the number of bytes on qdisc or device to reduce artificial
+       RTT/cwnd and reduce bufferbloat.
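+
+       For example, the limit can be inspected and raised at run time
+       (a sketch; the value shown is arbitrary):
+
+               sysctl net.ipv4.tcp_limit_output_bytes
+               sysctl -w net.ipv4.tcp_limit_output_bytes=1048576
+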
        Default: 262144
 
 tcp_challenge_ack_limit - INTEGER
@@ -1834,6 +1834,16 @@ stable_secret - IPv6 address
 
        By default the stable secret is unset.
 
+addr_gen_mode - INTEGER
+       Defines how link-local and autoconf addresses are generated.
+
+       0: generate address based on EUI64 (default)
+       1: do not generate a link-local address, use EUI64 for addresses generated
+          from autoconf
+       2: generate stable privacy addresses, using the secret from
+          stable_secret (RFC7217)
+       3: generate stable privacy addresses, using a random secret if unset
+
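+       For example, to use stable privacy addresses (mode 2) on one
+       interface (a sketch; "eth0" is a placeholder, and stable_secret
+       must be set first for mode 2 to apply):
+
+               echo 2 > /proc/sys/net/ipv6/conf/eth0/addr_gen_mode
+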
 drop_unicast_in_l2_multicast - BOOLEAN
        Drop any unicast IPv6 packets that are received in link-layer
        multicast (or broadcast) frames.
index 70ca2f5800c434a21734879bd753ed1666dfcd61..06c97dcb57caee07743c55d7a500b2a42f1ff0a8 100644 (file)
@@ -36,37 +36,39 @@ feature on the virtio-net interface and assign the same MAC address to both
 virtio-net and VF interfaces.
 
 Here is an example XML snippet that shows such configuration.
-
- <interface type='network'>
-   <mac address='52:54:00:00:12:53'/>
-   <source network='enp66s0f0_br'/>
-   <target dev='tap01'/>
-   <model type='virtio'/>
-   <driver name='vhost' queues='4'/>
-   <link state='down'/>
-   <address type='pci' domain='0x0000' bus='0x00' slot='0x0a' function='0x0'/>
- </interface>
- <interface type='hostdev' managed='yes'>
-   <mac address='52:54:00:00:12:53'/>
-   <source>
-     <address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/>
-   </source>
-   <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/>
- </interface>
+::
+
+  <interface type='network'>
+    <mac address='52:54:00:00:12:53'/>
+    <source network='enp66s0f0_br'/>
+    <target dev='tap01'/>
+    <model type='virtio'/>
+    <driver name='vhost' queues='4'/>
+    <link state='down'/>
+    <address type='pci' domain='0x0000' bus='0x00' slot='0x0a' function='0x0'/>
+  </interface>
+  <interface type='hostdev' managed='yes'>
+    <mac address='52:54:00:00:12:53'/>
+    <source>
+      <address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/>
+    </source>
+    <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/>
+  </interface>
 
 Booting a VM with the above configuration will result in the following 3
 netdevs created in the VM.
-
-4: ens10: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
-    link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
-    inet 192.168.12.53/24 brd 192.168.12.255 scope global dynamic ens10
-       valid_lft 42482sec preferred_lft 42482sec
-    inet6 fe80::97d8:db2:8c10:b6d6/64 scope link
-       valid_lft forever preferred_lft forever
-5: ens10nsby: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel master ens10 state UP group default qlen 1000
-    link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
-7: ens11: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master ens10 state UP group default qlen 1000
-    link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
+::
+
+  4: ens10: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
+      link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
+      inet 192.168.12.53/24 brd 192.168.12.255 scope global dynamic ens10
+         valid_lft 42482sec preferred_lft 42482sec
+      inet6 fe80::97d8:db2:8c10:b6d6/64 scope link
+         valid_lft forever preferred_lft forever
+  5: ens10nsby: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel master ens10 state UP group default qlen 1000
+      link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
+  7: ens11: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq master ens10 state UP group default qlen 1000
+      link/ether 52:54:00:00:12:53 brd ff:ff:ff:ff:ff:ff
 
 ens10 is the 'failover' master netdev, ens10nsby and ens11 are the slave
 'standby' and 'primary' netdevs respectively.
@@ -80,37 +82,38 @@ the paravirtual datapath when the VF is unplugged.
 
 Here is a sample script that shows the steps to initiate live migration on
 the source hypervisor.
+::
 
-# cat vf_xml
-<interface type='hostdev' managed='yes'>
-  <mac address='52:54:00:00:12:53'/>
-  <source>
-    <address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/>
-  </source>
-  <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/>
-</interface>
+  # cat vf_xml
+  <interface type='hostdev' managed='yes'>
+    <mac address='52:54:00:00:12:53'/>
+    <source>
+      <address type='pci' domain='0x0000' bus='0x42' slot='0x02' function='0x5'/>
+    </source>
+    <address type='pci' domain='0x0000' bus='0x00' slot='0x0b' function='0x0'/>
+  </interface>
 
-# Source Hypervisor
-#!/bin/bash
+  # Source Hypervisor
+  #!/bin/bash
 
-DOMAIN=fedora27-tap01
-PF=enp66s0f0
-VF_NUM=5
-TAP_IF=tap01
-VF_XML=
+  DOMAIN=fedora27-tap01
+  PF=enp66s0f0
+  VF_NUM=5
+  TAP_IF=tap01
+  VF_XML=
 
-MAC=52:54:00:00:12:53
-ZERO_MAC=00:00:00:00:00:00
+  MAC=52:54:00:00:12:53
+  ZERO_MAC=00:00:00:00:00:00
 
-virsh domif-setlink $DOMAIN $TAP_IF up
-bridge fdb del $MAC dev $PF master
-virsh detach-device $DOMAIN $VF_XML
-ip link set $PF vf $VF_NUM mac $ZERO_MAC
+  virsh domif-setlink $DOMAIN $TAP_IF up
+  bridge fdb del $MAC dev $PF master
+  virsh detach-device $DOMAIN $VF_XML
+  ip link set $PF vf $VF_NUM mac $ZERO_MAC
 
-virsh migrate --live $DOMAIN qemu+ssh://$REMOTE_HOST/system
+  virsh migrate --live $DOMAIN qemu+ssh://$REMOTE_HOST/system
 
-# Destination Hypervisor
-#!/bin/bash
+  # Destination Hypervisor
+  #!/bin/bash
 
-virsh attach-device $DOMAIN $VF_XML
-virsh domif-setlink $DOMAIN $TAP_IF down
+  virsh attach-device $DOMAIN $VF_XML
+  virsh domif-setlink $DOMAIN $TAP_IF down
index f55639d71d35b8c466252808cf9b0e58d42b7f30..b7056a8a0540682163a39f90aeb38ec8314bf77d 100644 (file)
@@ -366,8 +366,13 @@ XPS: Transmit Packet Steering
 
 Transmit Packet Steering is a mechanism for intelligently selecting
 which transmit queue to use when transmitting a packet on a multi-queue
-device. To accomplish this, a mapping from CPU to hardware queue(s) is
-recorded. The goal of this mapping is usually to assign queues
+device. This can be accomplished by recording two kinds of maps, either
+a mapping of CPU to hardware queue(s) or a mapping of receive queue(s)
+to hardware transmit queue(s).
+
+1. XPS using CPUs map
+
+The goal of this mapping is usually to assign queues
 exclusively to a subset of CPUs, where the transmit completions for
 these queues are processed on a CPU within this set. This choice
 provides two benefits. First, contention on the device queue lock is
@@ -377,15 +382,40 @@ transmit queue). Secondly, cache miss rate on transmit completion is
 reduced, in particular for data cache lines that hold the sk_buff
 structures.
 
-XPS is configured per transmit queue by setting a bitmap of CPUs that
-may use that queue to transmit. The reverse mapping, from CPUs to
-transmit queues, is computed and maintained for each network device.
-When transmitting the first packet in a flow, the function
-get_xps_queue() is called to select a queue. This function uses the ID
-of the running CPU as a key into the CPU-to-queue lookup table. If the
+2. XPS using receive queues map
+
+This mapping is used to pick the transmit queue based on the receive
+queue(s) map configuration set by the administrator. A set of receive
+queues can be mapped to a set of transmit queues (many:many), although
+the common use case is a 1:1 mapping. This enables packets to be sent
+on the same queue association for transmit and receive. This is useful for
+busy polling multi-threaded workloads where there are challenges in
+associating a given CPU to a given application thread. The application
+threads are not pinned to CPUs and each thread handles packets
+received on a single queue. The receive queue number is cached in the
+socket for the connection. In this model, sending the packets on the same
+transmit queue corresponding to the associated receive queue has benefits
+in keeping the CPU overhead low. Transmit completion work is locked into
+the same queue-association that a given application is polling on. This
+avoids the overhead of triggering an interrupt on another CPU. When the
+application cleans up the packets during the busy poll, transmit completion
+may be processed along with it in the same thread context and so result in
+reduced latency.
+
+XPS is configured per transmit queue by setting a bitmap of
+CPUs/receive-queues that may use that queue to transmit. The reverse
+mapping, from CPUs to transmit queues or from receive-queues to transmit
+queues, is computed and maintained for each network device. When
+transmitting the first packet in a flow, the function get_xps_queue() is
+called to select a queue. This function uses the ID of the receive queue
+for the socket connection for a match in the receive queue-to-transmit queue
+lookup table. Alternatively, this function can also use the ID of the
+running CPU as a key into the CPU-to-queue lookup table. If the
 ID matches a single queue, that is used for transmission. If multiple
 queues match, one is selected by using the flow hash to compute an index
-into the set.
+into the set. When selecting the transmit queue based on receive queue(s)
+map, the transmit device is not validated against the receive device, as
+that would require an expensive lookup operation in the datapath.
 
 The queue chosen for transmitting a particular flow is saved in the
 corresponding socket structure for the flow (e.g. a TCP connection).
@@ -404,11 +434,15 @@ acknowledged.
 
 XPS is only available if the kconfig symbol CONFIG_XPS is enabled (on by
 default for SMP). The functionality remains disabled until explicitly
-configured. To enable XPS, the bitmap of CPUs that may use a transmit
-queue is configured using the sysfs file entry:
+configured. To enable XPS, the bitmap of CPUs/receive-queues that may
+use a transmit queue is configured using the sysfs file entry:
 
+For selection based on CPUs map:
 /sys/class/net/<dev>/queues/tx-<n>/xps_cpus
 
+For selection based on receive-queues map:
+/sys/class/net/<dev>/queues/tx-<n>/xps_rxqs
+
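+For example, a minimal sketch that lets CPU 0 use tx-0 of eth0 via the
+CPUs map, and maps receive queue 0 to tx-0 via the receive-queues map
+("eth0" is a placeholder; the values are bitmaps, so 1 selects CPU 0
+or receive queue 0):
+
+echo 1 > /sys/class/net/eth0/queues/tx-0/xps_cpus
+echo 1 > /sys/class/net/eth0/queues/tx-0/xps_rxqs
+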
 == Suggested Configuration
 
 For a network device with a single transmission queue, XPS configuration
@@ -421,6 +455,11 @@ best CPUs to share a given queue are probably those that share the cache
 with the CPU that processes transmit completions for that queue
 (transmit interrupts).
 
+For transmit queue selection based on receive queue(s), XPS has to be
+explicitly configured by mapping receive-queue(s) to transmit queue(s). If the
+user configuration for receive-queue map does not apply, then the transmit
+queue is selected based on the CPUs map.
+
 Per TX Queue rate limitation:
 =============================
 
index 13081b3decefa834824b544182d0986e83bc50b4..a7d354ddda7baeb59760215cb41222e3b4698a8d 100644 (file)
@@ -48,7 +48,7 @@ void strp_pause(struct strparser *strp)
      Temporarily pause a stream parser. Message parsing is suspended
      and no new messages are delivered to the upper layer.
 
-void strp_pause(struct strparser *strp)
+void strp_unpause(struct strparser *strp)
 
      Unpause a paused stream parser.
 
index a289285d2412ed367983fdc3a5109d7041425410..7d3684e81df6ca03c29e86fd9d321bdaa4dc6f69 100644 (file)
@@ -9,7 +9,7 @@ rfkill - RF kill switch support
 Introduction
 ============
 
-The rfkill subsystem provides a generic interface to disabling any radio
+The rfkill subsystem provides a generic interface for disabling any radio
 transmitter in the system. When a transmitter is blocked, it shall not
 radiate any power.
 
@@ -45,7 +45,7 @@ The rfkill subsystem is composed of three main components:
  * the rfkill drivers.
 
 The rfkill core provides API for kernel drivers to register their radio
-transmitter with the kernel, methods for turning it on and off and, letting
+transmitter with the kernel, methods for turning it on and off, and letting
 the system know about hardware-disabled states that may be implemented on
 the device.
 
@@ -54,7 +54,7 @@ ways for userspace to query the current states. See the "Userspace support"
 section below.
 
 When the device is hard-blocked (either by a call to rfkill_set_hw_state()
-or from query_hw_block) set_block() will be invoked for additional software
+or from query_hw_block), set_block() will be invoked for additional software
 block, but drivers can ignore the method call since they can use the return
 value of the function rfkill_set_hw_state() to sync the software state
 instead of keeping track of calls to set_block(). In fact, drivers should
@@ -65,7 +65,6 @@ keeps track of soft and hard block separately.
 Kernel API
 ==========
 
-
 Drivers for radio transmitters normally implement an rfkill driver.
 
 Platform drivers might implement input devices if the rfkill button is just
@@ -75,14 +74,14 @@ a way to turn on/off the transmitter(s).
 
 For some platforms, it is possible that the hardware state changes during
 suspend/hibernation, in which case it will be necessary to update the rfkill
-core with the current state is at resume time.
+core with the current state at resume time.
 
 To create an rfkill driver, driver's Kconfig needs to have::
 
        depends on RFKILL || !RFKILL
 
 to ensure the driver cannot be built-in when rfkill is modular. The !RFKILL
-case allows the driver to be built when rfkill is not configured, which
+case allows the driver to be built when rfkill is not configured, in which
 case all rfkill API can still be used but will be provided by static inlines
 which compile to almost nothing.
 
@@ -91,7 +90,7 @@ rfkill drivers that control devices that can be hard-blocked unless they also
 assign the poll_hw_block() callback (then the rfkill core will poll the
 device). Don't do this unless you cannot get the event in any other way.
 
-RFKill provides per-switch LED triggers, which can be used to drive LEDs
+rfkill provides per-switch LED triggers, which can be used to drive LEDs
 according to the switch state (LED_FULL when blocked, LED_OFF otherwise).
 
 
@@ -114,7 +113,7 @@ a specified type) into a state which also updates the default state for
 hotplugged devices.
 
 After an application opens /dev/rfkill, it can read the current state of all
-devices. Changes can be either obtained by either polling the descriptor for
+devices. Changes can be obtained by either polling the descriptor for
 hotplug or state change events or by listening for uevents emitted by the
 rfkill core framework.
 
@@ -127,8 +126,7 @@ environment variables set::
        RFKILL_STATE
        RFKILL_TYPE
 
-The contents of these variables corresponds to the "name", "state" and
+The content of these variables corresponds to the "name", "state" and
 "type" sysfs files explained above.
 
-
 For further details consult Documentation/ABI/stable/sysfs-class-rfkill.
index e73bcf9cb5f31cc756521702bbc15fd142e09c71..7ffea6aa22e3c89d4b6e6c7359d40a55c4241176 100644 (file)
@@ -1729,35 +1729,35 @@ If a variable isn't a key variable or prefixed with 'vals=', the
 associated event field will be saved in a variable but won't be summed
 as a value:
 
-  # echo 'hist:keys=next_pid:ts1=common_timestamp ... >> event/trigger
+  # echo 'hist:keys=next_pid:ts1=common_timestamp ...' >> event/trigger
 
 Multiple variables can be assigned at the same time.  The below would
 result in both ts0 and b being created as variables, with both
 common_timestamp and field1 additionally being summed as values:
 
-  # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ... >> \
+  # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ...' >> \
        event/trigger
 
 Note that variable assignments can appear either preceding or
 following their use.  The command below behaves identically to the
 command above:
 
-  # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ... >> \
+  # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ...' >> \
        event/trigger
 
 Any number of variables not bound to a 'vals=' prefix can also be
 assigned by simply separating them with colons.  Below is the same
 thing but without the values being summed in the histogram:
 
-  # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ... >> event/trigger
+  # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ...' >> event/trigger
 
 Variables set as above can be referenced and used in expressions on
 another event.
 
 For example, here's how a latency can be calculated:
 
-  # echo 'hist:keys=pid,prio:ts0=common_timestamp ... >> event1/trigger
-  # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ... >> event2/trigger
+  # echo 'hist:keys=pid,prio:ts0=common_timestamp ...' >> event1/trigger
+  # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ...' >> event2/trigger
 
 In the first line above, the event's timestamp is saved into the
 variable ts0.  In the next line, ts0 is subtracted from the second
@@ -1766,7 +1766,7 @@ yet another variable, 'wakeup_lat'.  The hist trigger below in turn
 makes use of the wakeup_lat variable to compute a combined latency
 using the same key and variable from yet another event:
 
-  # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ... >> event3/trigger
+  # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ...' >> event3/trigger
 
 2.2.2 Synthetic Events
 ----------------------
@@ -1807,10 +1807,11 @@ the command that defined it with a '!':
 At this point, there isn't yet an actual 'wakeup_latency' event
 instantiated in the event subsystem - for this to happen, a 'hist
 trigger action' needs to be instantiated and bound to actual fields
-and variables defined on other events (see Section 6.3.3 below).
+and variables defined on other events (see Section 2.2.3 below on
+how that is done using the hist trigger 'onmatch' action). Once that is
+done, the 'wakeup_latency' synthetic event instance is created.
 
-Once that is done, an event instance is created, and a histogram can
-be defined using it:
+A histogram can now be defined for the new synthetic event:
 
   # echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \
         /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
@@ -1960,7 +1961,7 @@ hist trigger specification.
     back to that pid, the timestamp difference is calculated.  If the
     resulting latency, stored in wakeup_lat, exceeds the current
     maximum latency, the values specified in the save() fields are
-    recoreded:
+    recorded:
 
     # echo 'hist:keys=pid:ts0=common_timestamp.usecs \
             if comm=="cyclictest"' >> \
index 635e57493709e16fbecc0235723dc742a608ae0e..b8cb38a98c1989eef926795b79b7399d65700135 100644 (file)
@@ -226,7 +226,7 @@ $ rm configs/<config name>.<number>/<function>
 where <config name>.<number> specify the configuration and <function> is
 a symlink to a function being removed from the configuration, e.g.:
 
-$ rm configfs/c.1/ncm.usb0
+$ rm configs/c.1/ncm.usb0
 
 ...
 ...
index 495b7742ab58086b5c81fff88eeb884769391b49..d10944e619d3d28c43bcca85bc3dd2761cee37f0 100644 (file)
@@ -4610,7 +4610,7 @@ This capability indicates that kvm will implement the interfaces to handle
 reset, migration and nested KVM for branch prediction blocking. The stfle
 facility 82 should not be provided to the guest without this capability.
 
-8.14 KVM_CAP_HYPERV_TLBFLUSH
+8.18 KVM_CAP_HYPERV_TLBFLUSH
 
 Architectures: x86
 
index 9d5eeff51b5fd32979f64d288375b6489ff25712..130ff2f4d33a290c588016ed39d5de9a0baf3104 100644 (file)
@@ -2971,9 +2971,13 @@ N:       bcm585*
 N:     bcm586*
 N:     bcm88312
 N:     hr2
-F:     arch/arm64/boot/dts/broadcom/ns2*
+N:     stingray
+F:     arch/arm64/boot/dts/broadcom/northstar2/*
+F:     arch/arm64/boot/dts/broadcom/stingray/*
 F:     drivers/clk/bcm/clk-ns*
+F:     drivers/clk/bcm/clk-sr*
 F:     drivers/pinctrl/bcm/pinctrl-ns*
+F:     include/dt-bindings/clock/bcm-sr*
 
 BROADCOM KONA GPIO DRIVER
 M:     Ray Jui <rjui@broadcom.com>
@@ -4360,12 +4364,7 @@ L:       iommu@lists.linux-foundation.org
 T:     git git://git.infradead.org/users/hch/dma-mapping.git
 W:     http://git.infradead.org/users/hch/dma-mapping.git
 S:     Supported
-F:     lib/dma-debug.c
-F:     lib/dma-direct.c
-F:     lib/dma-noncoherent.c
-F:     lib/dma-virt.c
-F:     drivers/base/dma-mapping.c
-F:     drivers/base/dma-coherent.c
+F:     kernel/dma/
 F:     include/asm-generic/dma-mapping.h
 F:     include/linux/dma-direct.h
 F:     include/linux/dma-mapping.h
@@ -5674,7 +5673,7 @@ F:        drivers/crypto/caam/
 F:     Documentation/devicetree/bindings/crypto/fsl-sec4.txt
 
 FREESCALE DIU FRAMEBUFFER DRIVER
-M:     Timur Tabi <timur@tabi.org>
+M:     Timur Tabi <timur@kernel.org>
 L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/fbdev/fsl-diu-fb.*
@@ -5774,7 +5773,7 @@ S:        Maintained
 F:     drivers/net/wan/fsl_ucc_hdlc*
 
 FREESCALE QUICC ENGINE UCC UART DRIVER
-M:     Timur Tabi <timur@tabi.org>
+M:     Timur Tabi <timur@kernel.org>
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     drivers/tty/serial/ucc_uart.c
@@ -5798,7 +5797,7 @@ F:        drivers/net/ethernet/freescale/fs_enet/
 F:     include/linux/fs_enet_pd.h
 
 FREESCALE SOC SOUND DRIVERS
-M:     Timur Tabi <timur@tabi.org>
+M:     Timur Tabi <timur@kernel.org>
 M:     Nicolin Chen <nicoleotsuka@gmail.com>
 M:     Xiubo Li <Xiubo.Lee@gmail.com>
 R:     Fabio Estevam <fabio.estevam@nxp.com>
@@ -9159,6 +9158,7 @@ S:        Supported
 W:     http://www.mellanox.com
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
 F:     drivers/net/ethernet/mellanox/mlxsw/
+F:     tools/testing/selftests/drivers/net/mlxsw/
 
 MELLANOX FIRMWARE FLASH LIBRARY (mlxfw)
 M:     mlxsw@mellanox.com
@@ -9756,6 +9756,11 @@ L:       linux-scsi@vger.kernel.org
 S:     Maintained
 F:     drivers/scsi/NCR_D700.*
 
+NCSI LIBRARY
+M:     Samuel Mendoza-Jonas <sam@mendozajonas.com>
+S:     Maintained
+F:     net/ncsi/
+
 NCT6775 HARDWARE MONITOR DRIVER
 M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-hwmon@vger.kernel.org
@@ -9882,6 +9887,7 @@ M:        Andrew Lunn <andrew@lunn.ch>
 M:     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 S:     Maintained
+F:     Documentation/devicetree/bindings/net/dsa/
 F:     net/dsa/
 F:     include/net/dsa.h
 F:     include/linux/dsa/
@@ -11476,6 +11482,15 @@ W:     http://wireless.kernel.org/en/users/Drivers/p54
 S:     Obsolete
 F:     drivers/net/wireless/intersil/prism54/
 
+PROC FILESYSTEM
+R:     Alexey Dobriyan <adobriyan@gmail.com>
+L:     linux-kernel@vger.kernel.org
+L:     linux-fsdevel@vger.kernel.org
+S:     Maintained
+F:     fs/proc/
+F:     include/linux/proc_fs.h
+F:     tools/testing/selftests/proc/
+
 PROC SYSCTL
 M:     "Luis R. Rodriguez" <mcgrof@kernel.org>
 M:     Kees Cook <keescook@chromium.org>
@@ -11808,9 +11823,9 @@ F:  Documentation/devicetree/bindings/opp/kryo-cpufreq.txt
 F:  drivers/cpufreq/qcom-cpufreq-kryo.c
 
 QUALCOMM EMAC GIGABIT ETHERNET DRIVER
-M:     Timur Tabi <timur@codeaurora.org>
+M:     Timur Tabi <timur@kernel.org>
 L:     netdev@vger.kernel.org
-S:     Supported
+S:     Maintained
 F:     drivers/net/ethernet/qualcomm/emac/
 
 QUALCOMM HEXAGON ARCHITECTURE
@@ -12048,6 +12063,13 @@ S:     Maintained
 F:     sound/soc/codecs/rt*
 F:     include/sound/rt*.h
 
+REALTEK RTL83xx SMI DSA ROUTER CHIPS
+M:     Linus Walleij <linus.walleij@linaro.org>
+S:     Maintained
+F:     Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
+F:     drivers/net/dsa/realtek-smi*
+F:     drivers/net/dsa/rtl83*
+
 REGISTER MAP ABSTRACTION
 M:     Mark Brown <broonie@kernel.org>
 L:     linux-kernel@vger.kernel.org
@@ -12155,6 +12177,8 @@ S:      Maintained
 F:     Documentation/rfkill.txt
 F:     Documentation/ABI/stable/sysfs-class-rfkill
 F:     net/rfkill/
+F:     include/linux/rfkill.h
+F:     include/uapi/linux/rfkill.h
 
 RHASHTABLE
 M:     Thomas Graf <tgraf@suug.ch>
@@ -12162,7 +12186,9 @@ M:      Herbert Xu <herbert@gondor.apana.org.au>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     lib/rhashtable.c
+F:     lib/test_rhashtable.c
 F:     include/linux/rhashtable.h
+F:     include/linux/rhashtable-types.h
 
 RICOH R5C592 MEMORYSTICK DRIVER
 M:     Maxim Levitsky <maximlevitsky@gmail.com>
@@ -13648,7 +13674,7 @@ M:      Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:     iommu@lists.linux-foundation.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
 S:     Supported
-F:     lib/swiotlb.c
+F:     kernel/dma/swiotlb.c
 F:     arch/*/kernel/pci-swiotlb.c
 F:     include/linux/swiotlb.h
 
@@ -15572,9 +15598,17 @@ M:     x86@kernel.org
 L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
 S:     Maintained
+F:     Documentation/devicetree/bindings/x86/
 F:     Documentation/x86/
 F:     arch/x86/
 
+X86 ENTRY CODE
+M:     Andy Lutomirski <luto@kernel.org>
+L:     linux-kernel@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/asm
+S:     Maintained
+F:     arch/x86/entry/
+
 X86 MCE INFRASTRUCTURE
 M:     Tony Luck <tony.luck@intel.com>
 M:     Borislav Petkov <bp@alien8.de>
@@ -15597,7 +15631,7 @@ F:      drivers/platform/x86/
 F:     drivers/platform/olpc/
 
 X86 VDSO
-M:     Andy Lutomirski <luto@amacapital.net>
+M:     Andy Lutomirski <luto@kernel.org>
 L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso
 S:     Maintained
index ca2af1ab91ebadf6ac5c62150b4e72f2a1f1441d..d15ac32afbaf03bac1095938a31a6295318ccaac 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
@@ -507,11 +507,6 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLA
   KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
 
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/cc-can-link.sh $(CC)), y)
-  CC_CAN_LINK := y
-  export CC_CAN_LINK
-endif
-
 # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
 # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
 # CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
index 0c4805a572c8739ff9d657c63961747e3ea08ff3..04a4a138ed131c7256aeb4108453400516b8965a 100644 (file)
@@ -555,11 +555,6 @@ config SMP
 
          If you don't know what to do here, say N.
 
-config HAVE_DEC_LOCK
-       bool
-       depends on SMP
-       default y
-
 config NR_CPUS
        int "Maximum number of CPUs (2-32)"
        range 2 32
index be14f16149d5faf64902441f39ba5c1a08be6104..065fb372e355cf86905fc1be80b97be5ef3218b3 100644 (file)
 
 #define SO_ZEROCOPY            60
 
+#define SO_TXTIME              61
+#define SCM_TXTIME             SO_TXTIME
+
 #endif /* _UAPI_ASM_SOCKET_H */
index 04f9729de57c351c7e142b9aab9d9bca0878a2f3..854d5e79979e4ce929d7998bf1135a12880238f8 100644 (file)
@@ -35,8 +35,6 @@ lib-y =       __divqu.o __remqu.o __divlu.o __remlu.o \
        callback_srm.o srm_puts.o srm_printk.o \
        fls.o
 
-lib-$(CONFIG_SMP) += dec_and_lock.o
-
 # The division routines are built from single source, with different defines.
 AFLAGS___divqu.o = -DDIV
 AFLAGS___remqu.o =       -DREM
diff --git a/arch/alpha/lib/dec_and_lock.c b/arch/alpha/lib/dec_and_lock.c
deleted file mode 100644 (file)
index a117707..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/alpha/lib/dec_and_lock.c
- *
- * ll/sc version of atomic_dec_and_lock()
- * 
- */
-
-#include <linux/spinlock.h>
-#include <linux/atomic.h>
-#include <linux/export.h>
-
-  asm (".text                                  \n\
-       .global _atomic_dec_and_lock            \n\
-       .ent _atomic_dec_and_lock               \n\
-       .align  4                               \n\
-_atomic_dec_and_lock:                          \n\
-       .prologue 0                             \n\
-1:     ldl_l   $1, 0($16)                      \n\
-       subl    $1, 1, $1                       \n\
-       beq     $1, 2f                          \n\
-       stl_c   $1, 0($16)                      \n\
-       beq     $1, 4f                          \n\
-       mb                                      \n\
-       clr     $0                              \n\
-       ret                                     \n\
-2:     br      $29, 3f                         \n\
-3:     ldgp    $29, 0($29)                     \n\
-       br      $atomic_dec_and_lock_1..ng      \n\
-       .subsection 2                           \n\
-4:     br      1b                              \n\
-       .previous                               \n\
-       .end _atomic_dec_and_lock");
-
-static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock)
-{
-       /* Slow path */
-       spin_lock(lock);
-       if (atomic_dec_and_test(atomic))
-               return 1;
-       spin_unlock(lock);
-       return 0;
-}
-EXPORT_SYMBOL(_atomic_dec_and_lock);
index 54eeb8d00bc62a9f818aa9a833cbc15e7a1d9324..843edfd000be7210ebef62e529ba074df5ee242f 100644 (file)
@@ -1245,8 +1245,14 @@ config PCI
          VESA. If you have PCI, say Y, otherwise N.
 
 config PCI_DOMAINS
-       bool
+       bool "Support for multiple PCI domains"
        depends on PCI
+       help
+         Enable PCI domains kernel management. Say Y if your machine
+         has a PCI bus hierarchy that requires more than one PCI
+         domain (aka segment) to be correctly managed. Say N otherwise.
+
+         If you don't know what to do here, say N.
 
 config PCI_DOMAINS_GENERIC
        def_bool PCI_DOMAINS
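
A rough sketch of what the now user-visible option governs (illustrative, not from this patch): with CONFIG_PCI_DOMAINS enabled, each host bridge can carry its own domain (segment) number, such as the "linux,pci-domain" values in the iProc DTS hunks above, and kernel code can query it with pci_domain_nr(); with the option off, that helper returns 0 for every bus. The wrapper function below is hypothetical:

    #include <linux/pci.h>

    /* Print the full domain:bus:device.function address of a device.
     * pci_domain_nr() is the real kernel helper; this wrapper exists
     * only for illustration. */
    static void report_pci_address(struct pci_dev *pdev)
    {
            dev_info(&pdev->dev, "at %04x:%02x:%02x.%d\n",
                     pci_domain_nr(pdev->bus), pdev->bus->number,
                     PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
    }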
index 6782ce481ac967ded05bbfc124355aa894790d6f..d8769956cbfcff7b4a38e72959ba8b88f397a1c0 100644 (file)
@@ -139,7 +139,7 @@ gpio-fan {
                                              3700 5
                                              3900 6
                                              4000 7>;
-                       cooling-cells = <2>;
+                       #cooling-cells = <2>;
                };
 
                gpio-leds {
index 9fe4f5a6379e3b60d79a6ed8a0327f680434861e..2c4df2d2d4a6e1165fe27565a19d681c47a32cfa 100644 (file)
@@ -216,7 +216,7 @@ i2c0: i2c@18008000 {
                        reg = <0x18008000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
@@ -245,7 +245,7 @@ i2c1: i2c@1800b000 {
                        reg = <0x1800b000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
@@ -256,7 +256,7 @@ pcie0: pcie@18012000 {
 
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>;
+                       interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
 
                        linux,pci-domain = <0>;
 
@@ -278,10 +278,10 @@ msi0: msi-controller {
                                compatible = "brcm,iproc-msi";
                                msi-controller;
                                interrupt-parent = <&gic>;
-                               interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 97 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 98 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 99 IRQ_TYPE_NONE>;
+                               interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
                        };
                };
 
@@ -291,7 +291,7 @@ pcie1: pcie@18013000 {
 
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>;
+                       interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
 
                        linux,pci-domain = <1>;
 
@@ -313,10 +313,10 @@ msi1: msi-controller {
                                compatible = "brcm,iproc-msi";
                                msi-controller;
                                interrupt-parent = <&gic>;
-                               interrupts = <GIC_SPI 102 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 103 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 104 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 105 IRQ_TYPE_NONE>;
+                               interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
                        };
                };
 
index 3f9cedd8011f0c22fb05b6a50d1705fc5ceab05d..3084a7c957339f0edc2fef97d203b08635c96790 100644 (file)
@@ -264,7 +264,7 @@ i2c0: i2c@38000 {
                        reg = <0x38000 0x50>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 95 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                };
 
@@ -279,7 +279,7 @@ i2c1: i2c@3b000 {
                        reg = <0x3b000 0x50>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                };
        };
@@ -300,7 +300,7 @@ pcie0: pcie@18012000 {
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 186 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <0>;
 
@@ -322,10 +322,10 @@ msi0: msi-controller {
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 182 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 183 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 184 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 185 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
@@ -336,7 +336,7 @@ pcie1: pcie@18013000 {
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 192 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <1>;
 
@@ -358,10 +358,10 @@ msi1: msi-controller {
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 188 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 189 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 190 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 191 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
index dcc55aa84583cdd18f7ef6ecd780eb947be1ef1f..09ba8504632284532e3b17c6d1531e2d732fadc4 100644 (file)
@@ -391,7 +391,7 @@ i2c0: i2c@38000 {
                        reg = <0x38000 0x50>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        dma-coherent;
                        status = "disabled";
@@ -496,7 +496,7 @@ pcie0: pcie@18012000 {
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <0>;
 
@@ -519,10 +519,10 @@ msi0: msi-controller {
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 127 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 128 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 129 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 130 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
@@ -533,7 +533,7 @@ pcie1: pcie@18013000 {
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <1>;
 
@@ -556,10 +556,10 @@ msi1: msi-controller {
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 133 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 134 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 135 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 136 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
@@ -570,7 +570,7 @@ pcie2: pcie@18014000 {
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <2>;
 
@@ -593,10 +593,10 @@ msi2: msi-controller {
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 139 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 140 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 141 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 142 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
index 9a076c409f4ed35fcf5fbe79807ede6e7e8466d5..ef995e50ee12bfd8b3d90d9e07062a41e04f4ff3 100644 (file)
@@ -365,7 +365,7 @@ mdio: mdio@18003000 {
        i2c0: i2c@18009000 {
                compatible = "brcm,iproc-i2c";
                reg = <0x18009000 0x50>;
-               interrupts = <GIC_SPI 121 IRQ_TYPE_NONE>;
+               interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                #size-cells = <0>;
                clock-frequency = <100000>;
index f6f1597b03df931a1dea057921a43cea9f929a31..0f4f817a9e229c58f973f935a6f5906b1e0f8979 100644 (file)
@@ -549,11 +549,7 @@ gpio: gpio@226000 {
                        gpio-controller;
                        #gpio-cells = <2>;
                        reg = <0x226000 0x1000>;
-                       interrupts = <42 IRQ_TYPE_EDGE_BOTH
-                               43 IRQ_TYPE_EDGE_BOTH 44 IRQ_TYPE_EDGE_BOTH
-                               45 IRQ_TYPE_EDGE_BOTH 46 IRQ_TYPE_EDGE_BOTH
-                               47 IRQ_TYPE_EDGE_BOTH 48 IRQ_TYPE_EDGE_BOTH
-                               49 IRQ_TYPE_EDGE_BOTH 50 IRQ_TYPE_EDGE_BOTH>;
+                       interrupts = <42 43 44 45 46 47 48 49 50>;
                        ti,ngpio = <144>;
                        ti,davinci-gpio-unbanked = <0>;
                        status = "disabled";
index fb5c954ab95a2ca98ac3223a2ce27e8ca7581dd8..6f258b50eb44262bcc6f5549ddfc168cb3e9dfa9 100644 (file)
@@ -156,6 +156,100 @@ touchkeys@26 {
                };
        };
 
+       /* This is a RealTek RTL8366RB switch and PHY using SMI over GPIO */
+       switch {
+               compatible = "realtek,rtl8366rb";
+               /* 22 = MDIO (has input reads), 21 = MDC (clock, output only) */
+               mdc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>;
+               mdio-gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>;
+               reset-gpios = <&gpio0 14 GPIO_ACTIVE_LOW>;
+               realtek,disable-leds;
+
+               switch_intc: interrupt-controller {
+                       /* GPIO 15 provides the interrupt */
+                       interrupt-parent = <&gpio0>;
+                       interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+                       interrupt-controller;
+                       #address-cells = <0>;
+                       #interrupt-cells = <1>;
+               };
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               label = "lan0";
+                               phy-handle = <&phy0>;
+                       };
+                       port@1 {
+                               reg = <1>;
+                               label = "lan1";
+                               phy-handle = <&phy1>;
+                       };
+                       port@2 {
+                               reg = <2>;
+                               label = "lan2";
+                               phy-handle = <&phy2>;
+                       };
+                       port@3 {
+                               reg = <3>;
+                               label = "lan3";
+                               phy-handle = <&phy3>;
+                       };
+                       port@4 {
+                               reg = <4>;
+                               label = "wan";
+                               phy-handle = <&phy4>;
+                       };
+                       rtl8366rb_cpu_port: port@5 {
+                               reg = <5>;
+                               label = "cpu";
+                               ethernet = <&gmac0>;
+                               phy-mode = "rgmii";
+                               fixed-link {
+                                       speed = <1000>;
+                                       full-duplex;
+                                       pause;
+                               };
+                       };
+
+               };
+
+               mdio {
+                       compatible = "realtek,smi-mdio";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       phy0: phy@0 {
+                               reg = <0>;
+                               interrupt-parent = <&switch_intc>;
+                               interrupts = <0>;
+                       };
+                       phy1: phy@1 {
+                               reg = <1>;
+                               interrupt-parent = <&switch_intc>;
+                               interrupts = <1>;
+                       };
+                       phy2: phy@2 {
+                               reg = <2>;
+                               interrupt-parent = <&switch_intc>;
+                               interrupts = <2>;
+                       };
+                       phy3: phy@3 {
+                               reg = <3>;
+                               interrupt-parent = <&switch_intc>;
+                               interrupts = <3>;
+                       };
+                       phy4: phy@4 {
+                               reg = <4>;
+                               interrupt-parent = <&switch_intc>;
+                               interrupts = <12>;
+                       };
+               };
+       };
+
        soc {
                flash@30000000 {
                        /*
@@ -223,10 +317,12 @@ pinctrl {
                                 * gpio0bgrp cover line 7 used by WPS LED
                                 * gpio0cgrp cover line 8, 13 used by keys
                                 *           and 11, 12 used by the HD LEDs
+                                *           and line 14, 15 used by RTL8366
+                                *           RESET and phy ready
                                 * gpio0egrp cover line 16 used by VDISP
                                 * gpio0fgrp cover line 17 used by TK IRQ
                                 * gpio0ggrp cover line 20 used by panel CS
-                                * gpio0hgrp cover line 21,22 used by RTL8366RB
+                                * gpio0hgrp cover line 21,22 used by RTL8366RB MDIO
                                 */
                                gpio0_default_pins: pinctrl-gpio0 {
                                        mux {
@@ -250,6 +346,32 @@ mux {
                                                groups = "gpio1bgrp";
                                        };
                                };
+                               pinctrl-gmii {
+                                       mux {
+                                               function = "gmii";
+                                               groups = "gmii_gmac0_grp";
+                                       };
+                                       conf0 {
+                                               pins = "V8 GMAC0 RXDV", "T10 GMAC1 RXDV",
+                                                    "Y7 GMAC0 RXC", "Y11 GMAC1 RXC",
+                                                    "T8 GMAC0 TXEN", "W11 GMAC1 TXEN",
+                                                    "U8 GMAC0 TXC", "V11 GMAC1 TXC",
+                                                    "W8 GMAC0 RXD0", "V9 GMAC0 RXD1",
+                                                    "Y8 GMAC0 RXD2", "U9 GMAC0 RXD3",
+                                                    "T7 GMAC0 TXD0", "U6 GMAC0 TXD1",
+                                                    "V7 GMAC0 TXD2", "U7 GMAC0 TXD3",
+                                                    "Y12 GMAC1 RXD0", "V12 GMAC1 RXD1",
+                                                    "T11 GMAC1 RXD2", "W12 GMAC1 RXD3",
+                                                    "U10 GMAC1 TXD0", "Y10 GMAC1 TXD1",
+                                                    "W10 GMAC1 TXD2", "T9 GMAC1 TXD3";
+                                               skew-delay = <7>;
+                                       };
+                                       /* Set up drive strength on GMAC0 to 16 mA */
+                                       conf1 {
+                                               groups = "gmii_gmac0_grp";
+                                               drive-strength = <16>;
+                                       };
+                               };
                        };
                };
 
@@ -291,6 +413,22 @@ pci@50000000 {
                                <0x6000 0 0 4 &pci_intc 2>;
                };
 
+               ethernet@60000000 {
+                       status = "okay";
+
+                       ethernet-port@0 {
+                               phy-mode = "rgmii";
+                               fixed-link {
+                                       speed = <1000>;
+                                       full-duplex;
+                                       pause;
+                               };
+                       };
+                       ethernet-port@1 {
+                               /* Not used in this platform */
+                       };
+               };
+
                ata@63000000 {
                        status = "okay";
                };
index 70483ce72ba6cf648809acb7f24be3af11817674..77f8f030dd0772aba631f57b704a7e60a9bd0532 100644 (file)
@@ -90,7 +90,7 @@ ecspi5: ecspi@2018000 {
                                        clocks = <&clks IMX6Q_CLK_ECSPI5>,
                                                 <&clks IMX6Q_CLK_ECSPI5>;
                                        clock-names = "ipg", "per";
-                                       dmas = <&sdma 11 7 1>, <&sdma 12 7 2>;
+                                       dmas = <&sdma 11 8 1>, <&sdma 12 8 2>;
                                        dma-names = "rx", "tx";
                                        status = "disabled";
                                };
index d8b94f47498b67051ade669f23d2796a0b1e7433..4e4a55aad5c9ca9aa6fff90deb0ae1c5e99c3a13 100644 (file)
@@ -1344,7 +1344,7 @@ pcie: pcie@8ffc000 {
                        ranges = <0x81000000 0 0          0x08f80000 0 0x00010000 /* downstream I/O */
                                  0x82000000 0 0x08000000 0x08000000 0 0x00f00000>; /* non-prefetchable memory */
                        num-lanes = <1>;
-                       interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "msi";
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0x7>;
index 486d4e7433ed32d2662fabcf9b25fe54eab0f187..b38f8c24055800c45e1e81aef451f08ac9e27be5 100644 (file)
@@ -748,13 +748,13 @@ mmc: dwmmc0@ff704000 {
                nand0: nand@ff900000 {
                        #address-cells = <0x1>;
                        #size-cells = <0x1>;
-                       compatible = "denali,denali-nand-dt";
+                       compatible = "altr,socfpga-denali-nand";
                        reg = <0xff900000 0x100000>,
                              <0xffb80000 0x10000>;
                        reg-names = "nand_data", "denali_reg";
                        interrupts = <0x0 0x90 0x4>;
                        dma-mask = <0xffffffff>;
-                       clocks = <&nand_clk>;
+                       clocks = <&nand_x_clk>;
                        status = "disabled";
                };
 
index bead79e4b2aa2b624b8f7d21cef4751d6536b724..791ca15c799eba98850cbc3d4b96be7a509c422f 100644 (file)
@@ -593,8 +593,7 @@ spi1: spi@ffda5000 {
                        #size-cells = <0>;
                        reg = <0xffda5000 0x100>;
                        interrupts = <0 102 4>;
-                       num-chipselect = <4>;
-                       bus-num = <0>;
+                       num-cs = <4>;
                        /*32bit_access;*/
                        tx-dma-channel = <&pdma 16>;
                        rx-dma-channel = <&pdma 17>;
@@ -633,7 +632,7 @@ mmc: dwmmc0@ff808000 {
                nand: nand@ffb90000 {
                        #address-cells = <1>;
                        #size-cells = <1>;
-                       compatible = "denali,denali-nand-dt", "altr,socfpga-denali-nand";
+                       compatible = "altr,socfpga-denali-nand";
                        reg = <0xffb90000 0x72000>,
                              <0xffb80000 0x10000>;
                        reg-names = "nand_data", "denali_reg";
index 1e9f7af8f70ff6ba23d9403f930f09dd6e0dda7e..3157be413297e5d22ad3174e2082b5199fc3083c 100644 (file)
@@ -10,7 +10,7 @@ obj-$(CONFIG_DMABOUNCE)               += dmabounce.o
 obj-$(CONFIG_SHARP_LOCOMO)     += locomo.o
 obj-$(CONFIG_SHARP_PARAM)      += sharpsl_param.o
 obj-$(CONFIG_SHARP_SCOOP)      += scoop.o
-obj-$(CONFIG_SMP)              += secure_cntvoff.o
+obj-$(CONFIG_CPU_V7)           += secure_cntvoff.o
 obj-$(CONFIG_PCI_HOST_ITE8152)  += it8152.o
 obj-$(CONFIG_MCPM)             += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
 CFLAGS_REMOVE_mcpm_entry.o     = -pg
index 7e1c543162c3ab16f11f6be6ccec5a16abae31d0..8f6be19825456496ef471b3b03a78d32354d9736 100644 (file)
@@ -1,5 +1,4 @@
 CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_CGROUPS=y
@@ -10,20 +9,10 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_CMDLINE_PARTITION=y
-CONFIG_ARCH_MULTI_V7=y
-# CONFIG_ARCH_MULTI_V5 is not set
-# CONFIG_ARCH_MULTI_V4 is not set
 CONFIG_ARCH_VIRT=y
 CONFIG_ARCH_ALPINE=y
 CONFIG_ARCH_ARTPEC=y
 CONFIG_MACH_ARTPEC6=y
-CONFIG_ARCH_MVEBU=y
-CONFIG_MACH_ARMADA_370=y
-CONFIG_MACH_ARMADA_375=y
-CONFIG_MACH_ARMADA_38X=y
-CONFIG_MACH_ARMADA_39X=y
-CONFIG_MACH_ARMADA_XP=y
-CONFIG_MACH_DOVE=y
 CONFIG_ARCH_AT91=y
 CONFIG_SOC_SAMA5D2=y
 CONFIG_SOC_SAMA5D3=y
@@ -32,9 +21,9 @@ CONFIG_ARCH_BCM=y
 CONFIG_ARCH_BCM_CYGNUS=y
 CONFIG_ARCH_BCM_HR2=y
 CONFIG_ARCH_BCM_NSP=y
-CONFIG_ARCH_BCM_21664=y
-CONFIG_ARCH_BCM_281XX=y
 CONFIG_ARCH_BCM_5301X=y
+CONFIG_ARCH_BCM_281XX=y
+CONFIG_ARCH_BCM_21664=y
 CONFIG_ARCH_BCM2835=y
 CONFIG_ARCH_BCM_63XX=y
 CONFIG_ARCH_BRCMSTB=y
@@ -43,14 +32,14 @@ CONFIG_MACH_BERLIN_BG2=y
 CONFIG_MACH_BERLIN_BG2CD=y
 CONFIG_MACH_BERLIN_BG2Q=y
 CONFIG_ARCH_DIGICOLOR=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_EXYNOS5420_MCPM=y
 CONFIG_ARCH_HIGHBANK=y
 CONFIG_ARCH_HISI=y
 CONFIG_ARCH_HI3xxx=y
-CONFIG_ARCH_HIX5HD2=y
 CONFIG_ARCH_HIP01=y
 CONFIG_ARCH_HIP04=y
-CONFIG_ARCH_KEYSTONE=y
-CONFIG_ARCH_MESON=y
+CONFIG_ARCH_HIX5HD2=y
 CONFIG_ARCH_MXC=y
 CONFIG_SOC_IMX50=y
 CONFIG_SOC_IMX51=y
@@ -60,29 +49,30 @@ CONFIG_SOC_IMX6SL=y
 CONFIG_SOC_IMX6SX=y
 CONFIG_SOC_IMX6UL=y
 CONFIG_SOC_IMX7D=y
-CONFIG_SOC_VF610=y
 CONFIG_SOC_LS1021A=y
+CONFIG_SOC_VF610=y
+CONFIG_ARCH_KEYSTONE=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_MACH_ARMADA_370=y
+CONFIG_MACH_ARMADA_375=y
+CONFIG_MACH_ARMADA_38X=y
+CONFIG_MACH_ARMADA_39X=y
+CONFIG_MACH_ARMADA_XP=y
+CONFIG_MACH_DOVE=y
 CONFIG_ARCH_OMAP3=y
 CONFIG_ARCH_OMAP4=y
 CONFIG_SOC_OMAP5=y
 CONFIG_SOC_AM33XX=y
 CONFIG_SOC_AM43XX=y
 CONFIG_SOC_DRA7XX=y
+CONFIG_ARCH_SIRF=y
 CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_MEDIATEK=y
 CONFIG_ARCH_MSM8X60=y
 CONFIG_ARCH_MSM8960=y
 CONFIG_ARCH_MSM8974=y
 CONFIG_ARCH_ROCKCHIP=y
-CONFIG_ARCH_SOCFPGA=y
-CONFIG_PLAT_SPEAR=y
-CONFIG_ARCH_SPEAR13XX=y
-CONFIG_MACH_SPEAR1310=y
-CONFIG_MACH_SPEAR1340=y
-CONFIG_ARCH_STI=y
-CONFIG_ARCH_STM32=y
-CONFIG_ARCH_EXYNOS=y
-CONFIG_EXYNOS5420_MCPM=y
 CONFIG_ARCH_RENESAS=y
 CONFIG_ARCH_EMEV2=y
 CONFIG_ARCH_R7S72100=y
@@ -99,40 +89,33 @@ CONFIG_ARCH_R8A7792=y
 CONFIG_ARCH_R8A7793=y
 CONFIG_ARCH_R8A7794=y
 CONFIG_ARCH_SH73A0=y
+CONFIG_ARCH_SOCFPGA=y
+CONFIG_PLAT_SPEAR=y
+CONFIG_ARCH_SPEAR13XX=y
+CONFIG_MACH_SPEAR1310=y
+CONFIG_MACH_SPEAR1340=y
+CONFIG_ARCH_STI=y
+CONFIG_ARCH_STM32=y
 CONFIG_ARCH_SUNXI=y
-CONFIG_ARCH_SIRF=y
 CONFIG_ARCH_TEGRA=y
-CONFIG_ARCH_TEGRA_2x_SOC=y
-CONFIG_ARCH_TEGRA_3x_SOC=y
-CONFIG_ARCH_TEGRA_114_SOC=y
-CONFIG_ARCH_TEGRA_124_SOC=y
 CONFIG_ARCH_UNIPHIER=y
 CONFIG_ARCH_U8500=y
-CONFIG_MACH_HREFV60=y
-CONFIG_MACH_SNOWBALL=y
 CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_VEXPRESS_TC2_PM=y
 CONFIG_ARCH_WM8850=y
 CONFIG_ARCH_ZYNQ=y
-CONFIG_TRUSTED_FOUNDATIONS=y
-CONFIG_PCI=y
-CONFIG_PCI_HOST_GENERIC=y
-CONFIG_PCI_DRA7XX=y
-CONFIG_PCI_DRA7XX_EP=y
-CONFIG_PCI_KEYSTONE=y
-CONFIG_PCI_MSI=y
+CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_MVEBU=y
 CONFIG_PCI_TEGRA=y
 CONFIG_PCI_RCAR_GEN2=y
 CONFIG_PCIE_RCAR=y
-CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_DRA7XX_EP=y
+CONFIG_PCI_KEYSTONE=y
 CONFIG_PCI_ENDPOINT=y
 CONFIG_PCI_ENDPOINT_CONFIGFS=y
 CONFIG_PCI_EPF_TEST=m
 CONFIG_SMP=y
 CONFIG_NR_CPUS=16
-CONFIG_HIGHPTE=y
-CONFIG_CMA=y
 CONFIG_SECCOMP=y
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
@@ -145,14 +128,14 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=m
 CONFIG_CPU_FREQ_GOV_USERSPACE=m
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPUFREQ_DT=y
 CONFIG_ARM_IMX6Q_CPUFREQ=y
 CONFIG_QORIQ_CPUFREQ=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
-CONFIG_NEON=y
-CONFIG_KERNEL_MODE_NEON=y
 CONFIG_ARM_ZYNQ_CPUIDLE=y
 CONFIG_ARM_EXYNOS_CPUIDLE=y
+CONFIG_KERNEL_MODE_NEON=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -170,23 +153,13 @@ CONFIG_IPV6_MIP6=m
 CONFIG_IPV6_TUNNEL=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_NET_DSA=m
-CONFIG_NET_SWITCHDEV=y
 CONFIG_CAN=y
-CONFIG_CAN_RAW=y
-CONFIG_CAN_BCM=y
-CONFIG_CAN_DEV=y
 CONFIG_CAN_AT91=m
 CONFIG_CAN_FLEXCAN=m
-CONFIG_CAN_RCAR=m
+CONFIG_CAN_SUN4I=y
 CONFIG_CAN_XILINXCAN=y
+CONFIG_CAN_RCAR=m
 CONFIG_CAN_MCP251X=y
-CONFIG_NET_DSA_BCM_SF2=m
-CONFIG_B53=m
-CONFIG_B53_SPI_DRIVER=m
-CONFIG_B53_MDIO_DRIVER=m
-CONFIG_B53_MMAP_DRIVER=m
-CONFIG_B53_SRAB_DRIVER=m
-CONFIG_CAN_SUN4I=y
 CONFIG_BT=m
 CONFIG_BT_HCIUART=m
 CONFIG_BT_HCIUART_BCM=y
@@ -199,11 +172,9 @@ CONFIG_RFKILL_INPUT=y
 CONFIG_RFKILL_GPIO=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=64
 CONFIG_OMAP_OCP2SCP=y
 CONFIG_SIMPLE_PM_BUS=y
-CONFIG_SUNXI_RSB=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y
@@ -236,7 +207,6 @@ CONFIG_PCI_ENDPOINT_TEST=m
 CONFIG_EEPROM_AT24=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
 CONFIG_SATA_AHCI_PLATFORM=y
@@ -251,14 +221,20 @@ CONFIG_SATA_MV=y
 CONFIG_SATA_RCAR=y
 CONFIG_NETDEVICES=y
 CONFIG_VIRTIO_NET=y
-CONFIG_HIX5HD2_GMAC=y
+CONFIG_B53_SPI_DRIVER=m
+CONFIG_B53_MDIO_DRIVER=m
+CONFIG_B53_MMAP_DRIVER=m
+CONFIG_B53_SRAB_DRIVER=m
+CONFIG_NET_DSA_BCM_SF2=m
 CONFIG_SUN4I_EMAC=y
-CONFIG_MACB=y
 CONFIG_BCMGENET=m
 CONFIG_BGMAC_BCMA=y
 CONFIG_SYSTEMPORT=m
+CONFIG_MACB=y
 CONFIG_NET_CALXEDA_XGMAC=y
 CONFIG_GIANFAR=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_E1000E=y
 CONFIG_IGB=y
 CONFIG_MV643XX_ETH=y
 CONFIG_MVNETA=y
@@ -268,19 +244,17 @@ CONFIG_R8169=y
 CONFIG_SH_ETH=y
 CONFIG_SMSC911X=y
 CONFIG_STMMAC_ETH=y
-CONFIG_STMMAC_PLATFORM=y
 CONFIG_DWMAC_DWC_QOS_ETH=y
 CONFIG_TI_CPSW=y
 CONFIG_XILINX_EMACLITE=y
 CONFIG_AT803X_PHY=y
-CONFIG_MARVELL_PHY=y
-CONFIG_SMSC_PHY=y
 CONFIG_BROADCOM_PHY=y
 CONFIG_ICPLUS_PHY=y
-CONFIG_REALTEK_PHY=y
+CONFIG_MARVELL_PHY=y
 CONFIG_MICREL_PHY=y
-CONFIG_FIXED_PHY=y
+CONFIG_REALTEK_PHY=y
 CONFIG_ROCKCHIP_PHY=y
+CONFIG_SMSC_PHY=y
 CONFIG_USB_PEGASUS=y
 CONFIG_USB_RTL8152=m
 CONFIG_USB_LAN78XX=m
@@ -288,29 +262,29 @@ CONFIG_USB_USBNET=y
 CONFIG_USB_NET_SMSC75XX=y
 CONFIG_USB_NET_SMSC95XX=y
 CONFIG_BRCMFMAC=m
-CONFIG_RT2X00=m
-CONFIG_RT2800USB=m
 CONFIG_MWIFIEX=m
 CONFIG_MWIFIEX_SDIO=m
+CONFIG_RT2X00=m
+CONFIG_RT2800USB=m
 CONFIG_INPUT_JOYDEV=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_QT1070=m
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_KEYBOARD_TEGRA=y
-CONFIG_KEYBOARD_SPEAR=y
+CONFIG_KEYBOARD_SAMSUNG=m
 CONFIG_KEYBOARD_ST_KEYSCAN=y
+CONFIG_KEYBOARD_SPEAR=y
 CONFIG_KEYBOARD_CROS_EC=m
-CONFIG_KEYBOARD_SAMSUNG=m
 CONFIG_MOUSE_PS2_ELANTECH=y
 CONFIG_MOUSE_CYAPA=m
 CONFIG_MOUSE_ELAN_I2C=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_ATMEL_MXT=m
 CONFIG_TOUCHSCREEN_MMS114=m
+CONFIG_TOUCHSCREEN_WM97XX=m
 CONFIG_TOUCHSCREEN_ST1232=m
 CONFIG_TOUCHSCREEN_STMPE=y
 CONFIG_TOUCHSCREEN_SUN4I=y
-CONFIG_TOUCHSCREEN_WM97XX=m
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_MAX77693_HAPTIC=m
 CONFIG_INPUT_MAX8997_HAPTIC=m
@@ -327,13 +301,12 @@ CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_8250_EM=y
 CONFIG_SERIAL_8250_MT6577=y
 CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_SERIAL_ATMEL=y
 CONFIG_SERIAL_ATMEL_CONSOLE=y
 CONFIG_SERIAL_ATMEL_TTYAT=y
-CONFIG_SERIAL_BCM63XX=y
-CONFIG_SERIAL_BCM63XX_CONSOLE=y
 CONFIG_SERIAL_MESON=y
 CONFIG_SERIAL_MESON_CONSOLE=y
 CONFIG_SERIAL_SAMSUNG=y
@@ -345,15 +318,14 @@ CONFIG_SERIAL_IMX=y
 CONFIG_SERIAL_IMX_CONSOLE=y
 CONFIG_SERIAL_SH_SCI=y
 CONFIG_SERIAL_SH_SCI_NR_UARTS=20
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
-CONFIG_SERIAL_SH_SCI_DMA=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_VT8500=y
 CONFIG_SERIAL_VT8500_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_OMAP=y
 CONFIG_SERIAL_OMAP_CONSOLE=y
+CONFIG_SERIAL_BCM63XX=y
+CONFIG_SERIAL_BCM63XX_CONSOLE=y
 CONFIG_SERIAL_XILINX_PS_UART=y
 CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
 CONFIG_SERIAL_FSL_LPUART=y
@@ -365,12 +337,10 @@ CONFIG_SERIAL_ST_ASC_CONSOLE=y
 CONFIG_SERIAL_STM32=y
 CONFIG_SERIAL_STM32_CONSOLE=y
 CONFIG_SERIAL_DEV_BUS=y
-CONFIG_HVC_DRIVER=y
 CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_ST=y
 CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_DAVINCI=y
-CONFIG_I2C_MESON=y
-CONFIG_I2C_MUX=y
 CONFIG_I2C_ARB_GPIO_CHALLENGE=m
 CONFIG_I2C_MUX_PCA954x=y
 CONFIG_I2C_MUX_PINCTRL=y
@@ -378,12 +348,13 @@ CONFIG_I2C_DEMUX_PINCTRL=y
 CONFIG_I2C_AT91=m
 CONFIG_I2C_BCM2835=y
 CONFIG_I2C_CADENCE=y
+CONFIG_I2C_DAVINCI=y
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
 CONFIG_I2C_DIGICOLOR=m
 CONFIG_I2C_EMEV2=m
 CONFIG_I2C_GPIO=m
-CONFIG_I2C_EXYNOS5=y
 CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
 CONFIG_I2C_MV64XXX=y
 CONFIG_I2C_RIIC=y
 CONFIG_I2C_RK3X=y
@@ -427,7 +398,6 @@ CONFIG_SPI_SPIDEV=y
 CONFIG_SPMI=y
 CONFIG_PINCTRL_AS3722=y
 CONFIG_PINCTRL_PALMAS=y
-CONFIG_PINCTRL_BCM2835=y
 CONFIG_PINCTRL_APQ8064=y
 CONFIG_PINCTRL_APQ8084=y
 CONFIG_PINCTRL_IPQ8064=y
@@ -437,25 +407,33 @@ CONFIG_PINCTRL_MSM8X74=y
 CONFIG_PINCTRL_MSM8916=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_PINCTRL_QCOM_SSBI_PMIC=y
-CONFIG_GPIO_GENERIC_PLATFORM=y
 CONFIG_GPIO_DAVINCI=y
 CONFIG_GPIO_DWAPB=y
 CONFIG_GPIO_EM=y
 CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_SYSCON=y
 CONFIG_GPIO_UNIPHIER=y
 CONFIG_GPIO_XILINX=y
 CONFIG_GPIO_ZYNQ=y
 CONFIG_GPIO_PCA953X=y
 CONFIG_GPIO_PCA953X_IRQ=y
 CONFIG_GPIO_PCF857X=y
-CONFIG_GPIO_TWL4030=y
 CONFIG_GPIO_PALMAS=y
-CONFIG_GPIO_SYSCON=y
 CONFIG_GPIO_TPS6586X=y
 CONFIG_GPIO_TPS65910=y
+CONFIG_GPIO_TWL4030=y
+CONFIG_POWER_AVS=y
+CONFIG_ROCKCHIP_IODOMAIN=y
+CONFIG_POWER_RESET_AS3722=y
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
+CONFIG_POWER_RESET_ST=y
+CONFIG_POWER_RESET_KEYSTONE=y
+CONFIG_POWER_RESET_RMOBILE=y
 CONFIG_BATTERY_ACT8945A=y
 CONFIG_BATTERY_CPCAP=m
 CONFIG_BATTERY_SBS=y
+CONFIG_AXP20X_POWER=m
 CONFIG_BATTERY_MAX17040=m
 CONFIG_BATTERY_MAX17042=m
 CONFIG_CHARGER_CPCAP=m
@@ -464,15 +442,6 @@ CONFIG_CHARGER_MAX77693=m
 CONFIG_CHARGER_MAX8997=m
 CONFIG_CHARGER_MAX8998=m
 CONFIG_CHARGER_TPS65090=y
-CONFIG_AXP20X_POWER=m
-CONFIG_POWER_RESET_AS3722=y
-CONFIG_POWER_RESET_GPIO=y
-CONFIG_POWER_RESET_GPIO_RESTART=y
-CONFIG_POWER_RESET_KEYSTONE=y
-CONFIG_POWER_RESET_RMOBILE=y
-CONFIG_POWER_RESET_ST=y
-CONFIG_POWER_AVS=y
-CONFIG_ROCKCHIP_IODOMAIN=y
 CONFIG_SENSORS_IIO_HWMON=y
 CONFIG_SENSORS_LM90=y
 CONFIG_SENSORS_LM95245=y
@@ -480,14 +449,12 @@ CONFIG_SENSORS_NTC_THERMISTOR=m
 CONFIG_SENSORS_PWM_FAN=m
 CONFIG_SENSORS_INA2XX=m
 CONFIG_CPU_THERMAL=y
-CONFIG_BCM2835_THERMAL=m
-CONFIG_BRCMSTB_THERMAL=m
 CONFIG_IMX_THERMAL=y
 CONFIG_ROCKCHIP_THERMAL=y
 CONFIG_RCAR_THERMAL=y
 CONFIG_ARMADA_THERMAL=y
-CONFIG_DAVINCI_WATCHDOG=m
-CONFIG_EXYNOS_THERMAL=m
+CONFIG_BCM2835_THERMAL=m
+CONFIG_BRCMSTB_THERMAL=m
 CONFIG_ST_THERMAL_MEMMAP=y
 CONFIG_WATCHDOG=y
 CONFIG_DA9063_WATCHDOG=m
@@ -495,20 +462,24 @@ CONFIG_XILINX_WATCHDOG=y
 CONFIG_ARM_SP805_WATCHDOG=y
 CONFIG_AT91SAM9X_WATCHDOG=y
 CONFIG_SAMA5D4_WATCHDOG=y
+CONFIG_DW_WATCHDOG=y
+CONFIG_DAVINCI_WATCHDOG=m
 CONFIG_ORION_WATCHDOG=y
 CONFIG_RN5T618_WATCHDOG=y
-CONFIG_ST_LPC_WATCHDOG=y
 CONFIG_SUNXI_WATCHDOG=y
 CONFIG_IMX2_WDT=y
+CONFIG_ST_LPC_WATCHDOG=y
 CONFIG_TEGRA_WATCHDOG=m
 CONFIG_MESON_WATCHDOG=y
-CONFIG_DW_WATCHDOG=y
 CONFIG_DIGICOLOR_WATCHDOG=y
 CONFIG_RENESAS_WDT=m
-CONFIG_BCM2835_WDT=y
 CONFIG_BCM47XX_WDT=y
-CONFIG_BCM7038_WDT=m
+CONFIG_BCM2835_WDT=y
 CONFIG_BCM_KONA_WDT=y
+CONFIG_BCM7038_WDT=m
+CONFIG_BCMA_HOST_SOC=y
+CONFIG_BCMA_DRIVER_GMAC_CMN=y
+CONFIG_BCMA_DRIVER_GPIO=y
 CONFIG_MFD_ACT8945A=y
 CONFIG_MFD_AS3711=y
 CONFIG_MFD_AS3722=y
@@ -516,7 +487,6 @@ CONFIG_MFD_ATMEL_FLEXCOM=y
 CONFIG_MFD_ATMEL_HLCDC=m
 CONFIG_MFD_BCM590XX=y
 CONFIG_MFD_AC100=y
-CONFIG_MFD_AXP20X=y
 CONFIG_MFD_AXP20X_I2C=y
 CONFIG_MFD_AXP20X_RSB=y
 CONFIG_MFD_CROS_EC=m
@@ -529,11 +499,11 @@ CONFIG_MFD_MAX77693=m
 CONFIG_MFD_MAX8907=y
 CONFIG_MFD_MAX8997=y
 CONFIG_MFD_MAX8998=y
-CONFIG_MFD_RK808=y
 CONFIG_MFD_CPCAP=y
 CONFIG_MFD_PM8XXX=y
 CONFIG_MFD_QCOM_RPM=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
 CONFIG_MFD_RN5T618=y
 CONFIG_MFD_SEC_CORE=y
 CONFIG_MFD_STMPE=y
@@ -543,10 +513,10 @@ CONFIG_MFD_TPS65217=y
 CONFIG_MFD_TPS65218=y
 CONFIG_MFD_TPS6586X=y
 CONFIG_MFD_TPS65910=y
-CONFIG_REGULATOR_ACT8945A=y
-CONFIG_REGULATOR_AB8500=y
 CONFIG_REGULATOR_ACT8865=y
+CONFIG_REGULATOR_ACT8945A=y
 CONFIG_REGULATOR_ANATOP=y
+CONFIG_REGULATOR_AB8500=y
 CONFIG_REGULATOR_AS3711=y
 CONFIG_REGULATOR_AS3722=y
 CONFIG_REGULATOR_AXP20X=y
@@ -554,10 +524,7 @@ CONFIG_REGULATOR_BCM590XX=y
 CONFIG_REGULATOR_CPCAP=y
 CONFIG_REGULATOR_DA9210=y
 CONFIG_REGULATOR_FAN53555=y
-CONFIG_REGULATOR_RK808=y
 CONFIG_REGULATOR_GPIO=y
-CONFIG_MFD_SYSCON=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_REGULATOR_LP872X=y
 CONFIG_REGULATOR_MAX14577=m
 CONFIG_REGULATOR_MAX8907=y
@@ -571,7 +538,8 @@ CONFIG_REGULATOR_PALMAS=y
 CONFIG_REGULATOR_PBIAS=y
 CONFIG_REGULATOR_PWM=y
 CONFIG_REGULATOR_QCOM_RPM=y
-CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=m
+CONFIG_REGULATOR_RK808=y
 CONFIG_REGULATOR_RN5T618=y
 CONFIG_REGULATOR_S2MPS11=y
 CONFIG_REGULATOR_S5M8767=y
@@ -592,18 +560,17 @@ CONFIG_MEDIA_CEC_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_MEDIA_USB_SUPPORT=y
-CONFIG_USB_VIDEO_CLASS=y
-CONFIG_USB_GSPCA=y
+CONFIG_USB_VIDEO_CLASS=m
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_SOC_CAMERA=m
 CONFIG_SOC_CAMERA_PLATFORM=m
-CONFIG_VIDEO_RCAR_VIN=m
-CONFIG_VIDEO_ATMEL_ISI=m
 CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS=m
 CONFIG_VIDEO_S5P_FIMC=m
 CONFIG_VIDEO_S5P_MIPI_CSIS=m
 CONFIG_VIDEO_EXYNOS_FIMC_LITE=m
 CONFIG_VIDEO_EXYNOS4_FIMC_IS=m
+CONFIG_VIDEO_RCAR_VIN=m
+CONFIG_VIDEO_ATMEL_ISI=m
 CONFIG_V4L_MEM2MEM_DRIVERS=y
 CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
 CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
@@ -614,19 +581,15 @@ CONFIG_VIDEO_STI_DELTA=m
 CONFIG_VIDEO_RENESAS_JPU=m
 CONFIG_VIDEO_RENESAS_VSP1=m
 CONFIG_V4L_TEST_DRIVERS=y
+CONFIG_VIDEO_VIVID=m
 CONFIG_CEC_PLATFORM_DRIVERS=y
 CONFIG_VIDEO_SAMSUNG_S5P_CEC=m
 # CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
 CONFIG_VIDEO_ADV7180=m
 CONFIG_VIDEO_ML86V7667=m
 CONFIG_DRM=y
-CONFIG_DRM_I2C_ADV7511=m
-CONFIG_DRM_I2C_ADV7511_AUDIO=y
 # CONFIG_DRM_I2C_CH7006 is not set
 # CONFIG_DRM_I2C_SIL164 is not set
-CONFIG_DRM_DUMB_VGA_DAC=m
-CONFIG_DRM_NXP_PTN3460=m
-CONFIG_DRM_PARADE_PS8622=m
 CONFIG_DRM_NOUVEAU=m
 CONFIG_DRM_EXYNOS=m
 CONFIG_DRM_EXYNOS_FIMD=y
@@ -645,13 +608,18 @@ CONFIG_DRM_RCAR_LVDS=y
 CONFIG_DRM_SUN4I=m
 CONFIG_DRM_FSL_DCU=m
 CONFIG_DRM_TEGRA=y
+CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_DRM_PANEL_SAMSUNG_LD9040=m
 CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m
 CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
-CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_DUMB_VGA_DAC=m
+CONFIG_DRM_NXP_PTN3460=m
+CONFIG_DRM_PARADE_PS8622=m
 CONFIG_DRM_SII9234=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
 CONFIG_DRM_STI=m
-CONFIG_DRM_VC4=y
+CONFIG_DRM_VC4=m
 CONFIG_DRM_ETNAVIV=m
 CONFIG_DRM_MXSFB=m
 CONFIG_FB_ARMCLCD=y
@@ -659,8 +627,6 @@ CONFIG_FB_EFI=y
 CONFIG_FB_WM8505=y
 CONFIG_FB_SH_MOBILE_LCDC=y
 CONFIG_FB_SIMPLE=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_LCD_PLATFORM=m
 CONFIG_BACKLIGHT_PWM=y
 CONFIG_BACKLIGHT_AS3711=y
@@ -668,7 +634,6 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
 CONFIG_SOUND=m
 CONFIG_SND=m
-CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_HDA_TEGRA=m
 CONFIG_SND_HDA_INPUT_BEEP=y
 CONFIG_SND_HDA_PATCH_LOADER=y
@@ -692,7 +657,7 @@ CONFIG_SND_SOC_SNOW=m
 CONFIG_SND_SOC_ODROID=m
 CONFIG_SND_SOC_SH4_FSI=m
 CONFIG_SND_SOC_RCAR=m
-CONFIG_SND_SIMPLE_SCU_CARD=m
+CONFIG_SND_SOC_STI=m
 CONFIG_SND_SUN4I_CODEC=m
 CONFIG_SND_SOC_TEGRA=m
 CONFIG_SND_SOC_TEGRA20_I2S=m
@@ -703,31 +668,25 @@ CONFIG_SND_SOC_TEGRA_WM8903=m
 CONFIG_SND_SOC_TEGRA_WM9712=m
 CONFIG_SND_SOC_TEGRA_TRIMSLICE=m
 CONFIG_SND_SOC_TEGRA_ALC5632=m
-CONFIG_SND_SOC_CPCAP=m
 CONFIG_SND_SOC_TEGRA_MAX98090=m
 CONFIG_SND_SOC_AK4642=m
+CONFIG_SND_SOC_CPCAP=m
 CONFIG_SND_SOC_SGTL5000=m
 CONFIG_SND_SOC_SPDIF=m
-CONFIG_SND_SOC_WM8978=m
-CONFIG_SND_SOC_STI=m
 CONFIG_SND_SOC_STI_SAS=m
-CONFIG_SND_SIMPLE_CARD=m
+CONFIG_SND_SOC_WM8978=m
+CONFIG_SND_SIMPLE_SCU_CARD=m
 CONFIG_USB=y
 CONFIG_USB_OTG=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_MVEBU=y
-CONFIG_USB_XHCI_RCAR=m
 CONFIG_USB_XHCI_TEGRA=m
 CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MSM=m
-CONFIG_USB_EHCI_EXYNOS=y
-CONFIG_USB_EHCI_TEGRA=y
 CONFIG_USB_EHCI_HCD_STI=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ISP1760=y
+CONFIG_USB_EHCI_TEGRA=y
+CONFIG_USB_EHCI_EXYNOS=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_STI=y
-CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_OHCI_EXYNOS=m
 CONFIG_USB_R8A66597_HCD=m
 CONFIG_USB_RENESAS_USBHS=m
@@ -746,18 +705,18 @@ CONFIG_USB_TI_CPPI41_DMA=y
 CONFIG_USB_TUSB_OMAP_DMA=y
 CONFIG_USB_DWC3=y
 CONFIG_USB_DWC2=y
-CONFIG_USB_HSIC_USB3503=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
 CONFIG_AB8500_USB=y
-CONFIG_KEYSTONE_USB_PHY=y
+CONFIG_KEYSTONE_USB_PHY=m
 CONFIG_NOP_USB_XCEIV=m
 CONFIG_AM335X_PHY_USB=m
 CONFIG_TWL6030_USB=m
 CONFIG_USB_GPIO_VBUS=y
 CONFIG_USB_ISP1301=y
-CONFIG_USB_MSM_OTG=m
 CONFIG_USB_MXS_PHY=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_FSL_USB2=y
@@ -793,21 +752,20 @@ CONFIG_MMC_SDHCI_OF_ESDHC=y
 CONFIG_MMC_SDHCI_ESDHC_IMX=y
 CONFIG_MMC_SDHCI_DOVE=y
 CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_SDHCI_S3C=y
 CONFIG_MMC_SDHCI_PXAV3=y
 CONFIG_MMC_SDHCI_SPEAR=y
-CONFIG_MMC_SDHCI_S3C=y
 CONFIG_MMC_SDHCI_S3C_DMA=y
 CONFIG_MMC_SDHCI_BCM_KONA=y
+CONFIG_MMC_MESON_MX_SDIO=y
 CONFIG_MMC_SDHCI_ST=y
 CONFIG_MMC_OMAP=y
 CONFIG_MMC_OMAP_HS=y
 CONFIG_MMC_ATMELMCI=y
 CONFIG_MMC_SDHCI_MSM=y
-CONFIG_MMC_MESON_MX_SDIO=y
 CONFIG_MMC_MVSDIO=y
 CONFIG_MMC_SDHI=y
 CONFIG_MMC_DW=y
-CONFIG_MMC_DW_PLTFM=y
 CONFIG_MMC_DW_EXYNOS=y
 CONFIG_MMC_DW_ROCKCHIP=y
 CONFIG_MMC_SH_MMCIF=y
@@ -847,94 +805,85 @@ CONFIG_RTC_DRV_MAX77686=y
 CONFIG_RTC_DRV_RK808=m
 CONFIG_RTC_DRV_RS5C372=m
 CONFIG_RTC_DRV_BQ32K=m
-CONFIG_RTC_DRV_PALMAS=y
-CONFIG_RTC_DRV_ST_LPC=y
 CONFIG_RTC_DRV_TWL4030=y
+CONFIG_RTC_DRV_PALMAS=y
 CONFIG_RTC_DRV_TPS6586X=y
 CONFIG_RTC_DRV_TPS65910=y
 CONFIG_RTC_DRV_S35390A=m
 CONFIG_RTC_DRV_RX8581=m
 CONFIG_RTC_DRV_EM3027=y
+CONFIG_RTC_DRV_S5M=m
 CONFIG_RTC_DRV_DA9063=m
 CONFIG_RTC_DRV_EFI=m
 CONFIG_RTC_DRV_DIGICOLOR=m
-CONFIG_RTC_DRV_S5M=m
 CONFIG_RTC_DRV_S3C=m
 CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_AT91RM9200=m
 CONFIG_RTC_DRV_AT91SAM9=m
 CONFIG_RTC_DRV_VT8500=y
-CONFIG_RTC_DRV_SUN6I=y
 CONFIG_RTC_DRV_SUNXI=y
 CONFIG_RTC_DRV_MV=y
 CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_ST_LPC=y
 CONFIG_RTC_DRV_CPCAP=m
 CONFIG_DMADEVICES=y
-CONFIG_DW_DMAC=y
 CONFIG_AT_HDMAC=y
 CONFIG_AT_XDMAC=y
+CONFIG_DMA_BCM2835=y
+CONFIG_DMA_SUN6I=y
 CONFIG_FSL_EDMA=y
+CONFIG_IMX_DMA=y
+CONFIG_IMX_SDMA=y
 CONFIG_MV_XOR=y
+CONFIG_MXS_DMA=y
+CONFIG_PL330_DMA=y
+CONFIG_SIRF_DMA=y
+CONFIG_STE_DMA40=y
+CONFIG_ST_FDMA=m
 CONFIG_TEGRA20_APB_DMA=y
+CONFIG_XILINX_DMA=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_DW_DMAC=y
 CONFIG_SH_DMAE=y
 CONFIG_RCAR_DMAC=y
 CONFIG_RENESAS_USB_DMAC=m
-CONFIG_STE_DMA40=y
-CONFIG_SIRF_DMA=y
-CONFIG_TI_EDMA=y
-CONFIG_PL330_DMA=y
-CONFIG_IMX_SDMA=y
-CONFIG_IMX_DMA=y
-CONFIG_MXS_DMA=y
-CONFIG_DMA_BCM2835=y
-CONFIG_DMA_OMAP=y
-CONFIG_QCOM_BAM_DMA=y
-CONFIG_XILINX_DMA=y
-CONFIG_DMA_SUN6I=y
-CONFIG_ST_FDMA=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_MMIO=y
 CONFIG_STAGING=y
-CONFIG_SENSORS_ISL29018=y
-CONFIG_SENSORS_ISL29028=y
 CONFIG_MFD_NVEC=y
 CONFIG_KEYBOARD_NVEC=y
 CONFIG_SERIO_NVEC_PS2=y
 CONFIG_NVEC_POWER=y
 CONFIG_NVEC_PAZ00=y
-CONFIG_BCMA=y
-CONFIG_BCMA_HOST_SOC=y
-CONFIG_BCMA_DRIVER_GMAC_CMN=y
-CONFIG_BCMA_DRIVER_GPIO=y
-CONFIG_QCOM_GSBI=y
-CONFIG_QCOM_PM=y
-CONFIG_QCOM_SMEM=y
-CONFIG_QCOM_SMD_RPM=y
-CONFIG_QCOM_SMP2P=y
-CONFIG_QCOM_SMSM=y
-CONFIG_QCOM_WCNSS_CTRL=m
-CONFIG_ROCKCHIP_PM_DOMAINS=y
-CONFIG_COMMON_CLK_QCOM=y
-CONFIG_QCOM_CLK_RPM=y
-CONFIG_CHROME_PLATFORMS=y
 CONFIG_STAGING_BOARD=y
-CONFIG_CROS_EC_CHARDEV=m
 CONFIG_COMMON_CLK_MAX77686=y
 CONFIG_COMMON_CLK_RK808=m
 CONFIG_COMMON_CLK_S2MPS11=m
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_RPM=y
 CONFIG_APQ_MMCC_8084=y
 CONFIG_MSM_GCC_8660=y
 CONFIG_MSM_MMCC_8960=y
 CONFIG_MSM_MMCC_8974=y
-CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_BCM2835_MBOX=y
 CONFIG_ROCKCHIP_IOMMU=y
 CONFIG_TEGRA_IOMMU_GART=y
 CONFIG_TEGRA_IOMMU_SMMU=y
 CONFIG_REMOTEPROC=m
 CONFIG_ST_REMOTEPROC=m
 CONFIG_RPMSG_VIRTIO=m
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_QCOM_GSBI=y
+CONFIG_QCOM_PM=y
+CONFIG_QCOM_SMD_RPM=m
+CONFIG_QCOM_WCNSS_CTRL=m
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_2x_SOC=y
+CONFIG_ARCH_TEGRA_3x_SOC=y
+CONFIG_ARCH_TEGRA_114_SOC=y
+CONFIG_ARCH_TEGRA_124_SOC=y
 CONFIG_PM_DEVFREQ=y
 CONFIG_ARM_TEGRA_DEVFREQ=m
-CONFIG_MEMORY=y
-CONFIG_EXTCON=y
 CONFIG_TI_AEMIF=y
 CONFIG_IIO=y
 CONFIG_IIO_SW_TRIGGER=y
@@ -947,56 +896,54 @@ CONFIG_VF610_ADC=m
 CONFIG_XILINX_XADC=y
 CONFIG_MPU3050_I2C=y
 CONFIG_CM36651=m
+CONFIG_SENSORS_ISL29018=y
+CONFIG_SENSORS_ISL29028=y
 CONFIG_AK8975=y
-CONFIG_RASPBERRYPI_POWER=y
 CONFIG_IIO_HRTIMER_TRIGGER=y
 CONFIG_PWM=y
 CONFIG_PWM_ATMEL=m
 CONFIG_PWM_ATMEL_HLCDC_PWM=m
 CONFIG_PWM_ATMEL_TCB=m
+CONFIG_PWM_BCM2835=y
+CONFIG_PWM_BRCMSTB=m
 CONFIG_PWM_FSL_FTM=m
 CONFIG_PWM_MESON=m
 CONFIG_PWM_RCAR=m
 CONFIG_PWM_RENESAS_TPU=y
 CONFIG_PWM_ROCKCHIP=m
 CONFIG_PWM_SAMSUNG=m
+CONFIG_PWM_STI=y
 CONFIG_PWM_SUN4I=y
 CONFIG_PWM_TEGRA=y
 CONFIG_PWM_VT8500=y
+CONFIG_KEYSTONE_IRQ=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_SUN9I_USB=y
 CONFIG_PHY_HIX5HD2_SATA=y
-CONFIG_E1000E=y
-CONFIG_PWM_STI=y
-CONFIG_PWM_BCM2835=y
-CONFIG_PWM_BRCMSTB=m
-CONFIG_PHY_DM816X_USB=m
-CONFIG_OMAP_USB2=y
-CONFIG_TI_PIPE3=y
-CONFIG_TWL4030_USB=m
+CONFIG_PHY_BERLIN_SATA=y
 CONFIG_PHY_BERLIN_USB=y
 CONFIG_PHY_CPCAP_USB=m
-CONFIG_PHY_BERLIN_SATA=y
+CONFIG_PHY_QCOM_APQ8064_SATA=m
+CONFIG_PHY_RCAR_GEN2=m
 CONFIG_PHY_ROCKCHIP_DP=m
 CONFIG_PHY_ROCKCHIP_USB=y
-CONFIG_PHY_QCOM_APQ8064_SATA=m
+CONFIG_PHY_SAMSUNG_USB2=m
 CONFIG_PHY_MIPHY28LP=y
-CONFIG_PHY_RCAR_GEN2=m
 CONFIG_PHY_STIH407_USB=y
 CONFIG_PHY_STM32_USBPHYC=y
-CONFIG_PHY_SUN4I_USB=y
-CONFIG_PHY_SUN9I_USB=y
-CONFIG_PHY_SAMSUNG_USB2=m
 CONFIG_PHY_TEGRA_XUSB=y
-CONFIG_PHY_BRCM_SATA=y
-CONFIG_NVMEM=y
+CONFIG_PHY_DM816X_USB=m
+CONFIG_OMAP_USB2=y
+CONFIG_TI_PIPE3=y
+CONFIG_TWL4030_USB=m
 CONFIG_NVMEM_IMX_OCOTP=y
 CONFIG_NVMEM_SUNXI_SID=y
 CONFIG_NVMEM_VF610_OCOTP=y
-CONFIG_BCM2835_MBOX=y
 CONFIG_RASPBERRYPI_FIRMWARE=y
-CONFIG_EFI_VARS=m
-CONFIG_EFI_CAPSULE_LOADER=m
 CONFIG_BCM47XX_NVRAM=y
 CONFIG_BCM47XX_SPROM=y
+CONFIG_EFI_VARS=m
+CONFIG_EFI_CAPSULE_LOADER=m
 CONFIG_EXT4_FS=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_MSDOS_FS=y
@@ -1004,7 +951,6 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_UBIFS_FS=y
-CONFIG_TMPFS=y
 CONFIG_SQUASHFS=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_SQUASHFS_XZ=y
@@ -1020,13 +966,7 @@ CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_UTF8=y
 CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
-CONFIG_LOCKUP_DETECTOR=y
-CONFIG_CPUFREQ_DT=y
-CONFIG_KEYSTONE_IRQ=y
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_ST=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
@@ -1035,27 +975,19 @@ CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_CRYPTO_DEV_MARVELL_CESA=m
 CONFIG_CRYPTO_DEV_EXYNOS_RNG=m
 CONFIG_CRYPTO_DEV_S5P=m
+CONFIG_CRYPTO_DEV_ATMEL_AES=m
+CONFIG_CRYPTO_DEV_ATMEL_TDES=m
+CONFIG_CRYPTO_DEV_ATMEL_SHA=m
 CONFIG_CRYPTO_DEV_SUN4I_SS=m
 CONFIG_CRYPTO_DEV_ROCKCHIP=m
 CONFIG_ARM_CRYPTO=y
-CONFIG_CRYPTO_SHA1_ARM=m
 CONFIG_CRYPTO_SHA1_ARM_NEON=m
 CONFIG_CRYPTO_SHA1_ARM_CE=m
 CONFIG_CRYPTO_SHA2_ARM_CE=m
-CONFIG_CRYPTO_SHA256_ARM=m
 CONFIG_CRYPTO_SHA512_ARM=m
 CONFIG_CRYPTO_AES_ARM=m
 CONFIG_CRYPTO_AES_ARM_BS=m
 CONFIG_CRYPTO_AES_ARM_CE=m
-CONFIG_CRYPTO_CHACHA20_NEON=m
-CONFIG_CRYPTO_CRC32_ARM_CE=m
-CONFIG_CRYPTO_CRCT10DIF_ARM_CE=m
 CONFIG_CRYPTO_GHASH_ARM_CE=m
-CONFIG_CRYPTO_DEV_ATMEL_AES=m
-CONFIG_CRYPTO_DEV_ATMEL_TDES=m
-CONFIG_CRYPTO_DEV_ATMEL_SHA=m
-CONFIG_VIDEO_VIVID=m
-CONFIG_VIRTIO=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_PCI_LEGACY=y
-CONFIG_VIRTIO_MMIO=y
+CONFIG_CRYPTO_CRC32_ARM_CE=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
index f09e9d66d605f4159990ad044cfd29486d621d20..dec130e7078c9adc10dae920c2c706143c7e126e 100644 (file)
@@ -544,7 +544,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
         * Increment event counter and perform fixup for the pre-signal
         * frame.
         */
-       rseq_signal_deliver(regs);
+       rseq_signal_deliver(ksig, regs);
 
        /*
         * Set up the stack frame
@@ -666,7 +666,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
                        } else {
                                clear_thread_flag(TIF_NOTIFY_RESUME);
                                tracehook_notify_resume(regs);
-                               rseq_handle_notify_resume(regs);
+                               rseq_handle_notify_resume(NULL, regs);
                        }
                }
                local_irq_disable();
index c46a728df44ead2a0b986a1f218e3c61e9820c34..25aac6ee2ab18cdd0189c27bac759efc02f2d1c5 100644 (file)
@@ -20,6 +20,7 @@ config ARCH_BCM_IPROC
        select GPIOLIB
        select ARM_AMBA
        select PINCTRL
+       select PCI_DOMAINS if PCI
        help
          This enables support for systems based on Broadcom IPROC architected SoCs.
          The IPROC complex contains one or more ARM CPUs along with common
index e22fb40e34bc55be6dd807de63fb9cd009107916..6d5beb11bd965a805107328d2522144f4b857f9a 100644 (file)
@@ -774,7 +774,7 @@ static struct gpiod_lookup_table mmc_gpios_table = {
                GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd",
                            GPIO_ACTIVE_LOW),
                GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp",
-                           GPIO_ACTIVE_LOW),
+                           GPIO_ACTIVE_HIGH),
        },
 };
 
index d0f62eacf59da510388dd206d2673eb1b6aa84e3..4adb901dd5ebdd99f2a2b747c1bc237e123a87de 100644 (file)
@@ -10,6 +10,7 @@ menuconfig ARCH_SOCFPGA
        select HAVE_ARM_SCU
        select HAVE_ARM_TWD if SMP
        select MFD_SYSCON
+       select PCI_DOMAINS if PCI
 
 if ARCH_SOCFPGA
 config SOCFPGA_SUSPEND
index 6e8b7161303936908b3b2b7adfced5d17de379ce..25b3ee85066e16e95652b9963645dab61bcb7bbd 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/cacheflush.h>
 #include <asm/hwcap.h>
 #include <asm/opcodes.h>
+#include <asm/system_info.h>
 
 #include "bpf_jit_32.h"
 
  * The callee saved registers depend on whether frame pointers are enabled.
  * With frame pointers (to be compliant with the ABI):
  *
- *                                high
- * original ARM_SP =>     +------------------+ \
- *                        |        pc        | |
- * current ARM_FP =>      +------------------+ } callee saved registers
- *                        |r4-r8,r10,fp,ip,lr| |
- *                        +------------------+ /
- *                                low
+ *                              high
+ * original ARM_SP =>     +--------------+ \
+ *                        |      pc      | |
+ * current ARM_FP =>      +--------------+ } callee saved registers
+ *                        |r4-r9,fp,ip,lr| |
+ *                        +--------------+ /
+ *                              low
  *
  * Without frame pointers:
  *
- *                                high
- * original ARM_SP =>     +------------------+
- *                        | r4-r8,r10,fp,lr  | callee saved registers
- * current ARM_FP =>      +------------------+
- *                                low
+ *                              high
+ * original ARM_SP =>     +--------------+
+ *                        |  r4-r9,fp,lr | callee saved registers
+ * current ARM_FP =>      +--------------+
+ *                              low
  *
  * When popping registers off the stack at the end of a BPF function, we
  * reference them via the current ARM_FP register.
  */
 #define CALLEE_MASK    (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
-                        1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \
+                        1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \
                         1 << ARM_FP)
 #define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
 #define CALLEE_POP_MASK  (CALLEE_MASK | 1 << ARM_PC)
 
-#define STACK_OFFSET(k)        (k)
+enum {
+       /* Stack layout - these are offsets from (top of stack - 4) */
+       BPF_R2_HI,
+       BPF_R2_LO,
+       BPF_R3_HI,
+       BPF_R3_LO,
+       BPF_R4_HI,
+       BPF_R4_LO,
+       BPF_R5_HI,
+       BPF_R5_LO,
+       BPF_R7_HI,
+       BPF_R7_LO,
+       BPF_R8_HI,
+       BPF_R8_LO,
+       BPF_R9_HI,
+       BPF_R9_LO,
+       BPF_FP_HI,
+       BPF_FP_LO,
+       BPF_TC_HI,
+       BPF_TC_LO,
+       BPF_AX_HI,
+       BPF_AX_LO,
+       /* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
+        * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
+        * BPF_REG_FP and Tail call counts.
+        */
+       BPF_JIT_SCRATCH_REGS,
+};
+
+/*
+ * Negative "register" values indicate the register is stored on the stack
+ * and are the offset from the top of the eBPF JIT scratch space.
+ */
+#define STACK_OFFSET(k)        (-4 - (k) * 4)
+#define SCRATCH_SIZE   (BPF_JIT_SCRATCH_REGS * 4)
+
+#ifdef CONFIG_FRAME_POINTER
+#define EBPF_SCRATCH_TO_ARM_FP(x) ((x) - 4 * hweight16(CALLEE_PUSH_MASK) - 4)
+#else
+#define EBPF_SCRATCH_TO_ARM_FP(x) (x)
+#endif
+
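+/*
+ * Worked example (illustration): the enum above starts at
+ * BPF_R2_HI == 0, so the high word of BPF_REG_2 lives at
+ * STACK_OFFSET(BPF_R2_HI) == -4 - 0 * 4 == -4 and its low word at
+ * STACK_OFFSET(BPF_R2_LO) == -8, down to BPF_AX_LO at -80, which
+ * matches SCRATCH_SIZE == 20 * 4 == 80 bytes.  With frame pointers,
+ * CALLEE_PUSH_MASK names eight registers (r4-r9, fp, lr), so
+ * EBPF_SCRATCH_TO_ARM_FP(-4) == -4 - 4 * 8 - 4 == -40: the first
+ * scratch word sits 40 bytes below ARM_FP.
+ */
+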
 #define TMP_REG_1      (MAX_BPF_JIT_REG + 0)   /* TEMP Register 1 */
 #define TMP_REG_2      (MAX_BPF_JIT_REG + 1)   /* TEMP Register 2 */
 #define TCALL_CNT      (MAX_BPF_JIT_REG + 2)   /* Tail Call Count */
  * scratch memory space and we have to build eBPF 64 bit register from those.
  *
  */
-static const u8 bpf2a32[][2] = {
+static const s8 bpf2a32[][2] = {
        /* return value from in-kernel function, and exit value from eBPF */
        [BPF_REG_0] = {ARM_R1, ARM_R0},
        /* arguments from eBPF program to in-kernel function */
        [BPF_REG_1] = {ARM_R3, ARM_R2},
        /* Stored on stack scratch space */
-       [BPF_REG_2] = {STACK_OFFSET(0), STACK_OFFSET(4)},
-       [BPF_REG_3] = {STACK_OFFSET(8), STACK_OFFSET(12)},
-       [BPF_REG_4] = {STACK_OFFSET(16), STACK_OFFSET(20)},
-       [BPF_REG_5] = {STACK_OFFSET(24), STACK_OFFSET(28)},
+       [BPF_REG_2] = {STACK_OFFSET(BPF_R2_HI), STACK_OFFSET(BPF_R2_LO)},
+       [BPF_REG_3] = {STACK_OFFSET(BPF_R3_HI), STACK_OFFSET(BPF_R3_LO)},
+       [BPF_REG_4] = {STACK_OFFSET(BPF_R4_HI), STACK_OFFSET(BPF_R4_LO)},
+       [BPF_REG_5] = {STACK_OFFSET(BPF_R5_HI), STACK_OFFSET(BPF_R5_LO)},
        /* callee saved registers that in-kernel function will preserve */
        [BPF_REG_6] = {ARM_R5, ARM_R4},
        /* Stored on stack scratch space */
-       [BPF_REG_7] = {STACK_OFFSET(32), STACK_OFFSET(36)},
-       [BPF_REG_8] = {STACK_OFFSET(40), STACK_OFFSET(44)},
-       [BPF_REG_9] = {STACK_OFFSET(48), STACK_OFFSET(52)},
+       [BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)},
+       [BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)},
+       [BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)},
        /* Read only Frame Pointer to access Stack */
-       [BPF_REG_FP] = {STACK_OFFSET(56), STACK_OFFSET(60)},
+       [BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)},
        /* Temporary Register for internal BPF JIT, can be used
         * for constant blindings and others.
         */
        [TMP_REG_1] = {ARM_R7, ARM_R6},
-       [TMP_REG_2] = {ARM_R10, ARM_R8},
+       [TMP_REG_2] = {ARM_R9, ARM_R8},
        /* Tail call count. Stored on stack scratch space. */
-       [TCALL_CNT] = {STACK_OFFSET(64), STACK_OFFSET(68)},
+       [TCALL_CNT] = {STACK_OFFSET(BPF_TC_HI), STACK_OFFSET(BPF_TC_LO)},
        /* temporary register for blinding constants.
         * Stored on stack scratch space.
         */
-       [BPF_REG_AX] = {STACK_OFFSET(72), STACK_OFFSET(76)},
+       [BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)},
 };
 
 #define        dst_lo  dst[1]
@@ -151,6 +193,7 @@ struct jit_ctx {
        unsigned int idx;
        unsigned int prologue_bytes;
        unsigned int epilogue_offset;
+       unsigned int cpu_architecture;
        u32 flags;
        u32 *offsets;
        u32 *target;
@@ -195,10 +238,56 @@ static inline void emit(u32 inst, struct jit_ctx *ctx)
        _emit(ARM_COND_AL, inst, ctx);
 }
 
+/*
+ * This is rather horrid, but necessary to convert an integer constant
+ * to an immediate operand for the opcodes, and to be able to detect at
+ * build time whether the constant can't be converted (in other words,
+ * whether it is usable in BUILD_BUG_ON()).
+ */
+#define imm12val(v, s) (rol32(v, (s)) | (s) << 7)
+#define const_imm8m(x)                                 \
+       ({ int r;                                       \
+          u32 v = (x);                                 \
+          if (!(v & ~0x000000ff))                      \
+               r = imm12val(v, 0);                     \
+          else if (!(v & ~0xc000003f))                 \
+               r = imm12val(v, 2);                     \
+          else if (!(v & ~0xf000000f))                 \
+               r = imm12val(v, 4);                     \
+          else if (!(v & ~0xfc000003))                 \
+               r = imm12val(v, 6);                     \
+          else if (!(v & ~0xff000000))                 \
+               r = imm12val(v, 8);                     \
+          else if (!(v & ~0x3fc00000))                 \
+               r = imm12val(v, 10);                    \
+          else if (!(v & ~0x0ff00000))                 \
+               r = imm12val(v, 12);                    \
+          else if (!(v & ~0x03fc0000))                 \
+               r = imm12val(v, 14);                    \
+          else if (!(v & ~0x00ff0000))                 \
+               r = imm12val(v, 16);                    \
+          else if (!(v & ~0x003fc000))                 \
+               r = imm12val(v, 18);                    \
+          else if (!(v & ~0x000ff000))                 \
+               r = imm12val(v, 20);                    \
+          else if (!(v & ~0x0003fc00))                 \
+               r = imm12val(v, 22);                    \
+          else if (!(v & ~0x0000ff00))                 \
+               r = imm12val(v, 24);                    \
+          else if (!(v & ~0x00003fc0))                 \
+               r = imm12val(v, 26);                    \
+          else if (!(v & ~0x00000ff0))                 \
+               r = imm12val(v, 28);                    \
+          else if (!(v & ~0x000003fc))                 \
+               r = imm12val(v, 30);                    \
+          else                                         \
+               r = -1;                                 \
+          r; })
+
 /*
  * Checks whether an immediate value can be converted to an imm12 (12-bit) value.
  */
-static int16_t imm8m(u32 x)
+static int imm8m(u32 x)
 {
        u32 rot;
 
@@ -208,6 +297,38 @@ static int16_t imm8m(u32 x)
        return -1;
 }
 
+#define imm8m(x) (__builtin_constant_p(x) ? const_imm8m(x) : imm8m(x))
+
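To see the rotate encoding concretely, here is a minimal userspace sketch of the same arithmetic (rol32() is reimplemented locally as a stand-in for the kernel's <linux/bitops.h> helper):

	#include <stdio.h>
	#include <stdint.h>

	/* local stand-in for the kernel's rol32() */
	static uint32_t rol32(uint32_t v, unsigned int s)
	{
		return s ? (v << s) | (v >> (32 - s)) : v;
	}

	#define imm12val(v, s) (rol32(v, (s)) | (s) << 7)

	int main(void)
	{
		/* 0xff000000 is 0xff rotated right by 8, so s = 8:
		 * value field 0xff, rotate field 8/2 = 4 in bits [11:8].
		 */
		printf("0x%03x\n", imm12val(0xff000000u, 8));	/* prints 0x4ff */
		return 0;
	}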
+static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12)
+{
+       op |= rt << 12 | rn << 16;
+       if (imm12 >= 0)
+               op |= ARM_INST_LDST__U;
+       else
+               imm12 = -imm12;
+       return op | (imm12 & ARM_INST_LDST__IMM12);
+}
+
+static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8)
+{
+       op |= rt << 12 | rn << 16;
+       if (imm8 >= 0)
+               op |= ARM_INST_LDST__U;
+       else
+               imm8 = -imm8;
+       return op | (imm8 & 0xf0) << 4 | (imm8 & 0x0f);
+}
+
+#define ARM_LDR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, rn, off)
+#define ARM_LDRB_I(rt, rn, off)        arm_bpf_ldst_imm12(ARM_INST_LDRB_I, rt, rn, off)
+#define ARM_LDRD_I(rt, rn, off)        arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off)
+#define ARM_LDRH_I(rt, rn, off)        arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off)
+
+#define ARM_STR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off)
+#define ARM_STRB_I(rt, rn, off)        arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off)
+#define ARM_STRD_I(rt, rn, off)        arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off)
+#define ARM_STRH_I(rt, rn, off)        arm_bpf_ldst_imm8(ARM_INST_STRH_I, rt, rn, off)
+
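A note on what these helpers encode (a sketch derived from the code above):

	/* arm_bpf_ldst_imm12() keeps the offset's magnitude in the low
	 * 12 bits and uses ARM_INST_LDST__U to select add vs. subtract,
	 * so ARM_LDR_I(rt, ARM_FP, -40) becomes "ldr rt, [fp, #-40]".
	 * The imm8 form used by LDRH/LDRD/STRH/STRD splits the 8-bit
	 * magnitude into two nibbles, which is why halfword accesses are
	 * limited to +/-0xff while word accesses reach +/-0xfff.
	 */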
 /*
  * Initializes the JIT space with undefined instructions.
  */
@@ -227,19 +348,10 @@ static void jit_fill_hole(void *area, unsigned int size)
 #define STACK_ALIGNMENT        4
 #endif
 
-/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
- * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
- * BPF_REG_FP and Tail call counts.
- */
-#define SCRATCH_SIZE 80
-
 /* total stack size used in JITed code */
 #define _STACK_SIZE    (ctx->prog->aux->stack_depth + SCRATCH_SIZE)
 #define STACK_SIZE     ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
 
-/* Get the offset of eBPF REGISTERs stored on scratch space. */
-#define STACK_VAR(off) (STACK_SIZE - off)
-
 #if __LINUX_ARM_ARCH__ < 7
 
 static u16 imm_offset(u32 k, struct jit_ctx *ctx)
@@ -355,7 +467,7 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
 
 static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
 {
-       const u8 *tmp = bpf2a32[TMP_REG_1];
+       const s8 *tmp = bpf2a32[TMP_REG_1];
 
 #if __LINUX_ARM_ARCH__ == 7
        if (elf_hwcap & HWCAP_IDIVA) {
@@ -402,44 +514,110 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
                emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
 }
 
-/* Checks whether BPF register is on scratch stack space or not. */
-static inline bool is_on_stack(u8 bpf_reg)
+/* Is the translated BPF register on the stack? */
+static bool is_stacked(s8 reg)
+{
+       return reg < 0;
+}
+
+/* If a BPF register is stacked (negative "register" value), load it
+ * into the supplied temporary register and return that register for
+ * subsequent operations; otherwise just use the CPU register.
+ */
+static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx)
+{
+       if (is_stacked(reg)) {
+               emit(ARM_LDR_I(tmp, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
+               reg = tmp;
+       }
+       return reg;
+}
+
+static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp,
+                                  struct jit_ctx *ctx)
 {
-       static u8 stack_regs[] = {BPF_REG_AX, BPF_REG_3, BPF_REG_4, BPF_REG_5,
-                               BPF_REG_7, BPF_REG_8, BPF_REG_9, TCALL_CNT,
-                               BPF_REG_2, BPF_REG_FP};
-       int i, reg_len = sizeof(stack_regs);
-
-       for (i = 0 ; i < reg_len ; i++) {
-               if (bpf_reg == stack_regs[i])
-                       return true;
+       if (is_stacked(reg[1])) {
+               if (__LINUX_ARM_ARCH__ >= 6 ||
+                   ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
+                       emit(ARM_LDRD_I(tmp[1], ARM_FP,
+                                       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
+               } else {
+                       emit(ARM_LDR_I(tmp[1], ARM_FP,
+                                      EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
+                       emit(ARM_LDR_I(tmp[0], ARM_FP,
+                                      EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
+               }
+               reg = tmp;
+       }
+       return reg;
+}
+
+/* If a BPF register is stacked (negative "register" value), save the
+ * source register back to its stack slot.  Otherwise, if the source
+ * register differs from the destination, move it into place.
+ */
+static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx)
+{
+       if (is_stacked(reg))
+               emit(ARM_STR_I(src, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
+       else if (reg != src)
+               emit(ARM_MOV_R(reg, src), ctx);
+}
+
+static void arm_bpf_put_reg64(const s8 *reg, const s8 *src,
+                             struct jit_ctx *ctx)
+{
+       if (is_stacked(reg[1])) {
+               if (__LINUX_ARM_ARCH__ >= 6 ||
+                   ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
+                       emit(ARM_STRD_I(src[1], ARM_FP,
+                                      EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
+               } else {
+                       emit(ARM_STR_I(src[1], ARM_FP,
+                                      EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
+                       emit(ARM_STR_I(src[0], ARM_FP,
+                                      EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
+               }
+       } else {
+               if (reg[1] != src[1])
+                       emit(ARM_MOV_R(reg[1], src[1]), ctx);
+               if (reg[0] != src[0])
+                       emit(ARM_MOV_R(reg[0], src[0]), ctx);
        }
-       return false;
 }
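With these helpers, the per-instruction code the rest of the patch converts to follows one uniform shape; a minimal sketch (ARM_ADD_I here is just a stand-in for whatever operation the instruction performs):

	const s8 *dst = bpf2a32[insn->dst_reg];
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rd;

	rd = arm_bpf_get_reg32(dst_lo, tmp[0], ctx);	/* load if stacked */
	emit(ARM_ADD_I(rd, rd, 1), ctx);		/* operate on CPU regs */
	arm_bpf_put_reg32(dst_lo, rd, ctx);		/* store back if stacked */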
 
-static inline void emit_a32_mov_i(const u8 dst, const u32 val,
-                                 bool dstk, struct jit_ctx *ctx)
+static inline void emit_a32_mov_i(const s8 dst, const u32 val,
+                                 struct jit_ctx *ctx)
 {
-       const u8 *tmp = bpf2a32[TMP_REG_1];
+       const s8 *tmp = bpf2a32[TMP_REG_1];
 
-       if (dstk) {
+       if (is_stacked(dst)) {
                emit_mov_i(tmp[1], val, ctx);
-               emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(dst)), ctx);
+               arm_bpf_put_reg32(dst, tmp[1], ctx);
        } else {
                emit_mov_i(dst, val, ctx);
        }
 }
 
+static void emit_a32_mov_i64(const s8 dst[], u64 val, struct jit_ctx *ctx)
+{
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
+
+       emit_mov_i(rd[1], (u32)val, ctx);
+       emit_mov_i(rd[0], val >> 32, ctx);
+
+       arm_bpf_put_reg64(dst, rd, ctx);
+}
+
 /* Sign extended move */
-static inline void emit_a32_mov_i64(const bool is64, const u8 dst[],
-                                 const u32 val, bool dstk,
-                                 struct jit_ctx *ctx) {
-       u32 hi = 0;
+static inline void emit_a32_mov_se_i64(const bool is64, const s8 dst[],
+                                      const u32 val, struct jit_ctx *ctx) {
+       u64 val64 = val;
 
        if (is64 && (val & (1<<31)))
-               hi = (u32)~0;
-       emit_a32_mov_i(dst_lo, val, dstk, ctx);
-       emit_a32_mov_i(dst_hi, hi, dstk, ctx);
+               val64 |= 0xffffffff00000000ULL;
+       emit_a32_mov_i64(dst, val64, ctx);
 }
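A quick worked example of the sign extension (values follow directly from the code):

	/* is64 && val == 0xfffffffe gives val64 = 0xfffffffffffffffe;
	 * with is64 clear the same value stays 0x00000000fffffffe,
	 * i.e. the high word is zeroed.
	 */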
 
 static inline void emit_a32_add_r(const u8 dst, const u8 src,
@@ -521,75 +699,94 @@ static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64,
 /* ALU operation (32 bit)
  * dst = dst (op) src
  */
-static inline void emit_a32_alu_r(const u8 dst, const u8 src,
-                                 bool dstk, bool sstk,
+static inline void emit_a32_alu_r(const s8 dst, const s8 src,
                                  struct jit_ctx *ctx, const bool is64,
                                  const bool hi, const u8 op) {
-       const u8 *tmp = bpf2a32[TMP_REG_1];
-       u8 rn = sstk ? tmp[1] : src;
-
-       if (sstk)
-               emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src)), ctx);
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       s8 rn, rd;
 
+       rn = arm_bpf_get_reg32(src, tmp[1], ctx);
+       rd = arm_bpf_get_reg32(dst, tmp[0], ctx);
        /* ALU operation */
-       if (dstk) {
-               emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx);
-               emit_alu_r(tmp[0], rn, is64, hi, op, ctx);
-               emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx);
-       } else {
-               emit_alu_r(dst, rn, is64, hi, op, ctx);
-       }
+       emit_alu_r(rd, rn, is64, hi, op, ctx);
+       arm_bpf_put_reg32(dst, rd, ctx);
 }
 
 /* ALU operation (64 bit) */
-static inline void emit_a32_alu_r64(const bool is64, const u8 dst[],
-                                 const u8 src[], bool dstk,
-                                 bool sstk, struct jit_ctx *ctx,
+static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
+                                 const s8 src[], struct jit_ctx *ctx,
                                  const u8 op) {
-       emit_a32_alu_r(dst_lo, src_lo, dstk, sstk, ctx, is64, false, op);
-       if (is64)
-               emit_a32_alu_r(dst_hi, src_hi, dstk, sstk, ctx, is64, true, op);
-       else
-               emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       const s8 *tmp2 = bpf2a32[TMP_REG_2];
+       const s8 *rd;
+
+       rd = arm_bpf_get_reg64(dst, tmp, ctx);
+       if (is64) {
+               const s8 *rs;
+
+               rs = arm_bpf_get_reg64(src, tmp2, ctx);
+
+               /* ALU operation */
+               emit_alu_r(rd[1], rs[1], true, false, op, ctx);
+               emit_alu_r(rd[0], rs[0], true, true, op, ctx);
+       } else {
+               s8 rs;
+
+               rs = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
+
+               /* ALU operation */
+               emit_alu_r(rd[1], rs, true, false, op, ctx);
+               emit_a32_mov_i(rd[0], 0, ctx);
+       }
+
+       arm_bpf_put_reg64(dst, rd, ctx);
 }
 
-/* dst = imm (4 bytes)*/
-static inline void emit_a32_mov_r(const u8 dst, const u8 src,
-                                 bool dstk, bool sstk,
+/* dst = src (4 bytes) */
+static inline void emit_a32_mov_r(const s8 dst, const s8 src,
                                  struct jit_ctx *ctx) {
-       const u8 *tmp = bpf2a32[TMP_REG_1];
-       u8 rt = sstk ? tmp[0] : src;
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       s8 rt;
 
-       if (sstk)
-               emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(src)), ctx);
-       if (dstk)
-               emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst)), ctx);
-       else
-               emit(ARM_MOV_R(dst, rt), ctx);
+       rt = arm_bpf_get_reg32(src, tmp[0], ctx);
+       arm_bpf_put_reg32(dst, rt, ctx);
 }
 
 /* dst = src */
-static inline void emit_a32_mov_r64(const bool is64, const u8 dst[],
-                                 const u8 src[], bool dstk,
-                                 bool sstk, struct jit_ctx *ctx) {
-       emit_a32_mov_r(dst_lo, src_lo, dstk, sstk, ctx);
-       if (is64) {
+static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
+                                 const s8 src[],
+                                 struct jit_ctx *ctx) {
+       if (!is64) {
+               emit_a32_mov_r(dst_lo, src_lo, ctx);
+               /* Zero out high 4 bytes */
+               emit_a32_mov_i(dst_hi, 0, ctx);
+       } else if (__LINUX_ARM_ARCH__ < 6 &&
+                  ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
                /* complete 8 byte move */
-               emit_a32_mov_r(dst_hi, src_hi, dstk, sstk, ctx);
+               emit_a32_mov_r(dst_lo, src_lo, ctx);
+               emit_a32_mov_r(dst_hi, src_hi, ctx);
+       } else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
+               const s8 *tmp = bpf2a32[TMP_REG_1];
+
+               emit(ARM_LDRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
+               emit(ARM_STRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
+       } else if (is_stacked(src_lo)) {
+               emit(ARM_LDRD_I(dst[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
+       } else if (is_stacked(dst_lo)) {
+               emit(ARM_STRD_I(src[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
        } else {
-               /* Zero out high 4 bytes */
-               emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+               emit(ARM_MOV_R(dst[0], src[0]), ctx);
+               emit(ARM_MOV_R(dst[1], src[1]), ctx);
        }
 }
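The case analysis above is worth spelling out (a sketch of the cost of each branch):

	/* On ARMv5TE+ a stacked-to-stacked 64-bit move costs one
	 * ldrd + one strd; the mixed cases need a single ldrd or strd;
	 * only pre-v5TE cores fall back to two 32-bit moves, each of
	 * which may itself be a load/store pair when stacked.
	 */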
 
 /* Shift operations */
-static inline void emit_a32_alu_i(const u8 dst, const u32 val, bool dstk,
+static inline void emit_a32_alu_i(const s8 dst, const u32 val,
                                struct jit_ctx *ctx, const u8 op) {
-       const u8 *tmp = bpf2a32[TMP_REG_1];
-       u8 rd = dstk ? tmp[0] : dst;
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       s8 rd;
 
-       if (dstk)
-               emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
+       rd = arm_bpf_get_reg32(dst, tmp[0], ctx);
 
        /* Do shift operation */
        switch (op) {
@@ -604,303 +801,245 @@ static inline void emit_a32_alu_i(const u8 dst, const u32 val, bool dstk,
                break;
        }
 
-       if (dstk)
-               emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
+       arm_bpf_put_reg32(dst, rd, ctx);
 }
 
 /* dst = ~dst (64 bit) */
-static inline void emit_a32_neg64(const u8 dst[], bool dstk,
+static inline void emit_a32_neg64(const s8 dst[],
                                struct jit_ctx *ctx){
-       const u8 *tmp = bpf2a32[TMP_REG_1];
-       u8 rd = dstk ? tmp[1] : dst[1];
-       u8 rm = dstk ? tmp[0] : dst[0];
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       const s8 *rd;
 
        /* Setup Operand */
-       if (dstk) {
-               emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
-       }
+       rd = arm_bpf_get_reg64(dst, tmp, ctx);
 
        /* Do Negate Operation */
-       emit(ARM_RSBS_I(rd, rd, 0), ctx);
-       emit(ARM_RSC_I(rm, rm, 0), ctx);
+       emit(ARM_RSBS_I(rd[1], rd[1], 0), ctx);
+       emit(ARM_RSC_I(rd[0], rd[0], 0), ctx);
 
-       if (dstk) {
-               emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
-       }
+       arm_bpf_put_reg64(dst, rd, ctx);
 }
 
 /* dst = dst << src */
-static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk,
-                                   bool sstk, struct jit_ctx *ctx) {
-       const u8 *tmp = bpf2a32[TMP_REG_1];
-       const u8 *tmp2 = bpf2a32[TMP_REG_2];
+static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[],
+                                   struct jit_ctx *ctx) {
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       const s8 *tmp2 = bpf2a32[TMP_REG_2];
+       const s8 *rd;
+       s8 rt;
 
        /* Setup Operands */
-       u8 rt = sstk ? tmp2[1] : src_lo;
-       u8 rd = dstk ? tmp[1] : dst_lo;
-       u8 rm = dstk ? tmp[0] : dst_hi;
-
-       if (sstk)
-               emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
-       if (dstk) {
-               emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
-       }
+       rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
+       rd = arm_bpf_get_reg64(dst, tmp, ctx);
 
        /* Do LSH operation */
        emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
        emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
-       emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx);
-       emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx);
-       emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx);
-       emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_ASL, rt), ctx);
-
-       if (dstk) {
-               emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
-       } else {
-               emit(ARM_MOV_R(rd, ARM_LR), ctx);
-               emit(ARM_MOV_R(rm, ARM_IP), ctx);
-       }
+       emit(ARM_MOV_SR(ARM_LR, rd[0], SRTYPE_ASL, rt), ctx);
+       emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[1], SRTYPE_ASL, ARM_IP), ctx);
+       emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd[1], SRTYPE_LSR, tmp2[0]), ctx);
+       emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_ASL, rt), ctx);
+
+       arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
+       arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
 }
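A worked trace of the emitted sequence (a sketch relying on ARM's rule that register-specified shifts of 32 or more yield zero):

	/* For src = 40: with rt = 40,
	 *   lr  = hi << 40                = 0
	 *   lr |= lo << (40 - 32)         = lo << 8
	 *   ip  = lr | (lo >> (32 - 40))  = lo << 8  (shift byte >= 32)
	 *   lr  = lo << 40                = 0
	 * so dst_hi = lo << 8 and dst_lo = 0, the correct 64-bit result.
	 */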
 
 /* dst = dst >> src (signed)*/
-static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
-                                   bool sstk, struct jit_ctx *ctx) {
-       const u8 *tmp = bpf2a32[TMP_REG_1];
-       const u8 *tmp2 = bpf2a32[TMP_REG_2];
+static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[],
+                                    struct jit_ctx *ctx) {
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       const s8 *tmp2 = bpf2a32[TMP_REG_2];
+       const s8 *rd;
+       s8 rt;
+
        /* Setup Operands */
-       u8 rt = sstk ? tmp2[1] : src_lo;
-       u8 rd = dstk ? tmp[1] : dst_lo;
-       u8 rm = dstk ? tmp[0] : dst_hi;
-
-       if (sstk)
-               emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
-       if (dstk) {
-               emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
-       }
+       rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
+       rd = arm_bpf_get_reg64(dst, tmp, ctx);
 
        /* Do the ARSH operation */
        emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
        emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
-       emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
-       emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
+       emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
+       emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
        _emit(ARM_COND_MI, ARM_B(0), ctx);
-       emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASR, tmp2[0]), ctx);
-       emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_ASR, rt), ctx);
-       if (dstk) {
-               emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
-       } else {
-               emit(ARM_MOV_R(rd, ARM_LR), ctx);
-               emit(ARM_MOV_R(rm, ARM_IP), ctx);
-       }
+       emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASR, tmp2[0]), ctx);
+       emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_ASR, rt), ctx);
+
+       arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
+       arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
 }
 
 /* dst = dst >> src */
-static inline void emit_a32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
-                                    bool sstk, struct jit_ctx *ctx) {
-       const u8 *tmp = bpf2a32[TMP_REG_1];
-       const u8 *tmp2 = bpf2a32[TMP_REG_2];
+static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[],
+                                   struct jit_ctx *ctx) {
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       const s8 *tmp2 = bpf2a32[TMP_REG_2];
+       const s8 *rd;
+       s8 rt;
+
        /* Setup Operands */
-       u8 rt = sstk ? tmp2[1] : src_lo;
-       u8 rd = dstk ? tmp[1] : dst_lo;
-       u8 rm = dstk ? tmp[0] : dst_hi;
-
-       if (sstk)
-               emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
-       if (dstk) {
-               emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
-       }
+       rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
+       rd = arm_bpf_get_reg64(dst, tmp, ctx);
 
        /* Do RSH operation */
        emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
        emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
-       emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
-       emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
-       emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx);
-       emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_LSR, rt), ctx);
-       if (dstk) {
-               emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
-       } else {
-               emit(ARM_MOV_R(rd, ARM_LR), ctx);
-               emit(ARM_MOV_R(rm, ARM_IP), ctx);
-       }
+       emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
+       emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
+       emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_LSR, tmp2[0]), ctx);
+       emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_LSR, rt), ctx);
+
+       arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
+       arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
 }
 
 /* dst = dst << val */
-static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk,
-                                    const u32 val, struct jit_ctx *ctx){
-       const u8 *tmp = bpf2a32[TMP_REG_1];
-       const u8 *tmp2 = bpf2a32[TMP_REG_2];
-       /* Setup operands */
-       u8 rd = dstk ? tmp[1] : dst_lo;
-       u8 rm = dstk ? tmp[0] : dst_hi;
+static inline void emit_a32_lsh_i64(const s8 dst[],
+                                   const u32 val, struct jit_ctx *ctx){
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       const s8 *tmp2 = bpf2a32[TMP_REG_2];
+       const s8 *rd;
 
-       if (dstk) {
-               emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
-       }
+       /* Setup operands */
+       rd = arm_bpf_get_reg64(dst, tmp, ctx);
 
        /* Do LSH operation */
        if (val < 32) {
-               emit(ARM_MOV_SI(tmp2[0], rm, SRTYPE_ASL, val), ctx);
-               emit(ARM_ORR_SI(rm, tmp2[0], rd, SRTYPE_LSR, 32 - val), ctx);
-               emit(ARM_MOV_SI(rd, rd, SRTYPE_ASL, val), ctx);
+               emit(ARM_MOV_SI(tmp2[0], rd[0], SRTYPE_ASL, val), ctx);
+               emit(ARM_ORR_SI(rd[0], tmp2[0], rd[1], SRTYPE_LSR, 32 - val), ctx);
+               emit(ARM_MOV_SI(rd[1], rd[1], SRTYPE_ASL, val), ctx);
        } else {
                if (val == 32)
-                       emit(ARM_MOV_R(rm, rd), ctx);
+                       emit(ARM_MOV_R(rd[0], rd[1]), ctx);
                else
-                       emit(ARM_MOV_SI(rm, rd, SRTYPE_ASL, val - 32), ctx);
-               emit(ARM_EOR_R(rd, rd, rd), ctx);
+                       emit(ARM_MOV_SI(rd[0], rd[1], SRTYPE_ASL, val - 32), ctx);
+               emit(ARM_EOR_R(rd[1], rd[1], rd[1]), ctx);
        }
 
-       if (dstk) {
-               emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
-       }
+       arm_bpf_put_reg64(dst, rd, ctx);
 }
 
 /* dst = dst >> val */
-static inline void emit_a32_rsh_i64(const u8 dst[], bool dstk,
+static inline void emit_a32_rsh_i64(const s8 dst[],
                                    const u32 val, struct jit_ctx *ctx) {
-       const u8 *tmp = bpf2a32[TMP_REG_1];
-       const u8 *tmp2 = bpf2a32[TMP_REG_2];
-       /* Setup operands */
-       u8 rd = dstk ? tmp[1] : dst_lo;
-       u8 rm = dstk ? tmp[0] : dst_hi;
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       const s8 *tmp2 = bpf2a32[TMP_REG_2];
+       const s8 *rd;
 
-       if (dstk) {
-               emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
-       }
+       /* Setup operands */
+       rd = arm_bpf_get_reg64(dst, tmp, ctx);
 
        /* Do LSR operation */
        if (val < 32) {
-               emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx);
-               emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx);
-               emit(ARM_MOV_SI(rm, rm, SRTYPE_LSR, val), ctx);
+               emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
+               emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
+               emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx);
        } else if (val == 32) {
-               emit(ARM_MOV_R(rd, rm), ctx);
-               emit(ARM_MOV_I(rm, 0), ctx);
+               emit(ARM_MOV_R(rd[1], rd[0]), ctx);
+               emit(ARM_MOV_I(rd[0], 0), ctx);
        } else {
-               emit(ARM_MOV_SI(rd, rm, SRTYPE_LSR, val - 32), ctx);
-               emit(ARM_MOV_I(rm, 0), ctx);
+               emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_LSR, val - 32), ctx);
+               emit(ARM_MOV_I(rd[0], 0), ctx);
        }
 
-       if (dstk) {
-               emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
-       }
+       arm_bpf_put_reg64(dst, rd, ctx);
 }
 
 /* dst = dst >> val (signed) */
-static inline void emit_a32_arsh_i64(const u8 dst[], bool dstk,
+static inline void emit_a32_arsh_i64(const s8 dst[],
                                     const u32 val, struct jit_ctx *ctx){
-       const u8 *tmp = bpf2a32[TMP_REG_1];
-       const u8 *tmp2 = bpf2a32[TMP_REG_2];
-        /* Setup operands */
-       u8 rd = dstk ? tmp[1] : dst_lo;
-       u8 rm = dstk ? tmp[0] : dst_hi;
-
-       if (dstk) {
-               emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
-       }
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       const s8 *tmp2 = bpf2a32[TMP_REG_2];
+       const s8 *rd;
+
+       /* Setup operands */
+       rd = arm_bpf_get_reg64(dst, tmp, ctx);
 
        /* Do ARSH operation */
        if (val < 32) {
-               emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx);
-               emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx);
-               emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, val), ctx);
+               emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
+               emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
+               emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
        } else if (val == 32) {
-               emit(ARM_MOV_R(rd, rm), ctx);
-               emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx);
+               emit(ARM_MOV_R(rd[1], rd[0]), ctx);
+               emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
        } else {
-               emit(ARM_MOV_SI(rd, rm, SRTYPE_ASR, val - 32), ctx);
-               emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx);
+               emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_ASR, val - 32), ctx);
+               emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
        }
 
-       if (dstk) {
-               emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
-       }
+       arm_bpf_put_reg64(dst, rd, ctx);
 }
 
-static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
-                                   bool sstk, struct jit_ctx *ctx) {
-       const u8 *tmp = bpf2a32[TMP_REG_1];
-       const u8 *tmp2 = bpf2a32[TMP_REG_2];
+static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
+                                   struct jit_ctx *ctx) {
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       const s8 *tmp2 = bpf2a32[TMP_REG_2];
+       const s8 *rd, *rt;
+
        /* Setup operands for multiplication */
-       u8 rd = dstk ? tmp[1] : dst_lo;
-       u8 rm = dstk ? tmp[0] : dst_hi;
-       u8 rt = sstk ? tmp2[1] : src_lo;
-       u8 rn = sstk ? tmp2[0] : src_hi;
-
-       if (dstk) {
-               emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
-       }
-       if (sstk) {
-               emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
-               emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_hi)), ctx);
-       }
+       rd = arm_bpf_get_reg64(dst, tmp, ctx);
+       rt = arm_bpf_get_reg64(src, tmp2, ctx);
 
        /* Do Multiplication */
-       emit(ARM_MUL(ARM_IP, rd, rn), ctx);
-       emit(ARM_MUL(ARM_LR, rm, rt), ctx);
+       emit(ARM_MUL(ARM_IP, rd[1], rt[0]), ctx);
+       emit(ARM_MUL(ARM_LR, rd[0], rt[1]), ctx);
        emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);
 
-       emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx);
-       emit(ARM_ADD_R(rm, ARM_LR, rm), ctx);
-       if (dstk) {
-               emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
-       } else {
-               emit(ARM_MOV_R(rd, ARM_IP), ctx);
-       }
+       emit(ARM_UMULL(ARM_IP, rd[0], rd[1], rt[1]), ctx);
+       emit(ARM_ADD_R(rd[0], ARM_LR, rd[0]), ctx);
+
+       arm_bpf_put_reg32(dst_lo, ARM_IP, ctx);
+       arm_bpf_put_reg32(dst_hi, rd[0], ctx);
 }
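The emitted sequence implements the standard 32x32 decomposition; in equation form:

	/* (hi << 32 | lo) * (hi' << 32 | lo')  mod 2^64
	 *   = lo * lo'  +  ((lo * hi' + hi * lo') << 32)
	 * UMULL supplies the full 64-bit lo * lo' product; the two
	 * cross products can only affect the upper word.
	 */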
 
 /* *(size *)(dst + off) = src */
-static inline void emit_str_r(const u8 dst, const u8 src, bool dstk,
-                             const s32 off, struct jit_ctx *ctx, const u8 sz){
-       const u8 *tmp = bpf2a32[TMP_REG_1];
-       u8 rd = dstk ? tmp[1] : dst;
-
-       if (dstk)
-               emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
-       if (off) {
-               emit_a32_mov_i(tmp[0], off, false, ctx);
-               emit(ARM_ADD_R(tmp[0], rd, tmp[0]), ctx);
+static inline void emit_str_r(const s8 dst, const s8 src[],
+                             s32 off, struct jit_ctx *ctx, const u8 sz){
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       s32 off_max;
+       s8 rd;
+
+       rd = arm_bpf_get_reg32(dst, tmp[1], ctx);
+
+       if (sz == BPF_H)
+               off_max = 0xff;
+       else
+               off_max = 0xfff;
+
+       if (off < 0 || off > off_max) {
+               emit_a32_mov_i(tmp[0], off, ctx);
+               emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
                rd = tmp[0];
+               off = 0;
        }
        switch (sz) {
-       case BPF_W:
-               /* Store a Word */
-               emit(ARM_STR_I(src, rd, 0), ctx);
+       case BPF_B:
+               /* Store a Byte */
+               emit(ARM_STRB_I(src_lo, rd, off), ctx);
                break;
        case BPF_H:
                /* Store a HalfWord */
-               emit(ARM_STRH_I(src, rd, 0), ctx);
+               emit(ARM_STRH_I(src_lo, rd, off), ctx);
                break;
-       case BPF_B:
-               /* Store a Byte */
-               emit(ARM_STRB_I(src, rd, 0), ctx);
+       case BPF_W:
+               /* Store a Word */
+               emit(ARM_STR_I(src_lo, rd, off), ctx);
+               break;
+       case BPF_DW:
+               /* Store a Double Word */
+               emit(ARM_STR_I(src_lo, rd, off), ctx);
+               emit(ARM_STR_I(src_hi, rd, off + 4), ctx);
                break;
        }
 }
 
 /* dst = *(size*)(src + off) */
-static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
+static inline void emit_ldx_r(const s8 dst[], const s8 src,
                              s32 off, struct jit_ctx *ctx, const u8 sz){
-       const u8 *tmp = bpf2a32[TMP_REG_1];
-       const u8 *rd = dstk ? tmp : dst;
-       u8 rm = src;
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
+       s8 rm = src;
        s32 off_max;
 
        if (sz == BPF_H)
@@ -909,7 +1048,7 @@ static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
                off_max = 0xfff;
 
        if (off < 0 || off > off_max) {
-               emit_a32_mov_i(tmp[0], off, false, ctx);
+               emit_a32_mov_i(tmp[0], off, ctx);
                emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
                rm = tmp[0];
                off = 0;
@@ -921,17 +1060,17 @@ static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
        case BPF_B:
                /* Load a Byte */
                emit(ARM_LDRB_I(rd[1], rm, off), ctx);
-               emit_a32_mov_i(dst[0], 0, dstk, ctx);
+               emit_a32_mov_i(rd[0], 0, ctx);
                break;
        case BPF_H:
                /* Load a HalfWord */
                emit(ARM_LDRH_I(rd[1], rm, off), ctx);
-               emit_a32_mov_i(dst[0], 0, dstk, ctx);
+               emit_a32_mov_i(rd[0], 0, ctx);
                break;
        case BPF_W:
                /* Load a Word */
                emit(ARM_LDR_I(rd[1], rm, off), ctx);
-               emit_a32_mov_i(dst[0], 0, dstk, ctx);
+               emit_a32_mov_i(rd[0], 0, ctx);
                break;
        case BPF_DW:
                /* Load a Double Word */
@@ -939,10 +1078,7 @@ static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
                emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
                break;
        }
-       if (dstk)
-               emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx);
-       if (dstk && sz == BPF_DW)
-               emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx);
+       arm_bpf_put_reg64(dst, rd, ctx);
 }
 
 /* Arithmetic Operation */
@@ -981,64 +1117,66 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 {
 
        /* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
-       const u8 *r2 = bpf2a32[BPF_REG_2];
-       const u8 *r3 = bpf2a32[BPF_REG_3];
-       const u8 *tmp = bpf2a32[TMP_REG_1];
-       const u8 *tmp2 = bpf2a32[TMP_REG_2];
-       const u8 *tcc = bpf2a32[TCALL_CNT];
+       const s8 *r2 = bpf2a32[BPF_REG_2];
+       const s8 *r3 = bpf2a32[BPF_REG_3];
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       const s8 *tmp2 = bpf2a32[TMP_REG_2];
+       const s8 *tcc = bpf2a32[TCALL_CNT];
+       const s8 *tc;
        const int idx0 = ctx->idx;
 #define cur_offset (ctx->idx - idx0)
 #define jmp_offset (out_offset - (cur_offset) - 2)
-       u32 off, lo, hi;
+       u32 lo, hi;
+       s8 r_array, r_index;
+       int off;
 
        /* if (index >= array->map.max_entries)
         *      goto out;
         */
+       BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) >
+                    ARM_INST_LDST__IMM12);
        off = offsetof(struct bpf_array, map.max_entries);
-       /* array->map.max_entries */
-       emit_a32_mov_i(tmp[1], off, false, ctx);
-       emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
-       emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx);
+       r_array = arm_bpf_get_reg32(r2[1], tmp2[0], ctx);
        /* index is 32-bit for arrays */
-       emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
+       r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx);
+       /* array->map.max_entries */
+       emit(ARM_LDR_I(tmp[1], r_array, off), ctx);
        /* index >= array->map.max_entries */
-       emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx);
+       emit(ARM_CMP_R(r_index, tmp[1]), ctx);
        _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
 
+       /* tmp2[0] = array, tmp2[1] = index */
+
        /* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
         *      goto out;
         * tail_call_cnt++;
         */
        lo = (u32)MAX_TAIL_CALL_CNT;
        hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
-       emit(ARM_LDR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx);
-       emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx);
-       emit(ARM_CMP_I(tmp[0], hi), ctx);
-       _emit(ARM_COND_EQ, ARM_CMP_I(tmp[1], lo), ctx);
+       tc = arm_bpf_get_reg64(tcc, tmp, ctx);
+       emit(ARM_CMP_I(tc[0], hi), ctx);
+       _emit(ARM_COND_EQ, ARM_CMP_I(tc[1], lo), ctx);
        _emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
-       emit(ARM_ADDS_I(tmp[1], tmp[1], 1), ctx);
-       emit(ARM_ADC_I(tmp[0], tmp[0], 0), ctx);
-       emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx);
-       emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx);
+       emit(ARM_ADDS_I(tc[1], tc[1], 1), ctx);
+       emit(ARM_ADC_I(tc[0], tc[0], 0), ctx);
+       arm_bpf_put_reg64(tcc, tmp, ctx);
 
        /* prog = array->ptrs[index]
         * if (prog == NULL)
         *      goto out;
         */
-       off = offsetof(struct bpf_array, ptrs);
-       emit_a32_mov_i(tmp[1], off, false, ctx);
-       emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
-       emit(ARM_ADD_R(tmp[1], tmp2[1], tmp[1]), ctx);
-       emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
-       emit(ARM_MOV_SI(tmp[0], tmp2[1], SRTYPE_ASL, 2), ctx);
-       emit(ARM_LDR_R(tmp[1], tmp[1], tmp[0]), ctx);
+       BUILD_BUG_ON(imm8m(offsetof(struct bpf_array, ptrs)) < 0);
+       off = imm8m(offsetof(struct bpf_array, ptrs));
+       emit(ARM_ADD_I(tmp[1], r_array, off), ctx);
+       emit(ARM_LDR_R_SI(tmp[1], tmp[1], r_index, SRTYPE_ASL, 2), ctx);
        emit(ARM_CMP_I(tmp[1], 0), ctx);
        _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
 
        /* goto *(prog->bpf_func + prologue_size); */
+       BUILD_BUG_ON(offsetof(struct bpf_prog, bpf_func) >
+                    ARM_INST_LDST__IMM12);
        off = offsetof(struct bpf_prog, bpf_func);
-       emit_a32_mov_i(tmp2[1], off, false, ctx);
-       emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx);
+       emit(ARM_LDR_I(tmp[1], tmp[1], off), ctx);
        emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
        emit_bx_r(tmp[1], ctx);
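Roughly, the emitted fast path now reads as follows (a sketch; tmp[1] is r6 per the mapping above, and r_array/r_index stand for whichever registers arm_bpf_get_reg32() returned):

	/*	ldr	r6, [r_array, #offsetof(bpf_array, map.max_entries)]
	 *	cmp	r_index, r6
	 *	bcs	out
	 *	...				; tail_call_cnt check/increment
	 *	add	r6, r_array, #offsetof(bpf_array, ptrs)
	 *	ldr	r6, [r6, r_index, lsl #2]
	 *	cmp	r6, #0
	 *	beq	out
	 *	ldr	r6, [r6, #offsetof(bpf_prog, bpf_func)]
	 *	add	r6, r6, #prologue_bytes
	 *	bx	r6
	 */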
 
@@ -1059,7 +1197,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
 {
 #if __LINUX_ARM_ARCH__ < 6
-       const u8 *tmp2 = bpf2a32[TMP_REG_2];
+       const s8 *tmp2 = bpf2a32[TMP_REG_2];
 
        emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
        emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx);
@@ -1074,7 +1212,7 @@ static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
 static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
 {
 #if __LINUX_ARM_ARCH__ < 6
-       const u8 *tmp2 = bpf2a32[TMP_REG_2];
+       const s8 *tmp2 = bpf2a32[TMP_REG_2];
 
        emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
        emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx);
@@ -1094,28 +1232,27 @@ static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
 }
 
 // push a 64-bit BPF register pair (from scratch space if stacked) onto the stack
-static inline void emit_push_r64(const u8 src[], const u8 shift,
-               struct jit_ctx *ctx)
+static inline void emit_push_r64(const s8 src[], struct jit_ctx *ctx)
 {
-       const u8 *tmp2 = bpf2a32[TMP_REG_2];
+       const s8 *tmp2 = bpf2a32[TMP_REG_2];
+       const s8 *rt;
        u16 reg_set = 0;
 
-       emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(src[1]+shift)), ctx);
-       emit(ARM_LDR_I(tmp2[0], ARM_SP, STACK_VAR(src[0]+shift)), ctx);
+       rt = arm_bpf_get_reg64(src, tmp2, ctx);
 
-       reg_set = (1 << tmp2[1]) | (1 << tmp2[0]);
+       reg_set = (1 << rt[1]) | (1 << rt[0]);
        emit(ARM_PUSH(reg_set), ctx);
 }
 
 static void build_prologue(struct jit_ctx *ctx)
 {
-       const u8 r0 = bpf2a32[BPF_REG_0][1];
-       const u8 r2 = bpf2a32[BPF_REG_1][1];
-       const u8 r3 = bpf2a32[BPF_REG_1][0];
-       const u8 r4 = bpf2a32[BPF_REG_6][1];
-       const u8 fplo = bpf2a32[BPF_REG_FP][1];
-       const u8 fphi = bpf2a32[BPF_REG_FP][0];
-       const u8 *tcc = bpf2a32[TCALL_CNT];
+       const s8 r0 = bpf2a32[BPF_REG_0][1];
+       const s8 r2 = bpf2a32[BPF_REG_1][1];
+       const s8 r3 = bpf2a32[BPF_REG_1][0];
+       const s8 r4 = bpf2a32[BPF_REG_6][1];
+       const s8 fplo = bpf2a32[BPF_REG_FP][1];
+       const s8 fphi = bpf2a32[BPF_REG_FP][0];
+       const s8 *tcc = bpf2a32[TCALL_CNT];
 
        /* Save callee saved registers. */
 #ifdef CONFIG_FRAME_POINTER
@@ -1136,8 +1273,8 @@ static void build_prologue(struct jit_ctx *ctx)
        emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
 
        /* Set up BPF prog stack base register */
-       emit_a32_mov_r(fplo, ARM_IP, true, false, ctx);
-       emit_a32_mov_i(fphi, 0, true, ctx);
+       emit_a32_mov_r(fplo, ARM_IP, ctx);
+       emit_a32_mov_i(fphi, 0, ctx);
 
        /* mov r4, 0 */
        emit(ARM_MOV_I(r4, 0), ctx);
@@ -1146,8 +1283,8 @@ static void build_prologue(struct jit_ctx *ctx)
        emit(ARM_MOV_R(r3, r4), ctx);
        emit(ARM_MOV_R(r2, r0), ctx);
        /* Initialize Tail Count */
-       emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[0])), ctx);
-       emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[1])), ctx);
+       emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[0])), ctx);
+       emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[1])), ctx);
        /* end of prologue */
 }
 
@@ -1178,17 +1315,16 @@ static void build_epilogue(struct jit_ctx *ctx)
 static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 {
        const u8 code = insn->code;
-       const u8 *dst = bpf2a32[insn->dst_reg];
-       const u8 *src = bpf2a32[insn->src_reg];
-       const u8 *tmp = bpf2a32[TMP_REG_1];
-       const u8 *tmp2 = bpf2a32[TMP_REG_2];
+       const s8 *dst = bpf2a32[insn->dst_reg];
+       const s8 *src = bpf2a32[insn->src_reg];
+       const s8 *tmp = bpf2a32[TMP_REG_1];
+       const s8 *tmp2 = bpf2a32[TMP_REG_2];
        const s16 off = insn->off;
        const s32 imm = insn->imm;
        const int i = insn - ctx->prog->insnsi;
        const bool is64 = BPF_CLASS(code) == BPF_ALU64;
-       const bool dstk = is_on_stack(insn->dst_reg);
-       const bool sstk = is_on_stack(insn->src_reg);
-       u8 rd, rt, rm, rn;
+       const s8 *rd, *rs;
+       s8 rd_lo, rt, rm, rn;
        s32 jmp_offset;
 
 #define check_imm(bits, imm) do {                              \
@@ -1211,11 +1347,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        case BPF_ALU64 | BPF_MOV | BPF_X:
                switch (BPF_SRC(code)) {
                case BPF_X:
-                       emit_a32_mov_r64(is64, dst, src, dstk, sstk, ctx);
+                       emit_a32_mov_r64(is64, dst, src, ctx);
                        break;
                case BPF_K:
                        /* Sign-extend immediate value to destination reg */
-                       emit_a32_mov_i64(is64, dst, imm, dstk, ctx);
+                       emit_a32_mov_se_i64(is64, dst, imm, ctx);
                        break;
                }
                break;
@@ -1255,8 +1391,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        case BPF_ALU64 | BPF_XOR | BPF_X:
                switch (BPF_SRC(code)) {
                case BPF_X:
-                       emit_a32_alu_r64(is64, dst, src, dstk, sstk,
-                                        ctx, BPF_OP(code));
+                       emit_a32_alu_r64(is64, dst, src, ctx, BPF_OP(code));
                        break;
                case BPF_K:
                        /* Move immediate value to the temporary register
@@ -1265,9 +1400,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                         * value into temporary reg and then it would be
                         * safe to do the operation on it.
                         */
-                       emit_a32_mov_i64(is64, tmp2, imm, false, ctx);
-                       emit_a32_alu_r64(is64, dst, tmp2, dstk, false,
-                                        ctx, BPF_OP(code));
+                       emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
+                       emit_a32_alu_r64(is64, dst, tmp2, ctx, BPF_OP(code));
                        break;
                }
                break;
@@ -1277,26 +1411,22 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        case BPF_ALU | BPF_DIV | BPF_X:
        case BPF_ALU | BPF_MOD | BPF_K:
        case BPF_ALU | BPF_MOD | BPF_X:
-               rt = src_lo;
-               rd = dstk ? tmp2[1] : dst_lo;
-               if (dstk)
-                       emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+               rd_lo = arm_bpf_get_reg32(dst_lo, tmp2[1], ctx);
                switch (BPF_SRC(code)) {
                case BPF_X:
-                       rt = sstk ? tmp2[0] : rt;
-                       if (sstk)
-                               emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)),
-                                    ctx);
+                       rt = arm_bpf_get_reg32(src_lo, tmp2[0], ctx);
                        break;
                case BPF_K:
                        rt = tmp2[0];
-                       emit_a32_mov_i(rt, imm, false, ctx);
+                       emit_a32_mov_i(rt, imm, ctx);
+                       break;
+               default:
+                       rt = src_lo;
                        break;
                }
-               emit_udivmod(rd, rd, rt, ctx, BPF_OP(code));
-               if (dstk)
-                       emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
-               emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+               emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code));
+               arm_bpf_put_reg32(dst_lo, rd_lo, ctx);
+               emit_a32_mov_i(dst_hi, 0, ctx);
                break;
        case BPF_ALU64 | BPF_DIV | BPF_K:
        case BPF_ALU64 | BPF_DIV | BPF_X:
@@ -1310,54 +1440,54 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                if (unlikely(imm > 31))
                        return -EINVAL;
                if (imm)
-                       emit_a32_alu_i(dst_lo, imm, dstk, ctx, BPF_OP(code));
-               emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+                       emit_a32_alu_i(dst_lo, imm, ctx, BPF_OP(code));
+               emit_a32_mov_i(dst_hi, 0, ctx);
                break;
        /* dst = dst << imm */
        case BPF_ALU64 | BPF_LSH | BPF_K:
                if (unlikely(imm > 63))
                        return -EINVAL;
-               emit_a32_lsh_i64(dst, dstk, imm, ctx);
+               emit_a32_lsh_i64(dst, imm, ctx);
                break;
        /* dst = dst >> imm */
        case BPF_ALU64 | BPF_RSH | BPF_K:
                if (unlikely(imm > 63))
                        return -EINVAL;
-               emit_a32_rsh_i64(dst, dstk, imm, ctx);
+               emit_a32_rsh_i64(dst, imm, ctx);
                break;
        /* dst = dst << src */
        case BPF_ALU64 | BPF_LSH | BPF_X:
-               emit_a32_lsh_r64(dst, src, dstk, sstk, ctx);
+               emit_a32_lsh_r64(dst, src, ctx);
                break;
        /* dst = dst >> src */
        case BPF_ALU64 | BPF_RSH | BPF_X:
-               emit_a32_rsh_r64(dst, src, dstk, sstk, ctx);
+               emit_a32_rsh_r64(dst, src, ctx);
                break;
        /* dst = dst >> src (signed) */
        case BPF_ALU64 | BPF_ARSH | BPF_X:
-               emit_a32_arsh_r64(dst, src, dstk, sstk, ctx);
+               emit_a32_arsh_r64(dst, src, ctx);
                break;
        /* dst = dst >> imm (signed) */
        case BPF_ALU64 | BPF_ARSH | BPF_K:
                if (unlikely(imm > 63))
                        return -EINVAL;
-               emit_a32_arsh_i64(dst, dstk, imm, ctx);
+               emit_a32_arsh_i64(dst, imm, ctx);
                break;
        /* dst = ~dst */
        case BPF_ALU | BPF_NEG:
-               emit_a32_alu_i(dst_lo, 0, dstk, ctx, BPF_OP(code));
-               emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+               emit_a32_alu_i(dst_lo, 0, ctx, BPF_OP(code));
+               emit_a32_mov_i(dst_hi, 0, ctx);
                break;
        /* dst = ~dst (64 bit) */
        case BPF_ALU64 | BPF_NEG:
-               emit_a32_neg64(dst, dstk, ctx);
+               emit_a32_neg64(dst, ctx);
                break;
        /* dst = dst * src/imm */
        case BPF_ALU64 | BPF_MUL | BPF_X:
        case BPF_ALU64 | BPF_MUL | BPF_K:
                switch (BPF_SRC(code)) {
                case BPF_X:
-                       emit_a32_mul_r64(dst, src, dstk, sstk, ctx);
+                       emit_a32_mul_r64(dst, src, ctx);
                        break;
                case BPF_K:
                        /* Move immediate value to the temporary register
@@ -1366,8 +1496,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                         * reg then it would be safe to do the operation
                         * on it.
                         */
-                       emit_a32_mov_i64(is64, tmp2, imm, false, ctx);
-                       emit_a32_mul_r64(dst, tmp2, dstk, false, ctx);
+                       emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
+                       emit_a32_mul_r64(dst, tmp2, ctx);
                        break;
                }
                break;
@@ -1375,25 +1505,20 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        /* dst = htobe(dst) */
        case BPF_ALU | BPF_END | BPF_FROM_LE:
        case BPF_ALU | BPF_END | BPF_FROM_BE:
-               rd = dstk ? tmp[0] : dst_hi;
-               rt = dstk ? tmp[1] : dst_lo;
-               if (dstk) {
-                       emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
-                       emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
-               }
+               rd = arm_bpf_get_reg64(dst, tmp, ctx);
                if (BPF_SRC(code) == BPF_FROM_LE)
                        goto emit_bswap_uxt;
                switch (imm) {
                case 16:
-                       emit_rev16(rt, rt, ctx);
+                       emit_rev16(rd[1], rd[1], ctx);
                        goto emit_bswap_uxt;
                case 32:
-                       emit_rev32(rt, rt, ctx);
+                       emit_rev32(rd[1], rd[1], ctx);
                        goto emit_bswap_uxt;
                case 64:
-                       emit_rev32(ARM_LR, rt, ctx);
-                       emit_rev32(rt, rd, ctx);
-                       emit(ARM_MOV_R(rd, ARM_LR), ctx);
+                       emit_rev32(ARM_LR, rd[1], ctx);
+                       emit_rev32(rd[1], rd[0], ctx);
+                       emit(ARM_MOV_R(rd[0], ARM_LR), ctx);
                        break;
                }
                goto exit;
@@ -1402,36 +1527,30 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                case 16:
                        /* zero-extend 16 bits into 64 bits */
 #if __LINUX_ARM_ARCH__ < 6
-                       emit_a32_mov_i(tmp2[1], 0xffff, false, ctx);
-                       emit(ARM_AND_R(rt, rt, tmp2[1]), ctx);
+                       emit_a32_mov_i(tmp2[1], 0xffff, ctx);
+                       emit(ARM_AND_R(rd[1], rd[1], tmp2[1]), ctx);
 #else /* ARMv6+ */
-                       emit(ARM_UXTH(rt, rt), ctx);
+                       emit(ARM_UXTH(rd[1], rd[1]), ctx);
 #endif
-                       emit(ARM_EOR_R(rd, rd, rd), ctx);
+                       emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
                        break;
                case 32:
                        /* zero-extend 32 bits into 64 bits */
-                       emit(ARM_EOR_R(rd, rd, rd), ctx);
+                       emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
                        break;
                case 64:
                        /* nop */
                        break;
                }
 exit:
-               if (dstk) {
-                       emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
-                       emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
-               }
+               arm_bpf_put_reg64(dst, rd, ctx);
                break;
        /* dst = imm64 */
        case BPF_LD | BPF_IMM | BPF_DW:
        {
-               const struct bpf_insn insn1 = insn[1];
-               u32 hi, lo = imm;
+               u64 val = (u32)imm | (u64)insn[1].imm << 32;
 
-               hi = insn1.imm;
-               emit_a32_mov_i(dst_lo, lo, dstk, ctx);
-               emit_a32_mov_i(dst_hi, hi, dstk, ctx);
+               emit_a32_mov_i64(dst, val, ctx);
 
                return 1;
        }
@@ -1440,10 +1559,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        case BPF_LDX | BPF_MEM | BPF_H:
        case BPF_LDX | BPF_MEM | BPF_B:
        case BPF_LDX | BPF_MEM | BPF_DW:
-               rn = sstk ? tmp2[1] : src_lo;
-               if (sstk)
-                       emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
-               emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code));
+               rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
+               emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
                break;
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_W:
@@ -1453,18 +1570,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                switch (BPF_SIZE(code)) {
                case BPF_DW:
                        /* Sign-extend immediate value into temp reg */
-                       emit_a32_mov_i64(true, tmp2, imm, false, ctx);
-                       emit_str_r(dst_lo, tmp2[1], dstk, off, ctx, BPF_W);
-                       emit_str_r(dst_lo, tmp2[0], dstk, off+4, ctx, BPF_W);
+                       emit_a32_mov_se_i64(true, tmp2, imm, ctx);
                        break;
                case BPF_W:
                case BPF_H:
                case BPF_B:
-                       emit_a32_mov_i(tmp2[1], imm, false, ctx);
-                       emit_str_r(dst_lo, tmp2[1], dstk, off, ctx,
-                                  BPF_SIZE(code));
+                       emit_a32_mov_i(tmp2[1], imm, ctx);
                        break;
                }
+               emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code));
                break;
        /* STX XADD: lock *(u32 *)(dst + off) += src */
        case BPF_STX | BPF_XADD | BPF_W:
@@ -1476,25 +1590,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        case BPF_STX | BPF_MEM | BPF_H:
        case BPF_STX | BPF_MEM | BPF_B:
        case BPF_STX | BPF_MEM | BPF_DW:
-       {
-               u8 sz = BPF_SIZE(code);
-
-               rn = sstk ? tmp2[1] : src_lo;
-               rm = sstk ? tmp2[0] : src_hi;
-               if (sstk) {
-                       emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
-                       emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx);
-               }
-
-               /* Store the value */
-               if (BPF_SIZE(code) == BPF_DW) {
-                       emit_str_r(dst_lo, rn, dstk, off, ctx, BPF_W);
-                       emit_str_r(dst_lo, rm, dstk, off+4, ctx, BPF_W);
-               } else {
-                       emit_str_r(dst_lo, rn, dstk, off, ctx, sz);
-               }
+               rs = arm_bpf_get_reg64(src, tmp2, ctx);
+               emit_str_r(dst_lo, rs, off, ctx, BPF_SIZE(code));
                break;
-       }
        /* PC += off if dst == src */
        /* PC += off if dst > src */
        /* PC += off if dst >= src */
@@ -1518,12 +1616,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        case BPF_JMP | BPF_JSLT | BPF_X:
        case BPF_JMP | BPF_JSLE | BPF_X:
                /* Setup source registers */
-               rm = sstk ? tmp2[0] : src_hi;
-               rn = sstk ? tmp2[1] : src_lo;
-               if (sstk) {
-                       emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
-                       emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx);
-               }
+               rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx);
+               rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
                goto go_jmp;
        /* PC += off if dst == imm */
        /* PC += off if dst > imm */
@@ -1552,18 +1646,13 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                rm = tmp2[0];
                rn = tmp2[1];
                /* Sign-extend immediate value */
-               emit_a32_mov_i64(true, tmp2, imm, false, ctx);
+               emit_a32_mov_se_i64(true, tmp2, imm, ctx);
 go_jmp:
                /* Setup destination register */
-               rd = dstk ? tmp[0] : dst_hi;
-               rt = dstk ? tmp[1] : dst_lo;
-               if (dstk) {
-                       emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
-                       emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
-               }
+               rd = arm_bpf_get_reg64(dst, tmp, ctx);
 
                /* Check for the condition */
-               emit_ar_r(rd, rt, rm, rn, ctx, BPF_OP(code));
+               emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code));
 
                /* Setup JUMP instruction */
                jmp_offset = bpf2a32_offset(i+off, i, ctx);
@@ -1619,21 +1708,21 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        /* function call */
        case BPF_JMP | BPF_CALL:
        {
-               const u8 *r0 = bpf2a32[BPF_REG_0];
-               const u8 *r1 = bpf2a32[BPF_REG_1];
-               const u8 *r2 = bpf2a32[BPF_REG_2];
-               const u8 *r3 = bpf2a32[BPF_REG_3];
-               const u8 *r4 = bpf2a32[BPF_REG_4];
-               const u8 *r5 = bpf2a32[BPF_REG_5];
+               const s8 *r0 = bpf2a32[BPF_REG_0];
+               const s8 *r1 = bpf2a32[BPF_REG_1];
+               const s8 *r2 = bpf2a32[BPF_REG_2];
+               const s8 *r3 = bpf2a32[BPF_REG_3];
+               const s8 *r4 = bpf2a32[BPF_REG_4];
+               const s8 *r5 = bpf2a32[BPF_REG_5];
                const u32 func = (u32)__bpf_call_base + (u32)imm;
 
-               emit_a32_mov_r64(true, r0, r1, false, false, ctx);
-               emit_a32_mov_r64(true, r1, r2, false, true, ctx);
-               emit_push_r64(r5, 0, ctx);
-               emit_push_r64(r4, 8, ctx);
-               emit_push_r64(r3, 16, ctx);
+               emit_a32_mov_r64(true, r0, r1, ctx);
+               emit_a32_mov_r64(true, r1, r2, ctx);
+               emit_push_r64(r5, ctx);
+               emit_push_r64(r4, ctx);
+               emit_push_r64(r3, ctx);
 
-               emit_a32_mov_i(tmp[1], func, false, ctx);
+               emit_a32_mov_i(tmp[1], func, ctx);
                emit_blx_r(tmp[1], ctx);
 
                emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); // callee clean
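In the BPF_CALL case the 32-bit JIT bridges the eBPF calling convention to AAPCS: the first two 64-bit arguments travel in ARM register pairs, while R3-R5 are pushed as three 64-bit values. The post-call cleanup therefore gives 3 * 8 = 24 bytes back to SP, which is exactly the imm8m(24) above:

	/* Stack adjustment after the call (as emitted above): */
	emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(3 * 8)), ctx);	/* pop r3..r5 */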
@@ -1745,6 +1834,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
        memset(&ctx, 0, sizeof(ctx));
        ctx.prog = prog;
+       ctx.cpu_architecture = cpu_architecture();
 
        /* If we cannot allocate memory for offsets[], then
         * we must fall back to the interpreter
@@ -1844,7 +1934,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                /* there are 2 passes here */
                bpf_jit_dump(prog->len, image_size, 2, ctx.target);
 
-       set_memory_ro((unsigned long)header, header->pages);
+       bpf_jit_binary_lock_ro(header);
        prog->bpf_func = (void *)ctx.target;
        prog->jited = 1;
        prog->jited_len = image_size;
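Across build_insn() the open-coded "if (dstk) load/store STACK_VAR(...)" sequences are replaced by arm_bpf_get_reg32()/arm_bpf_get_reg64() and arm_bpf_put_reg64(), which hand back either the real ARM registers or a temporary pair filled from the BPF register's stack slot. A minimal sketch of the shape of such a helper; the is_stacked() predicate and the exact loads are assumptions here, since the real definitions live in an earlier, unshown hunk of this diff:

	static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp,
					   struct jit_ctx *ctx)
	{
		if (is_stacked(reg[1])) {	/* assumed predicate */
			/* load both halves from the register's stack slot */
			emit(ARM_LDR_I(tmp[1], ARM_SP, STACK_VAR(reg[1])), ctx);
			emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(reg[0])), ctx);
			reg = tmp;
		}
		return reg;
	}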
index d5cf5f6208aa7c993a191564699c00216762c0bb..f4e58bcdaa43825f8b7f0c75dde8b360f6ff5a39 100644 (file)
 #define ARM_INST_EOR_R         0x00200000
 #define ARM_INST_EOR_I         0x02200000
 
-#define ARM_INST_LDRB_I                0x05d00000
+#define ARM_INST_LDST__U       0x00800000
+#define ARM_INST_LDST__IMM12   0x00000fff
+#define ARM_INST_LDRB_I                0x05500000
 #define ARM_INST_LDRB_R                0x07d00000
-#define ARM_INST_LDRH_I                0x01d000b0
+#define ARM_INST_LDRD_I                0x014000d0
+#define ARM_INST_LDRH_I                0x015000b0
 #define ARM_INST_LDRH_R                0x019000b0
-#define ARM_INST_LDR_I         0x05900000
+#define ARM_INST_LDR_I         0x05100000
 #define ARM_INST_LDR_R         0x07900000
 
 #define ARM_INST_LDM           0x08900000
 #define ARM_INST_SBC_R         0x00c00000
 #define ARM_INST_SBCS_R                0x00d00000
 
-#define ARM_INST_STR_I         0x05800000
-#define ARM_INST_STRB_I                0x05c00000
-#define ARM_INST_STRH_I                0x01c000b0
+#define ARM_INST_STR_I         0x05000000
+#define ARM_INST_STRB_I                0x05400000
+#define ARM_INST_STRD_I                0x014000f0
+#define ARM_INST_STRH_I                0x014000b0
 
 #define ARM_INST_TST_R         0x01100000
 #define ARM_INST_TST_I         0x03100000
 #define ARM_EOR_R(rd, rn, rm)  _AL3_R(ARM_INST_EOR, rd, rn, rm)
 #define ARM_EOR_I(rd, rn, imm) _AL3_I(ARM_INST_EOR, rd, rn, imm)
 
-#define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \
-                                | ((off) & 0xfff))
-#define ARM_LDR_R(rt, rn, rm)  (ARM_INST_LDR_R | (rt) << 12 | (rn) << 16 \
+#define ARM_LDR_R(rt, rn, rm)  (ARM_INST_LDR_R | ARM_INST_LDST__U \
+                                | (rt) << 12 | (rn) << 16 \
                                 | (rm))
-#define ARM_LDRB_I(rt, rn, off)        (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \
-                                | (off))
-#define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | (rt) << 12 | (rn) << 16 \
+#define ARM_LDR_R_SI(rt, rn, rm, type, imm) \
+                               (ARM_INST_LDR_R | ARM_INST_LDST__U \
+                                | (rt) << 12 | (rn) << 16 \
+                                | (imm) << 7 | (type) << 5 | (rm))
+#define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | ARM_INST_LDST__U \
+                                | (rt) << 12 | (rn) << 16 \
                                 | (rm))
-#define ARM_LDRH_I(rt, rn, off)        (ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \
-                                | (((off) & 0xf0) << 4) | ((off) & 0xf))
-#define ARM_LDRH_R(rt, rn, rm) (ARM_INST_LDRH_R | (rt) << 12 | (rn) << 16 \
+#define ARM_LDRH_R(rt, rn, rm) (ARM_INST_LDRH_R | ARM_INST_LDST__U \
+                                | (rt) << 12 | (rn) << 16 \
                                 | (rm))
 
 #define ARM_LDM(rn, regs)      (ARM_INST_LDM | (rn) << 16 | (regs))
 #define ARM_SUBS_I(rd, rn, imm)        _AL3_I(ARM_INST_SUBS, rd, rn, imm)
 #define ARM_SBC_I(rd, rn, imm) _AL3_I(ARM_INST_SBC, rd, rn, imm)
 
-#define ARM_STR_I(rt, rn, off) (ARM_INST_STR_I | (rt) << 12 | (rn) << 16 \
-                                | ((off) & 0xfff))
-#define ARM_STRH_I(rt, rn, off)        (ARM_INST_STRH_I | (rt) << 12 | (rn) << 16 \
-                                | (((off) & 0xf0) << 4) | ((off) & 0xf))
-#define ARM_STRB_I(rt, rn, off)        (ARM_INST_STRB_I | (rt) << 12 | (rn) << 16 \
-                                | (((off) & 0xf0) << 4) | ((off) & 0xf))
-
 #define ARM_TST_R(rn, rm)      _AL3_R(ARM_INST_TST, 0, rn, rm)
 #define ARM_TST_I(rn, imm)     _AL3_I(ARM_INST_TST, 0, rn, imm)
 
index 8073625371f5d22defae1efe6322a717201e8010..07060e5b58641cc008f41aa927c2e6043ab6afbb 100644 (file)
@@ -59,6 +59,9 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 
 static __read_mostly unsigned int xen_events_irq;
 
+uint32_t xen_start_flags;
+EXPORT_SYMBOL(xen_start_flags);
+
 int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t *gfn, int nr,
@@ -293,9 +296,7 @@ void __init xen_early_init(void)
        xen_setup_features();
 
        if (xen_feature(XENFEAT_dom0))
-               xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
-       else
-               xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);
+               xen_start_flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
 
        if (!console_set_on_cmdline && !xen_initial_domain())
                add_preferred_console("hvc", 0, NULL);
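Since Arm guests have no real start_info page, the privileged/initial-domain bits now live in a dedicated xen_start_flags word rather than being faked into xen_start_info->flags. Code elsewhere can test it directly; a sketch (SIF_INITDOMAIN comes from the Xen interface headers):

	/* Sketch: is this the initial (control) domain? */
	static bool xen_is_initial_domain(void)
	{
		return !!(xen_start_flags & SIF_INITDOMAIN);
	}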
index e6b059378dc04784927a9b996f24213685bf406a..67dac595dc72ebdeffcd5b6bffd50d115cce8cbc 100644 (file)
@@ -309,8 +309,7 @@ spi0: spi@ffda4000 {
                        interrupts = <0 99 4>;
                        resets = <&rst SPIM0_RESET>;
                        reg-io-width = <4>;
-                       num-chipselect = <4>;
-                       bus-num = <0>;
+                       num-cs = <4>;
                        status = "disabled";
                };
 
@@ -322,8 +321,7 @@ spi1: spi@ffda5000 {
                        interrupts = <0 100 4>;
                        resets = <&rst SPIM1_RESET>;
                        reg-io-width = <4>;
-                       num-chipselect = <4>;
-                       bus-num = <0>;
+                       num-cs = <4>;
                        status = "disabled";
                };
 
index 4b3331fbfe39d7b81d9466fb718975b6265c8e5e..dff9b15eb3c0b63a70c65070c465305c35985dee 100644 (file)
@@ -66,9 +66,22 @@ wifi32k: wifi32k {
 
 &ethmac {
        status = "okay";
-       phy-mode = "rgmii";
        pinctrl-0 = <&eth_rgmii_y_pins>;
        pinctrl-names = "default";
+       phy-handle = <&eth_phy0>;
+       phy-mode = "rgmii";
+
+       mdio {
+               compatible = "snps,dwmac-mdio";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               eth_phy0: ethernet-phy@0 {
+                       /* Realtek RTL8211F (0x001cc916) */
+                       reg = <0>;
+                       eee-broken-1000t;
+               };
+       };
 };
 
 &uart_A {
index fee87737a201f1121fe7a3ad3cd70c1d20415a0d..67d7115e4effbde75173aa4a4c07ae890b3183c5 100644 (file)
@@ -132,7 +132,7 @@ apb: apb@ffe00000 {
 
                        sd_emmc_b: sd@5000 {
                                compatible = "amlogic,meson-axg-mmc";
-                               reg = <0x0 0x5000 0x0 0x2000>;
+                               reg = <0x0 0x5000 0x0 0x800>;
                                interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                                clocks = <&clkc CLKID_SD_EMMC_B>,
@@ -144,7 +144,7 @@ sd_emmc_b: sd@5000 {
 
                        sd_emmc_c: mmc@7000 {
                                compatible = "amlogic,meson-axg-mmc";
-                               reg = <0x0 0x7000 0x0 0x2000>;
+                               reg = <0x0 0x7000 0x0 0x800>;
                                interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                                clocks = <&clkc CLKID_SD_EMMC_C>,
index 3c31e21cbed7fcdde5bbdf030fcd6c194be5033d..b8dc4dbb391b669fc13eb13b1a24f01d24ab252f 100644 (file)
@@ -35,6 +35,12 @@ secmon_reserved: secmon@10000000 {
                        no-map;
                };
 
+               /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
+               secmon_reserved_alt: secmon@5000000 {
+                       reg = <0x0 0x05000000 0x0 0x300000>;
+                       no-map;
+               };
+
                linux,cma {
                        compatible = "shared-dma-pool";
                        reusable;
@@ -457,21 +463,21 @@ apb: apb@d0000000 {
 
                        sd_emmc_a: mmc@70000 {
                                compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
-                               reg = <0x0 0x70000 0x0 0x2000>;
+                               reg = <0x0 0x70000 0x0 0x800>;
                                interrupts = <GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        sd_emmc_b: mmc@72000 {
                                compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
-                               reg = <0x0 0x72000 0x0 0x2000>;
+                               reg = <0x0 0x72000 0x0 0x800>;
                                interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        sd_emmc_c: mmc@74000 {
                                compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
-                               reg = <0x0 0x74000 0x0 0x2000>;
+                               reg = <0x0 0x74000 0x0 0x800>;
                                interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
index eb327664a4d8c38c196b7cec2dbbe5e5ac2c147f..6aaafff674f97f56625c2da8ea6a5b7dd10eb2d8 100644 (file)
@@ -6,7 +6,7 @@
 
 &apb {
        mali: gpu@c0000 {
-               compatible = "amlogic,meson-gxbb-mali", "arm,mali-450";
+               compatible = "amlogic,meson-gxl-mali", "arm,mali-450";
                reg = <0x0 0xc0000 0x0 0x40000>;
                interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
index 3e3eb31748a35a7790a9dc90e56971f004660298..f63bceb88caafa249d84de963c3daa034fb842b7 100644 (file)
@@ -234,9 +234,6 @@ &sd_emmc_b {
 
        bus-width = <4>;
        cap-sd-highspeed;
-       sd-uhs-sdr12;
-       sd-uhs-sdr25;
-       sd-uhs-sdr50;
        max-frequency = <100000000>;
        disable-wp;
 
index 0cfd701809dec578ac31f5f68a7fcfbc21822619..a1b31013ab6e3494d810619fadf81752a67b94f4 100644 (file)
@@ -189,3 +189,10 @@ &uart_AO {
 &usb0 {
        status = "okay";
 };
+
+&usb2_phy0 {
+       /*
+        * HDMI_5V is also used as supply for the USB VBUS.
+        */
+       phy-supply = <&hdmi_5v>;
+};
index 27538eea547b19a0fe8c14a97de4aa303ba63978..c87a80e9bcc6a80bc0f8a59c43a32d6485facafe 100644 (file)
 / {
        compatible = "amlogic,meson-gxl";
 
-       reserved-memory {
-               /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
-               secmon_reserved_alt: secmon@5000000 {
-                       reg = <0x0 0x05000000 0x0 0x300000>;
-                       no-map;
-               };
-       };
-
        soc {
                usb0: usb@c9000000 {
                        status = "disabled";
index 4a2a6af8e752dbbe3a17fa02861fb3603d7c44cb..4057197048dcbbacaee733c6067cc677fd1ad54d 100644 (file)
@@ -118,7 +118,7 @@ pcie0: pcie@20020000 {
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <0>;
 
@@ -149,7 +149,7 @@ pcie4: pcie@50020000 {
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <4>;
 
@@ -566,7 +566,7 @@ i2c0: i2c@66080000 {
                        reg = <0x66080000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 394 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
@@ -594,7 +594,7 @@ i2c1: i2c@660b0000 {
                        reg = <0x660b0000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 395 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
index eb6f08cdbd796c3d764393f9e2e70db2129b0e28..77efa28c4dd53db718b22e64569385f6d92c2feb 100644 (file)
@@ -43,6 +43,10 @@ &gphy0 {
        enet-phy-lane-swap;
 };
 
+&sdio0 {
+       mmc-ddr-1_8v;
+};
+
 &uart2 {
        status = "okay";
 };
index 5084b037320fd9cb65133ca929517062a245af3b..55ba495ef56e1f54b518483bc9e5369fcb03b441 100644 (file)
@@ -42,3 +42,7 @@ / {
 &gphy0 {
        enet-phy-lane-swap;
 };
+
+&sdio0 {
+       mmc-ddr-1_8v;
+};
index 99aaff0b6d72b6bc971863411b80caa3dd165048..b203152ad67ca18b4421bb035b2d13d32d7f9be5 100644 (file)
@@ -409,7 +409,7 @@ i2c0: i2c@b0000 {
                        reg = <0x000b0000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 177 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
@@ -453,7 +453,7 @@ i2c1: i2c@e0000 {
                        reg = <0x000e0000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 178 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
index 4dd06767f839e07d588fbf34465f828b55949775..a56a408e9bf754802c2f0cfc26633d9f927dff52 100644 (file)
@@ -11,13 +11,14 @@ fman0: fman@1a00000 {
        #size-cells = <1>;
        cell-index = <0>;
        compatible = "fsl,fman";
-       ranges = <0x0 0x0 0x1a00000 0x100000>;
-       reg = <0x0 0x1a00000 0x0 0x100000>;
+       ranges = <0x0 0x0 0x1a00000 0xfe000>;
+       reg = <0x0 0x1a00000 0x0 0xfe000>;
        interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&clockgen 3 0>;
        clock-names = "fmanclk";
        fsl,qman-channel-range = <0x800 0x10>;
+       ptimer-handle = <&ptp_timer0>;
 
        muram@0 {
                compatible = "fsl,fman-muram";
@@ -73,9 +74,10 @@ xmdio0: mdio@fd000 {
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xfd000 0x1000>;
        };
+};
 
-       ptp_timer0: ptp-timer@fe000 {
-               compatible = "fsl,fman-ptp-timer";
-               reg = <0xfe000 0x1000>;
-       };
+ptp_timer0: ptp-timer@1afe000 {
+       compatible = "fsl,fman-ptp-timer";
+       reg = <0x0 0x1afe000 0x0 0x1000>;
+       interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
 };
index c6999624ed8abdcf4a7f8cea12635eb0606e6432..68c5a6c819aef2c3fbe8b59aac695cfb0c3a3a77 100644 (file)
@@ -585,6 +585,8 @@ &dwmmc2 { /* WIFI */
        vmmc-supply = <&wlan_en>;
        ti,non-removable;
        non-removable;
+       cap-power-off-card;
+       keep-power-in-suspend;
        #address-cells = <0x1>;
        #size-cells = <0x0>;
        status = "ok";
index edb4ee0b8896b2c9a5572e1160e273eac42e062d..7f12624f6c8e8c6af0a3900f3e7d703a969c6c0e 100644 (file)
@@ -322,6 +322,8 @@ dwmmc_1: dwmmc1@f723e000 {
                dwmmc_2: dwmmc2@f723f000 {
                        bus-width = <0x4>;
                        non-removable;
+                       cap-power-off-card;
+                       keep-power-in-suspend;
                        vmmc-supply = <&reg_vdd_3v3>;
                        mmc-pwrseq = <&wl1835_pwrseq>;
 
index 7dabe25f6774827fd08ec78b3f3793e5b5658177..1c6ff8197a88b1f890fed5b592b9358986942145 100644 (file)
@@ -149,7 +149,7 @@ CP110_LABEL(xmdio): mdio@12a600 {
 
                CP110_LABEL(icu): interrupt-controller@1e0000 {
                        compatible = "marvell,cp110-icu";
-                       reg = <0x1e0000 0x10>;
+                       reg = <0x1e0000 0x440>;
                        #interrupt-cells = <3>;
                        interrupt-controller;
                        msi-parent = <&gicp>;
index 0f829db33efe2dfa2735a7cdf570de77c49a6356..4d5ef01f43a331c456eddf1a324f1e1d450bcea5 100644 (file)
@@ -75,7 +75,7 @@ serial@75b0000 {
 
                serial@75b1000 {
                        label = "LS-UART0";
-                       status = "okay";
+                       status = "disabled";
                        pinctrl-names = "default", "sleep";
                        pinctrl-0 = <&blsp2_uart2_4pins_default>;
                        pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
index 650f356f69ca748f0fbef0c52f4026e43f511e46..c2625d15a8c08f535e6f00f5c8228212c4d7ec5f 100644 (file)
@@ -1191,14 +1191,14 @@ ports {
 
                                port@0 {
                                        reg = <0>;
-                                       etf_out: endpoint {
+                                       etf_in: endpoint {
                                                slave-mode;
                                                remote-endpoint = <&funnel0_out>;
                                        };
                                };
                                port@1 {
                                        reg = <0>;
-                                       etf_in: endpoint {
+                                       etf_out: endpoint {
                                                remote-endpoint = <&replicator_in>;
                                        };
                                };
index 9b4dc41703e38036283aa2a4eededd3322e7a428..ae3b5adf32dfe4a31125880e3a8fa49877c83923 100644 (file)
@@ -54,7 +54,7 @@ amp_vcc_reg: reg-fixed {
        sound {
                compatible = "audio-graph-card";
                label = "UniPhier LD11";
-               widgets = "Headphone", "Headphone Jack";
+               widgets = "Headphone", "Headphones";
                dais = <&i2s_port2
                        &i2s_port3
                        &i2s_port4
index fe6608ea327772e3ad0125c020f8d0102dda35bb..7919233c9ce27e3c86dc8dffce13a97e00112c64 100644 (file)
@@ -54,7 +54,7 @@ amp_vcc_reg: reg-fixed {
        sound {
                compatible = "audio-graph-card";
                label = "UniPhier LD20";
-               widgets = "Headphone", "Headphone Jack";
+               widgets = "Headphone", "Headphones";
                dais = <&i2s_port2
                        &i2s_port3
                        &i2s_port4
index 3cfa8ca267384615694e693ed0371df694fea1f4..f9a186f6af8a9206de939bbdf3f6013988b8b994 100644 (file)
@@ -47,6 +47,7 @@ CONFIG_ARCH_MVEBU=y
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_ROCKCHIP=y
 CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_SYNQUACER=y
 CONFIG_ARCH_RENESAS=y
 CONFIG_ARCH_R8A7795=y
 CONFIG_ARCH_R8A7796=y
@@ -58,7 +59,6 @@ CONFIG_ARCH_R8A77995=y
 CONFIG_ARCH_STRATIX10=y
 CONFIG_ARCH_TEGRA=y
 CONFIG_ARCH_SPRD=y
-CONFIG_ARCH_SYNQUACER=y
 CONFIG_ARCH_THUNDER=y
 CONFIG_ARCH_THUNDER2=y
 CONFIG_ARCH_UNIPHIER=y
@@ -67,25 +67,23 @@ CONFIG_ARCH_XGENE=y
 CONFIG_ARCH_ZX=y
 CONFIG_ARCH_ZYNQMP=y
 CONFIG_PCI=y
-CONFIG_HOTPLUG_PCI_PCIE=y
 CONFIG_PCI_IOV=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=y
-CONFIG_PCI_LAYERSCAPE=y
-CONFIG_PCI_HISI=y
-CONFIG_PCIE_QCOM=y
-CONFIG_PCIE_KIRIN=y
-CONFIG_PCIE_ARMADA_8K=y
-CONFIG_PCIE_HISI_STB=y
 CONFIG_PCI_AARDVARK=y
 CONFIG_PCI_TEGRA=y
 CONFIG_PCIE_RCAR=y
-CONFIG_PCIE_ROCKCHIP=y
-CONFIG_PCIE_ROCKCHIP_HOST=m
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
 CONFIG_PCI_HOST_THUNDER_PEM=y
 CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_HISI_STB=y
 CONFIG_ARM64_VA_BITS_48=y
 CONFIG_SCHED_MC=y
 CONFIG_NUMA=y
@@ -104,8 +102,6 @@ CONFIG_HIBERNATION=y
 CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
 CONFIG_ARM_CPUIDLE=y
 CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_GOV_ATTR_SET=y
-CONFIG_CPU_FREQ_GOV_COMMON=y
 CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
@@ -113,11 +109,11 @@ CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
 CONFIG_CPUFREQ_DT=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
 CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
 CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
 CONFIG_ARM_SCPI_CPUFREQ=y
 CONFIG_ARM_TEGRA186_CPUFREQ=y
-CONFIG_ACPI_CPPC_CPUFREQ=m
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -236,11 +232,6 @@ CONFIG_SMSC911X=y
 CONFIG_SNI_AVE=y
 CONFIG_SNI_NETSEC=y
 CONFIG_STMMAC_ETH=m
-CONFIG_DWMAC_IPQ806X=m
-CONFIG_DWMAC_MESON=m
-CONFIG_DWMAC_ROCKCHIP=m
-CONFIG_DWMAC_SUNXI=m
-CONFIG_DWMAC_SUN8I=m
 CONFIG_MDIO_BUS_MUX_MMIOREG=y
 CONFIG_AT803X_PHY=m
 CONFIG_MARVELL_PHY=m
@@ -269,8 +260,8 @@ CONFIG_WL18XX=m
 CONFIG_WLCORE_SDIO=m
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_ADC=m
-CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_ATMEL_MXT=m
 CONFIG_INPUT_MISC=y
@@ -296,17 +287,13 @@ CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
 CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_SH_SCI=y
-CONFIG_SERIAL_SH_SCI_NR_UARTS=11
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_XILINX_PS_UART=y
 CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
 CONFIG_SERIAL_MVEBU_UART=y
 CONFIG_SERIAL_DEV_BUS=y
-CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
 CONFIG_VIRTIO_CONSOLE=y
-CONFIG_I2C_HID=m
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_MUX=y
 CONFIG_I2C_MUX_PCA954x=y
@@ -325,26 +312,26 @@ CONFIG_I2C_RCAR=y
 CONFIG_I2C_CROS_EC_TUNNEL=y
 CONFIG_SPI=y
 CONFIG_SPI_ARMADA_3700=y
-CONFIG_SPI_MESON_SPICC=m
-CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_BCM2835=m
 CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
-CONFIG_SPI_QUP=y
 CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_QUP=y
 CONFIG_SPI_S3C64XX=y
 CONFIG_SPI_SPIDEV=m
 CONFIG_SPMI=y
-CONFIG_PINCTRL_IPQ8074=y
 CONFIG_PINCTRL_SINGLE=y
 CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_IPQ8074=y
 CONFIG_PINCTRL_MSM8916=y
 CONFIG_PINCTRL_MSM8994=y
 CONFIG_PINCTRL_MSM8996=y
-CONFIG_PINCTRL_MT7622=y
 CONFIG_PINCTRL_QDF2XXX=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_MT7622=y
 CONFIG_GPIO_DWAPB=y
 CONFIG_GPIO_MB86S7X=y
 CONFIG_GPIO_PL061=y
@@ -368,13 +355,13 @@ CONFIG_SENSORS_INA2XX=m
 CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
 CONFIG_CPU_THERMAL=y
 CONFIG_THERMAL_EMULATION=y
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_RCAR_GEN3_THERMAL=y
 CONFIG_ARMADA_THERMAL=y
 CONFIG_BRCMSTB_THERMAL=m
 CONFIG_EXYNOS_THERMAL=y
-CONFIG_RCAR_GEN3_THERMAL=y
-CONFIG_QCOM_TSENS=y
-CONFIG_ROCKCHIP_THERMAL=m
 CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_QCOM_TSENS=y
 CONFIG_UNIPHIER_THERMAL=y
 CONFIG_WATCHDOG=y
 CONFIG_S3C2410_WATCHDOG=y
@@ -395,9 +382,9 @@ CONFIG_MFD_MAX77620=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_RK808=y
 CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_AXP20X=y
 CONFIG_REGULATOR_FAN53555=y
-CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_HI6421V530=y
 CONFIG_REGULATOR_HI655X=y
@@ -407,16 +394,15 @@ CONFIG_REGULATOR_QCOM_SMD_RPM=y
 CONFIG_REGULATOR_QCOM_SPMI=y
 CONFIG_REGULATOR_RK808=y
 CONFIG_REGULATOR_S2MPS11=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DECODERS=y
+CONFIG_RC_DEVICES=y
+CONFIG_IR_MESON=m
 CONFIG_MEDIA_SUPPORT=m
 CONFIG_MEDIA_CAMERA_SUPPORT=y
 CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
 CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
-CONFIG_MEDIA_RC_SUPPORT=y
-CONFIG_RC_CORE=m
-CONFIG_RC_DEVICES=y
-CONFIG_RC_DECODERS=y
-CONFIG_IR_MESON=m
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 # CONFIG_DVB_NET is not set
 CONFIG_V4L_MEM2MEM_DRIVERS=y
@@ -441,8 +427,7 @@ CONFIG_ROCKCHIP_DW_HDMI=y
 CONFIG_ROCKCHIP_DW_MIPI_DSI=y
 CONFIG_ROCKCHIP_INNO_HDMI=y
 CONFIG_DRM_RCAR_DU=m
-CONFIG_DRM_RCAR_LVDS=y
-CONFIG_DRM_RCAR_VSP=y
+CONFIG_DRM_RCAR_LVDS=m
 CONFIG_DRM_TEGRA=m
 CONFIG_DRM_PANEL_SIMPLE=m
 CONFIG_DRM_I2C_ADV7511=m
@@ -455,7 +440,6 @@ CONFIG_FB_ARMCLCD=y
 CONFIG_BACKLIGHT_GENERIC=m
 CONFIG_BACKLIGHT_PWM=m
 CONFIG_BACKLIGHT_LP855X=m
-CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
@@ -468,6 +452,7 @@ CONFIG_SND_SOC_RCAR=m
 CONFIG_SND_SOC_AK4613=m
 CONFIG_SND_SIMPLE_CARD=m
 CONFIG_SND_AUDIO_GRAPH_CARD=m
+CONFIG_I2C_HID=m
 CONFIG_USB=y
 CONFIG_USB_OTG=y
 CONFIG_USB_XHCI_HCD=y
@@ -501,12 +486,12 @@ CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_ARMMMCI=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_ACPI=y
-CONFIG_MMC_SDHCI_F_SDH30=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_OF_ARASAN=y
 CONFIG_MMC_SDHCI_OF_ESDHC=y
 CONFIG_MMC_SDHCI_CADENCE=y
 CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_SDHCI_F_SDH30=y
 CONFIG_MMC_MESON_GX=y
 CONFIG_MMC_SDHCI_MSM=y
 CONFIG_MMC_SPI=y
@@ -524,11 +509,11 @@ CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_PWM=y
 CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_DISK=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_LEDS_TRIGGER_CPU=y
 CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
 CONFIG_LEDS_TRIGGER_PANIC=y
-CONFIG_LEDS_TRIGGER_DISK=y
 CONFIG_EDAC=y
 CONFIG_EDAC_GHES=y
 CONFIG_RTC_CLASS=y
@@ -537,13 +522,13 @@ CONFIG_RTC_DRV_RK808=m
 CONFIG_RTC_DRV_S5M=y
 CONFIG_RTC_DRV_DS3232=y
 CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_CROS_EC=y
 CONFIG_RTC_DRV_S3C=y
 CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_SUN6I=y
 CONFIG_RTC_DRV_ARMADA38X=y
 CONFIG_RTC_DRV_TEGRA=y
 CONFIG_RTC_DRV_XGENE=y
-CONFIG_RTC_DRV_CROS_EC=y
 CONFIG_DMADEVICES=y
 CONFIG_DMA_BCM2835=m
 CONFIG_K3_DMA=y
@@ -579,7 +564,6 @@ CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_ARM_MHU=y
 CONFIG_PLATFORM_MHU=y
 CONFIG_BCM2835_MBOX=y
-CONFIG_HI6220_MBOX=y
 CONFIG_QCOM_APCS_IPC=y
 CONFIG_ROCKCHIP_IOMMU=y
 CONFIG_TEGRA_IOMMU_SMMU=y
@@ -602,7 +586,6 @@ CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_EXTCON_USBC_CROS_EC=y
 CONFIG_MEMORY=y
-CONFIG_TEGRA_MC=y
 CONFIG_IIO=y
 CONFIG_EXYNOS_ADC=y
 CONFIG_ROCKCHIP_SARADC=m
@@ -618,27 +601,27 @@ CONFIG_PWM_RCAR=m
 CONFIG_PWM_ROCKCHIP=y
 CONFIG_PWM_SAMSUNG=y
 CONFIG_PWM_TEGRA=m
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_HI6220_USB=y
 CONFIG_PHY_HISTB_COMBPHY=y
 CONFIG_PHY_HISI_INNO_USB2=y
-CONFIG_PHY_RCAR_GEN3_USB2=y
-CONFIG_PHY_RCAR_GEN3_USB3=m
-CONFIG_PHY_HI6220_USB=y
-CONFIG_PHY_QCOM_USB_HS=y
-CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_MVEBU_CP110_COMPHY=y
 CONFIG_PHY_QCOM_QMP=m
-CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
 CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
 CONFIG_PHY_ROCKCHIP_PCIE=m
 CONFIG_PHY_ROCKCHIP_TYPEC=y
-CONFIG_PHY_XGENE=y
 CONFIG_PHY_TEGRA_XUSB=y
 CONFIG_QCOM_L2_PMU=y
 CONFIG_QCOM_L3_PMU=y
-CONFIG_MESON_EFUSE=m
 CONFIG_QCOM_QFPROM=y
 CONFIG_ROCKCHIP_EFUSE=y
 CONFIG_UNIPHIER_EFUSE=y
+CONFIG_MESON_EFUSE=m
 CONFIG_TEE=y
 CONFIG_OPTEE=y
 CONFIG_ARM_SCPI_PROTOCOL=y
@@ -647,7 +630,6 @@ CONFIG_EFI_CAPSULE_LOADER=y
 CONFIG_ACPI=y
 CONFIG_ACPI_APEI=y
 CONFIG_ACPI_APEI_GHES=y
-CONFIG_ACPI_APEI_PCIEAER=y
 CONFIG_ACPI_APEI_MEMORY_FAILURE=y
 CONFIG_ACPI_APEI_EINJ=y
 CONFIG_EXT2_FS=y
@@ -682,7 +664,6 @@ CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
-CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
 # CONFIG_FTRACE is not set
@@ -691,20 +672,15 @@ CONFIG_SECURITY=y
 CONFIG_CRYPTO_ECHAINIV=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_ARM64_CRYPTO=y
-CONFIG_CRYPTO_SHA256_ARM64=m
-CONFIG_CRYPTO_SHA512_ARM64=m
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
 CONFIG_CRYPTO_GHASH_ARM64_CE=y
 CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
 CONFIG_CRYPTO_CRC32_ARM64_CE=m
-CONFIG_CRYPTO_AES_ARM64=m
-CONFIG_CRYPTO_AES_ARM64_CE=m
 CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
 CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
-CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
 CONFIG_CRYPTO_CHACHA20_NEON=m
 CONFIG_CRYPTO_AES_ARM64_BS=m
-CONFIG_CRYPTO_SHA512_ARM64_CE=m
-CONFIG_CRYPTO_SHA3_ARM64=m
-CONFIG_CRYPTO_SM3_ARM64_CE=m
index 253188fb8cb0cea0e35d0f4ed77b5e2c6332d507..e3e50950a863675b72a3c1e0d605d81cf5f258f2 100644 (file)
@@ -223,8 +223,8 @@ static int ctr_encrypt(struct skcipher_request *req)
                kernel_neon_begin();
                aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_enc, rounds, blocks, walk.iv);
-               err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
                kernel_neon_end();
+               err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        if (walk.nbytes) {
                u8 __aligned(8) tail[AES_BLOCK_SIZE];
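Moving skcipher_walk_done() out of the kernel_neon_begin()/kernel_neon_end() pair matters because kernel_neon_begin() disables preemption, while the walk step may need to perform memory management. The resulting pattern keeps only the pure SIMD work inside the critical section:

	kernel_neon_begin();
	/* SIMD-only region: no scheduling or allocation in here */
	aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
			(u8 *)ctx->key_enc, rounds, blocks, walk.iv);
	kernel_neon_end();
	err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);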
index a91933b1e2e62ba235ef05ddf8f9d34dbb6bcf49..4b650ec1d7dd1aa8d4418b6b896f81de4a2187ab 100644 (file)
@@ -28,7 +28,12 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt,
                                 __le32 *origptr, __le32 *updptr, int nr_inst);
 
 void __init apply_alternatives_all(void);
-void apply_alternatives(void *start, size_t length);
+
+#ifdef CONFIG_MODULES
+void apply_alternatives_module(void *start, size_t length);
+#else
+static inline void apply_alternatives_module(void *start, size_t length) { }
+#endif
 
 #define ALTINSTR_ENTRY(feature,cb)                                           \
        " .word 661b - .\n"                             /* label           */ \
index fda9a8ca48bef71b0d4a76be1a45295af1211dd6..fe8777b12f8667c2c0b23952057fc13041276442 100644 (file)
@@ -306,6 +306,7 @@ struct kvm_vcpu_arch {
 #define KVM_ARM64_FP_ENABLED           (1 << 1) /* guest FP regs loaded */
 #define KVM_ARM64_FP_HOST              (1 << 2) /* host FP regs loaded */
 #define KVM_ARM64_HOST_SVE_IN_USE      (1 << 3) /* backup for host TIF_SVE */
+#define KVM_ARM64_HOST_SVE_ENABLED     (1 << 4) /* SVE enabled for EL0 */
 
 #define vcpu_gp_regs(v)                (&(v)->arch.ctxt.gp_regs)
 
index 9f82d6b53851e4b6bedbb28f6d0e7480acd622a6..1bdeca8918a684814f84ca3841b88a3123749cbb 100644 (file)
@@ -224,10 +224,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
         * Only if the new pte is valid and kernel, otherwise TLB maintenance
         * or update_mmu_cache() have the necessary barriers.
         */
-       if (pte_valid_not_user(pte)) {
+       if (pte_valid_not_user(pte))
                dsb(ishst);
-               isb();
-       }
 }
 
 extern void __sync_icache_dcache(pte_t pteval);
@@ -434,7 +432,6 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
        WRITE_ONCE(*pmdp, pmd);
        dsb(ishst);
-       isb();
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -485,7 +482,6 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
 {
        WRITE_ONCE(*pudp, pud);
        dsb(ishst);
-       isb();
 }
 
 static inline void pud_clear(pud_t *pudp)
index 6171178075dcab62def613141732a0b7601b1c43..a8f84812c6e8925c9429451dc3119bfbd5620e8c 100644 (file)
@@ -728,6 +728,17 @@ asm(
        asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
 } while (0)
 
+/*
+ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
+ * set mask are set. Other bits are left as-is.
+ */
+#define sysreg_clear_set(sysreg, clear, set) do {                      \
+       u64 __scs_val = read_sysreg(sysreg);                            \
+       u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);            \
+       if (__scs_new != __scs_val)                                     \
+               write_sysreg(__scs_new, sysreg);                        \
+} while (0)
+
 static inline void config_sctlr_el1(u32 clear, u32 set)
 {
        u32 val;
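sysreg_clear_set() skips the register write when nothing would change, avoiding a needless (and potentially costly) system-register update. The KVM FP/SVE hunk later in this diff uses it to flip the EL0 SVE enable bit:

	/* Re-enable SVE for EL0 (set the bit, clear nothing): */
	sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
	/* Disable it again (clear the bit, set nothing): */
	sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);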
index 5c4bce4ac381a4ab87107e4aa47a9b7beef7d891..36fb069fd049c7053f38b75b9916bba7cb630643 100644 (file)
@@ -122,7 +122,30 @@ static void patch_alternative(struct alt_instr *alt,
        }
 }
 
-static void __apply_alternatives(void *alt_region, bool use_linear_alias)
+/*
+ * We provide our own, private D-cache cleaning function so that we don't
+ * accidentally call into the cache.S code, which is patched by us at
+ * runtime.
+ */
+static void clean_dcache_range_nopatch(u64 start, u64 end)
+{
+       u64 cur, d_size, ctr_el0;
+
+       ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
+       d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
+                                                          CTR_DMINLINE_SHIFT);
+       cur = start & ~(d_size - 1);
+       do {
+               /*
+                * We must clean+invalidate to the PoC in order to avoid
+                * Cortex-A53 errata 826319, 827319, 824069 and 819472
+                * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE)
+                */
+               asm volatile("dc civac, %0" : : "r" (cur) : "memory");
+       } while (cur += d_size, cur < end);
+}
+
+static void __apply_alternatives(void *alt_region, bool is_module)
 {
        struct alt_instr *alt;
        struct alt_region *region = alt_region;
@@ -145,7 +168,7 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
                pr_info_once("patching kernel code\n");
 
                origptr = ALT_ORIG_PTR(alt);
-               updptr = use_linear_alias ? lm_alias(origptr) : origptr;
+               updptr = is_module ? origptr : lm_alias(origptr);
                nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
 
                if (alt->cpufeature < ARM64_CB_PATCH)
@@ -155,8 +178,20 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
 
                alt_cb(alt, origptr, updptr, nr_inst);
 
-               flush_icache_range((uintptr_t)origptr,
-                                  (uintptr_t)(origptr + nr_inst));
+               if (!is_module) {
+                       clean_dcache_range_nopatch((u64)origptr,
+                                                  (u64)(origptr + nr_inst));
+               }
+       }
+
+       /*
+        * The core module code takes care of cache maintenance in
+        * flush_module_icache().
+        */
+       if (!is_module) {
+               dsb(ish);
+               __flush_icache_all();
+               isb();
        }
 }
 
@@ -178,7 +213,7 @@ static int __apply_alternatives_multi_stop(void *unused)
                isb();
        } else {
                BUG_ON(alternatives_applied);
-               __apply_alternatives(&region, true);
+               __apply_alternatives(&region, false);
                /* Barriers provided by the cache flushing */
                WRITE_ONCE(alternatives_applied, 1);
        }
@@ -192,12 +227,14 @@ void __init apply_alternatives_all(void)
        stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
 }
 
-void apply_alternatives(void *start, size_t length)
+#ifdef CONFIG_MODULES
+void apply_alternatives_module(void *start, size_t length)
 {
        struct alt_region region = {
                .begin  = start,
                .end    = start + length,
        };
 
-       __apply_alternatives(&region, false);
+       __apply_alternatives(&region, true);
 }
+#endif
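clean_dcache_range_nopatch() deliberately derives the D-cache line size from the sanitised CTR_EL0 rather than calling the usual cache helpers, since those are themselves patched by alternatives. CTR_EL0.DminLine is a log2 word count, so:

	d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
							   CTR_DMINLINE_SHIFT);
	/* e.g. DminLine == 4  =>  d_size == 4 << 4 == 64 bytes per line */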
index d2856b129097899d37ba3790056fc28eefc8409e..f24892a40d2c8abd934dcb2dc133c4a136270d31 100644 (file)
@@ -937,7 +937,7 @@ static int __init parse_kpti(char *str)
        __kpti_forced = enabled ? 1 : -1;
        return 0;
 }
-__setup("kpti=", parse_kpti);
+early_param("kpti", parse_kpti);
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 #ifdef CONFIG_ARM64_HW_AFDBM
index 155fd91e78f4a62180e7577355ca4a6b0eb283f4..f0f27aeefb73623a0983c1f3eec2054d306021dc 100644 (file)
@@ -448,9 +448,8 @@ int module_finalize(const Elf_Ehdr *hdr,
        const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
        for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
-               if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
-                       apply_alternatives((void *)s->sh_addr, s->sh_size);
-               }
+               if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
+                       apply_alternatives_module((void *)s->sh_addr, s->sh_size);
 #ifdef CONFIG_ARM64_MODULE_PLTS
                if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
                    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
index f3e2e3aec0b0632793abc2ce06dbaa2addd97eb5..2faa9863d2e569e704191bd1939dac2eb111cb5b 100644 (file)
@@ -179,7 +179,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
  * This is the secondary CPU boot entry.  We're using this CPU's
  * idle thread stack, but a set of temporary page tables.
  */
-asmlinkage void secondary_start_kernel(void)
+asmlinkage notrace void secondary_start_kernel(void)
 {
        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        struct mm_struct *mm = &init_mm;
index dc6ecfa5a2d2564c90a5ce92003a0e3b8490cbce..aac7808ce2162a9d2bdcdcc938b649655663912e 100644 (file)
@@ -5,13 +5,14 @@
  * Copyright 2018 Arm Limited
  * Author: Dave Martin <Dave.Martin@arm.com>
  */
-#include <linux/bottom_half.h>
+#include <linux/irqflags.h>
 #include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_host.h>
 #include <asm/kvm_mmu.h>
+#include <asm/sysreg.h>
 
 /*
  * Called on entry to KVM_RUN unless this vcpu previously ran at least
@@ -61,10 +62,16 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 {
        BUG_ON(!current->mm);
 
-       vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_HOST_SVE_IN_USE);
+       vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
+                             KVM_ARM64_HOST_SVE_IN_USE |
+                             KVM_ARM64_HOST_SVE_ENABLED);
        vcpu->arch.flags |= KVM_ARM64_FP_HOST;
+
        if (test_thread_flag(TIF_SVE))
                vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
+
+       if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
+               vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
 }
 
 /*
@@ -92,19 +99,30 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
  */
 void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 {
-       local_bh_disable();
+       unsigned long flags;
 
-       update_thread_flag(TIF_SVE,
-                          vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+       local_irq_save(flags);
 
        if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
                /* Clean guest FP state to memory and invalidate cpu view */
                fpsimd_save();
                fpsimd_flush_cpu_state();
-       } else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
-               /* Ensure user trap controls are correctly restored */
-               fpsimd_bind_task_to_cpu();
+       } else if (system_supports_sve()) {
+               /*
+                * The FPSIMD/SVE state in the CPU has not been touched, and we
+                * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
+                * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
+                * for EL0.  To avoid spurious traps, restore the trap state
+                * seen by kvm_arch_vcpu_load_fp():
+                */
+               if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
+                       sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
+               else
+                       sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
        }
 
-       local_bh_enable();
+       update_thread_flag(TIF_SVE,
+                          vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+
+       local_irq_restore(flags);
 }
index 49e217ac7e1ec2087c440c60ec71126f0e48ec32..61e93f0b548228f57a08f25a14291a1e46437115 100644 (file)
@@ -583,13 +583,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                                    size >> PAGE_SHIFT);
                        return NULL;
                }
-               if (!coherent)
-                       __dma_flush_area(page_to_virt(page), iosize);
-
                addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot,
                                                   __builtin_return_address(0));
-               if (!addr) {
+               if (addr) {
+                       memset(addr, 0, size);
+                       if (!coherent)
+                               __dma_flush_area(page_to_virt(page), iosize);
+               } else {
                        iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
index 5f9a73a4452c2b87dd9a922933b12c85ab008377..03646e6a2ef4f240412d1eb62a1cbc27d04705b0 100644 (file)
@@ -217,8 +217,9 @@ ENDPROC(idmap_cpu_replace_ttbr1)
 
        .macro __idmap_kpti_put_pgtable_ent_ng, type
        orr     \type, \type, #PTE_NG           // Same bit for blocks and pages
-       str     \type, [cur_\()\type\()p]       // Update the entry and ensure it
-       dc      civac, cur_\()\type\()p         // is visible to all CPUs.
+       str     \type, [cur_\()\type\()p]       // Update the entry and ensure
+       dmb     sy                              // that it is visible to all
+       dc      civac, cur_\()\type\()p         // CPUs.
        .endm
 
 /*
index 3efba40adc5411ce5981ce611390c084ce7eddf0..c872c4e6bafb64c4334810f73475eb21845481a6 100644 (file)
 
 #define SO_ZEROCOPY            60
 
+#define SO_TXTIME              61
+#define SCM_TXTIME             SO_TXTIME
+
 #endif /* _ASM_IA64_SOCKET_H */
index 331a3bb66297baa39404fbefa273663ebd1871fe..93a737c8d1a6448d5bb4dcb2d71c8d8b5241e0d7 100644 (file)
@@ -8,11 +8,4 @@ config TRACE_IRQFLAGS_SUPPORT
 
 source "lib/Kconfig.debug"
 
-config HEART_BEAT
-       bool "Heart beat function for kernel"
-       default n
-       help
-         This option turns on/off heart beat kernel functionality.
-         First GPIO node is taken.
-
 endmenu
index d5384f6f36f777d4487ea00f2908b39fe5a26403..ce9b7b7861569501c0339491338da38a2cdb0050 100644 (file)
@@ -19,15 +19,10 @@ extern char cmd_line[COMMAND_LINE_SIZE];
 
 extern char *klimit;
 
-void microblaze_heartbeat(void);
-void microblaze_setup_heartbeat(void);
-
 #   ifdef CONFIG_MMU
 extern void mmu_reset(void);
 #   endif /* CONFIG_MMU */
 
-extern void of_platform_reset_gpio_probe(void);
-
 void time_init(void);
 void init_IRQ(void);
 void machine_early_init(const char *cmdline, unsigned int ram,
index 9774e1d9507baebbd6efe9bcf67bb88bcd214d82..a62d09420a47b725cf67e12b99784a2259e24d2f 100644 (file)
@@ -38,6 +38,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define __NR_syscalls         399
+#define __NR_syscalls         401
 
 #endif /* _ASM_MICROBLAZE_UNISTD_H */
index eb156f914793b29b558c9b48853af8d833f3d3d2..7a9f16a7641374855d4d8f9a7189792031f51185 100644 (file)
 #define __NR_pkey_alloc                396
 #define __NR_pkey_free         397
 #define __NR_statx             398
+#define __NR_io_pgetevents     399
+#define __NR_rseq              400
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
index 7e99cf6984a1eb5f51597dbd8857f6f370d28328..dd71637437f4f6b1ff307d385b8a1ff293959075 100644 (file)
@@ -8,7 +8,6 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_timer.o = -pg
 CFLAGS_REMOVE_intc.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
-CFLAGS_REMOVE_heartbeat.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_process.o = -pg
 endif
@@ -17,12 +16,11 @@ extra-y := head.o vmlinux.lds
 
 obj-y += dma.o exceptions.o \
        hw_exception_handler.o irq.o \
-       platform.o process.o prom.o ptrace.o \
+       process.o prom.o ptrace.o \
        reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
 
 obj-y += cpu/
 
-obj-$(CONFIG_HEART_BEAT)       += heartbeat.o
 obj-$(CONFIG_MODULES)          += microblaze_ksyms.o module.o
 obj-$(CONFIG_MMU)              += misc.o
 obj-$(CONFIG_STACKTRACE)       += stacktrace.o
diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c
deleted file mode 100644 (file)
index 2022130..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2007-2009 PetaLogix
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/sched.h>
-#include <linux/sched/loadavg.h>
-#include <linux/io.h>
-
-#include <asm/setup.h>
-#include <asm/page.h>
-#include <asm/prom.h>
-
-static unsigned int base_addr;
-
-void microblaze_heartbeat(void)
-{
-       static unsigned int cnt, period, dist;
-
-       if (base_addr) {
-               if (cnt == 0 || cnt == dist)
-                       out_be32(base_addr, 1);
-               else if (cnt == 7 || cnt == dist + 7)
-                       out_be32(base_addr, 0);
-
-               if (++cnt > period) {
-                       cnt = 0;
-                       /*
-                        * The hyperbolic function below modifies the heartbeat
-                        * period length in dependency of the current (5min)
-                        * load. It goes through the points f(0)=126, f(1)=86,
-                        * f(5)=51, f(inf)->30.
-                        */
-                       period = ((672 << FSHIFT) / (5 * avenrun[0] +
-                                               (7 << FSHIFT))) + 30;
-                       dist = period / 4;
-               }
-       }
-}
-
-void microblaze_setup_heartbeat(void)
-{
-       struct device_node *gpio = NULL;
-       int *prop;
-       int j;
-       const char * const gpio_list[] = {
-               "xlnx,xps-gpio-1.00.a",
-               NULL
-       };
-
-       for (j = 0; gpio_list[j] != NULL; j++) {
-               gpio = of_find_compatible_node(NULL, NULL, gpio_list[j]);
-               if (gpio)
-                       break;
-       }
-
-       if (gpio) {
-               base_addr = be32_to_cpup(of_get_property(gpio, "reg", NULL));
-               base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE);
-               pr_notice("Heartbeat GPIO at 0x%x\n", base_addr);
-
-               /* GPIO is configured as output */
-               prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL);
-               if (prop)
-                       out_be32(base_addr + 4, 0);
-       }
-}
diff --git a/arch/microblaze/kernel/platform.c b/arch/microblaze/kernel/platform.c
deleted file mode 100644 (file)
index 2540d60..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright 2008 Michal Simek <monstr@monstr.eu>
- *
- * based on virtex.c file
- *
- * Copyright 2007 Secret Lab Technologies Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-#include <asm/setup.h>
-
-static struct of_device_id xilinx_of_bus_ids[] __initdata = {
-       { .compatible = "simple-bus", },
-       { .compatible = "xlnx,compound", },
-       {}
-};
-
-static int __init microblaze_device_probe(void)
-{
-       of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
-       of_platform_reset_gpio_probe();
-       return 0;
-}
-device_initcall(microblaze_device_probe);
index bab4c8330ef4f3f165ad2992d9660776fb0e3c41..fcbe1daf631662f8d45a580126f58d2090d90023 100644 (file)
@@ -18,7 +18,7 @@
 static int handle; /* reset pin handle */
 static unsigned int reset_val;
 
-void of_platform_reset_gpio_probe(void)
+static int of_platform_reset_gpio_probe(void)
 {
        int ret;
        handle = of_get_named_gpio(of_find_node_by_path("/"),
@@ -27,13 +27,13 @@ void of_platform_reset_gpio_probe(void)
        if (!gpio_is_valid(handle)) {
                pr_info("Skipping unavailable RESET gpio %d (%s)\n",
                                handle, "reset");
-               return;
+               return -ENODEV;
        }
 
        ret = gpio_request(handle, "reset");
        if (ret < 0) {
                pr_info("GPIO pin is already allocated\n");
-               return;
+               return ret;
        }
 
        /* get current setup value */
@@ -51,11 +51,12 @@ void of_platform_reset_gpio_probe(void)
 
        pr_info("RESET: Registered gpio device: %d, current val: %d\n",
                                                        handle, reset_val);
-       return;
+       return 0;
 err:
        gpio_free(handle);
-       return;
+       return ret;
 }
+device_initcall(of_platform_reset_gpio_probe);
 
 
 static void gpio_system_reset(void)
index 56bcf313121fb6bd31be14dc5d9f676a132cb85c..6ab6505937921e247231f8a09fbf5ab8b463d1a5 100644 (file)
@@ -400,3 +400,5 @@ ENTRY(sys_call_table)
        .long sys_pkey_alloc
        .long sys_pkey_free
        .long sys_statx
+       .long sys_io_pgetevents
+       .long sys_rseq
index 7de941cbbd940fb7ca72840e3bf6422993c6eb94..a6683484b3a12690c517ccf5803ac64f03f16c81 100644 (file)
@@ -156,9 +156,6 @@ static inline void timer_ack(void)
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
        struct clock_event_device *evt = &clockevent_xilinx_timer;
-#ifdef CONFIG_HEART_BEAT
-       microblaze_heartbeat();
-#endif
        timer_ack();
        evt->event_handler(evt);
        return IRQ_HANDLED;
@@ -318,10 +315,6 @@ static int __init xilinx_timer_init(struct device_node *timer)
                return ret;
        }
 
-#ifdef CONFIG_HEART_BEAT
-       microblaze_setup_heartbeat();
-#endif
-
        ret = xilinx_clocksource_init();
        if (ret)
                return ret;
index 3f9deec70b92383130b847ef3d9585db5134675e..08c10c518f8323fea7838d92fe03b748cbb2966e 100644 (file)
@@ -65,6 +65,7 @@ config MIPS
        select HAVE_OPROFILE
        select HAVE_PERF_EVENTS
        select HAVE_REGS_AND_STACK_ACCESS_API
+       select HAVE_RSEQ
        select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
index 6b2c6f3baefa556018dffea409500b1c7846ed77..75fb96ca61db7ef6652722640a8d452cbc125d55 100644 (file)
@@ -34,7 +34,7 @@
 #define PB44_KEYS_DEBOUNCE_INTERVAL    (3 * PB44_KEYS_POLL_INTERVAL)
 
 static struct gpiod_lookup_table pb44_i2c_gpiod_table = {
-       .dev_id = "i2c-gpio",
+       .dev_id = "i2c-gpio.0",
        .table = {
                GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA,
                                NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
index 6054d49e608eec038e1bbd49599bc783270aa09a..8c9cbf13d32a0a471bc6f2653bbb3c459b1b2c2c 100644 (file)
@@ -212,6 +212,12 @@ static int __init bcm47xx_cpu_fixes(void)
                 */
                if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
                        cpu_wait = NULL;
+
+               /*
+                * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
+                * Enable ExternalSync for sync instruction to take effect
+                */
+               set_c0_config7(MIPS_CONF7_ES);
                break;
 #endif
        }
index a7d0b836f2f7dd9c8bf7897759aed6b9f59ade39..cea8ad864b3f6f416cb45687bfbcb5bd882933a7 100644 (file)
@@ -414,6 +414,8 @@ static inline type pfx##in##bwlq##p(unsigned long port)                     \
        __val = *__addr;                                                \
        slow;                                                           \
                                                                        \
+       /* prevent prefetching of coherent DMA data prematurely */      \
+       rmb();                                                          \
        return pfx##ioswab##bwlq(__addr, __val);                        \
 }
 
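The rmb() inserted above orders the MMIO read against subsequent loads, so the CPU cannot satisfy a read of a coherent DMA buffer from a prefetch issued before the device signalled completion. A sketch of the pattern the barrier protects (the device port, flag, and ring names are illustrative, not from the patch):

    u32 status = inl(DEV_STATUS_PORT);      /* accessor now ends with rmb() */

    if (status & DEV_RX_DONE)
            /* without the barrier, this load could have been prefetched
             * before the status read and returned stale DMA data */
            consume(rx_ring[head]);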
index ae461d91cd1faef06dc39399e7910eb28471d930..0bc270806ec5a68dbb030d6f47d26e46485c0ea0 100644 (file)
 #define MIPS_CONF7_WII         (_ULCAST_(1) << 31)
 
 #define MIPS_CONF7_RPS         (_ULCAST_(1) << 2)
+/* ExternalSync */
+#define MIPS_CONF7_ES          (_ULCAST_(1) << 8)
 
 #define MIPS_CONF7_IAR         (_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR          (_ULCAST_(1) << 16)
@@ -2765,6 +2767,7 @@ __BUILD_SET_C0(status)
 __BUILD_SET_C0(cause)
 __BUILD_SET_C0(config)
 __BUILD_SET_C0(config5)
+__BUILD_SET_C0(config7)
 __BUILD_SET_C0(intcontrol)
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)
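__BUILD_SET_C0(config7) generates the accessor used by the bcm47xx erratum fix above. Roughly, the macro expands to read-modify-write helpers of this shape (a simplified sketch, not the verbatim expansion in mipsregs.h):

    static inline unsigned int set_c0_config7(unsigned int set)
    {
            unsigned int res = read_c0_config7();

            write_c0_config7(res | set);
            return res;     /* previous value, as with the other set_c0_* helpers */
    }

clear_c0_config7() and change_c0_config7() are generated alongside it with the analogous mask-out and mask-and-replace bodies.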
index 49c3d47959637a8653067bc903fbcb8b0bb16e1e..71370fb3ceef4ee4c235876bd1ac7056cff7f1e8 100644 (file)
 
 #define SO_ZEROCOPY            60
 
+#define SO_TXTIME              61
+#define SCM_TXTIME             SO_TXTIME
+
 #endif /* _UAPI_ASM_SOCKET_H */
index bb05e9916a5fa7f969d915b742329ae67cd51b57..f25dd1d83fb74700b33e4bf2387ebf89ac200f64 100644 (file)
 #define __NR_pkey_alloc                        (__NR_Linux + 364)
 #define __NR_pkey_free                 (__NR_Linux + 365)
 #define __NR_statx                     (__NR_Linux + 366)
+#define __NR_rseq                      (__NR_Linux + 367)
+#define __NR_io_pgetevents             (__NR_Linux + 368)
 
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            366
+#define __NR_Linux_syscalls            368
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                366
+#define __NR_O32_Linux_syscalls                368
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_pkey_alloc                        (__NR_Linux + 324)
 #define __NR_pkey_free                 (__NR_Linux + 325)
 #define __NR_statx                     (__NR_Linux + 326)
+#define __NR_rseq                      (__NR_Linux + 327)
+#define __NR_io_pgetevents             (__NR_Linux + 328)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            326
+#define __NR_Linux_syscalls            328
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         326
+#define __NR_64_Linux_syscalls         328
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_pkey_alloc                        (__NR_Linux + 328)
 #define __NR_pkey_free                 (__NR_Linux + 329)
 #define __NR_statx                     (__NR_Linux + 330)
+#define __NR_rseq                      (__NR_Linux + 331)
+#define __NR_io_pgetevents             (__NR_Linux + 332)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            330
+#define __NR_Linux_syscalls            332
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                330
+#define __NR_N32_Linux_syscalls                332
 
 #endif /* _UAPI_ASM_UNISTD_H */
index 38a302919e6b5ae8aa07303065bd561529fe828e..d7de8adcfcc8767a826e7823d3bf189326da0e33 100644 (file)
@@ -79,6 +79,10 @@ FEXPORT(ret_from_fork)
        jal     schedule_tail           # a0 = struct task_struct *prev
 
 FEXPORT(syscall_exit)
+#ifdef CONFIG_DEBUG_RSEQ
+       move    a0, sp
+       jal     rseq_syscall
+#endif
        local_irq_disable               # make sure need_resched and
                                        # signals don't change between
                                        # sampling and return
@@ -141,6 +145,10 @@ work_notifysig:                            # deal with pending signals and
        j       resume_userspace_check
 
 FEXPORT(syscall_exit_partial)
+#ifdef CONFIG_DEBUG_RSEQ
+       move    a0, sp
+       jal     rseq_syscall
+#endif
        local_irq_disable               # make sure need_resched doesn't
                                        # change between sampling and return
        LONG_L  a2, TI_FLAGS($28)       # current->work
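With CONFIG_DEBUG_RSEQ, every syscall exit calls rseq_syscall() with the saved pt_regs (passed in a0 above) so the core can verify that no system call was issued from inside an rseq critical section. Conceptually it does something like the following (a hedged sketch, not the kernel/rseq.c source; task_in_rseq_cs() is a hypothetical helper):

    void rseq_syscall(struct pt_regs *regs)
    {
            /* is the user IP inside the task's registered critical section? */
            if (task_in_rseq_cs(current, instruction_pointer(regs)))
                    force_sig(SIGSEGV, current);    /* ABI violation: kill the task */
    }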
index f2ee7e1e3342e498be961f8995fc91b1de1f2744..cff52b283e03843519201ca8fe8754e0899c0c3c 100644 (file)
@@ -119,10 +119,20 @@ NESTED(_mcount, PT_SIZE, ra)
 EXPORT_SYMBOL(_mcount)
        PTR_LA  t1, ftrace_stub
        PTR_L   t2, ftrace_trace_function /* Prepare t2 for (1) */
-       bne     t1, t2, static_trace
+       beq     t1, t2, fgraph_trace
         nop
 
+       MCOUNT_SAVE_REGS
+
+       move    a0, ra          /* arg1: self return address */
+       jalr    t2              /* (1) call *ftrace_trace_function */
+        move   a1, AT          /* arg2: parent's return address */
+
+       MCOUNT_RESTORE_REGS
+
+fgraph_trace:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       PTR_LA  t1, ftrace_stub
        PTR_L   t3, ftrace_graph_return
        bne     t1, t3, ftrace_graph_caller
         nop
@@ -131,24 +141,11 @@ EXPORT_SYMBOL(_mcount)
        bne     t1, t3, ftrace_graph_caller
         nop
 #endif
-       b       ftrace_stub
-#ifdef CONFIG_32BIT
-        addiu sp, sp, 8
-#else
-        nop
-#endif
 
-static_trace:
-       MCOUNT_SAVE_REGS
-
-       move    a0, ra          /* arg1: self return address */
-       jalr    t2              /* (1) call *ftrace_trace_function */
-        move   a1, AT          /* arg2: parent's return address */
-
-       MCOUNT_RESTORE_REGS
 #ifdef CONFIG_32BIT
        addiu sp, sp, 8
 #endif
+
        .globl ftrace_stub
 ftrace_stub:
        RETURN_BACK
index a9a7d78803cde30097a02c76aa49bef9f812be7e..91d3c8c46097cd960fd541cdf7c76d7e0d3636e3 100644 (file)
@@ -590,3 +590,5 @@ EXPORT(sys_call_table)
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free                   /* 4365 */
        PTR     sys_statx
+       PTR     sys_rseq
+       PTR     sys_io_pgetevents
index 65d5aeeb9bdb51ac846d5acc213f3a1af9b97533..358d9599983d17840cd909ea7851197e7b38b838 100644 (file)
@@ -439,4 +439,6 @@ EXPORT(sys_call_table)
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free                   /* 5325 */
        PTR     sys_statx
+       PTR     sys_rseq
+       PTR     sys_io_pgetevents
        .size   sys_call_table,.-sys_call_table
index cbf190ef9e8a5e2a0e499cfaf721908abf4213ec..c65eaacc1abfcf4c15a40721056cf6c3503927ee 100644 (file)
@@ -434,4 +434,6 @@ EXPORT(sysn32_call_table)
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free
        PTR     sys_statx                       /* 6330 */
+       PTR     sys_rseq
+       PTR     compat_sys_io_pgetevents
        .size   sysn32_call_table,.-sysn32_call_table
index 9ebe3e2403b1d7b84d66732cd261364208f6020d..73913f072e3916f36c23bda86870f83002a725c0 100644 (file)
@@ -583,4 +583,6 @@ EXPORT(sys32_call_table)
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free                   /* 4365 */
        PTR     sys_statx
+       PTR     sys_rseq
+       PTR     compat_sys_io_pgetevents
        .size   sys32_call_table,.-sys32_call_table
index 9e224469c78887e9c2eb55779bfc8d4646ca2f09..0a9cfe7a0372940fceb71931cff1eddec2e10e37 100644 (file)
@@ -801,6 +801,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
                regs->regs[0] = 0;              /* Don't deal with this again.  */
        }
 
+       rseq_signal_deliver(ksig, regs);
+
        if (sig_uses_siginfo(&ksig->ka, abi))
                ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
                                          ksig, regs, oldset);
@@ -868,6 +870,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
+               rseq_handle_notify_resume(NULL, regs);
        }
 
        user_enter();
index 3e1a46615120a566adbfff65b5aab8e860bf3809..8999b922651210f6c20c83e7aa72b3bccf6c3d58 100644 (file)
@@ -98,8 +98,12 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
        __free_page(pte);
 }
 
+#define __pte_free_tlb(tlb, pte, addr) \
+do {                                   \
+       pgtable_page_dtor(pte);         \
+       tlb_remove_page((tlb), (pte));  \
+} while (0)
 
-#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte))
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
 #define check_pgt_cache()          do { } while (0)
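The added pgtable_page_dtor() pairs with the pgtable_page_ctor() that runs when a PTE page is allocated; freeing the page without it leaks the page-table accounting and, with split page-table locks, the ptlock state. The allocation side of the pairing looks roughly like this (a sketch of the common pattern, not this architecture's exact code):

    pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
    {
            struct page *pte = alloc_page(GFP_KERNEL | __GFP_ZERO);

            if (!pte)
                    return NULL;
            if (!pgtable_page_ctor(pte)) {  /* init ptlock, account the page */
                    __free_page(pte);
                    return NULL;
            }
            return pte;
    }

Teardown must mirror it, which is exactly what the __pte_free_tlb() above now does: dtor first, then hand the page to the TLB batch.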
index 690d55272ba688a2adc88bca00e66cc61903c711..0c826ad6e994cce359474229acf08ff0d0330b78 100644 (file)
@@ -277,12 +277,6 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
        l.addi  r3,r1,0                    // pt_regs
       /* r4 set by EXCEPTION_HANDLE */   // effective address of fault
 
-       /*
-        * __PHX__: TODO
-        *
-        * all this can be written much simpler. look at
-        * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part
-        */
 #ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
        l.lwz   r6,PT_PC(r3)               // address of an offending insn
        l.lwz   r6,0(r6)                   // instruction that caused pf
@@ -314,7 +308,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
 
 #else
 
-       l.lwz   r6,PT_SR(r3)               // SR
+       l.mfspr r6,r0,SPR_SR               // SR
        l.andi  r6,r6,SPR_SR_DSX           // check for delay slot exception
        l.sfne  r6,r0                      // exception happened in delay slot
        l.bnf   7f
index fb02b2a1d6f2d875372b125cf837feb119d0164e..9fc6b60140f007bea1442f60727a22aee24776c9 100644 (file)
  *      r4  - EEAR     exception EA
  *      r10 - current  pointing to current_thread_info struct
  *      r12 - syscall  0, since we didn't come from syscall
- *      r13 - temp     it actually contains new SR, not needed anymore
- *      r31 - handler  address of the handler we'll jump to
+ *      r30 - handler  address of the handler we'll jump to
  *
  *      handler has to save remaining registers to the exception
  *      ksp frame *before* tainting them!
        /* r1 is KSP, r30 is __pa(KSP) */                       ;\
        tophys  (r30,r1)                                        ;\
        l.sw    PT_GPR12(r30),r12                               ;\
+       /* r4 used as tmp before EA */                          ;\
        l.mfspr r12,r0,SPR_EPCR_BASE                            ;\
        l.sw    PT_PC(r30),r12                                  ;\
        l.mfspr r12,r0,SPR_ESR_BASE                             ;\
        /* r12 == 1 if we come from syscall */                  ;\
        CLEAR_GPR(r12)                                          ;\
        /* ----- turn on MMU ----- */                           ;\
-       l.ori   r30,r0,(EXCEPTION_SR)                           ;\
+       /* Carry DSX into exception SR */                       ;\
+       l.mfspr r30,r0,SPR_SR                                   ;\
+       l.andi  r30,r30,SPR_SR_DSX                              ;\
+       l.ori   r30,r30,(EXCEPTION_SR)                          ;\
        l.mtspr r0,r30,SPR_ESR_BASE                             ;\
        /* r30: EA address of handler */                        ;\
        LOAD_SYMBOL_2_GPR(r30,handler)                          ;\
index fac246e6f37a278e4cd7c001c2cd53a8df88dc4e..d8981cbb852a5f1fc1ea80667df3ed451579d13c 100644 (file)
@@ -300,7 +300,7 @@ static inline int in_delay_slot(struct pt_regs *regs)
                return 0;
        }
 #else
-       return regs->sr & SPR_SR_DSX;
+       return mfspr(SPR_SR) & SPR_SR_DSX;
 #endif
 }
 
index c480770fabcd6287571dacb9d40ccc224f8e13b1..17526bebcbd277765c791b30e04e0096052d78cb 100644 (file)
@@ -244,11 +244,11 @@ config PARISC_PAGE_SIZE_4KB
 
 config PARISC_PAGE_SIZE_16KB
        bool "16KB"
-       depends on PA8X00
+       depends on PA8X00 && BROKEN
 
 config PARISC_PAGE_SIZE_64KB
        bool "64KB"
-       depends on PA8X00
+       depends on PA8X00 && BROKEN
 
 endchoice
 
@@ -347,7 +347,7 @@ config NR_CPUS
        int "Maximum number of CPUs (2-32)"
        range 2 32
        depends on SMP
-       default "32"
+       default "4"
 
 endmenu
 
index 714284ea6cc214f1011c6e0593f5ad2b0c962ddc..5ce030266e7d03bbfd7da5885471b1a874eefcd7 100644 (file)
@@ -65,10 +65,6 @@ endif
 # kernel.
 cflags-y       += -mdisable-fpregs
 
-# Without this, "ld -r" results in .text sections that are too big
-# (> 0x40000) for branches to reach stubs.
-cflags-y       += -ffunction-sections
-
 # Use long jumps instead of long branches (needed if your linker fails to
 # link a too big vmlinux executable). Not enabled for building modules.
 ifdef CONFIG_MLONGCALLS
index eeb5c88586631e8935b96e0edfe410bbbc2ecffc..715c96ba2ec81c2907ead07ffd21fbf79a0fb0cb 100644 (file)
@@ -21,14 +21,6 @@ typedef struct {
        unsigned long sig[_NSIG_WORDS];
 } sigset_t;
 
-#ifndef __KERNEL__
-struct sigaction {
-       __sighandler_t sa_handler;
-       unsigned long sa_flags;
-       sigset_t sa_mask;               /* mask last for extensibility */
-};
-#endif
-
 #include <asm/sigcontext.h>
 
 #endif /* !__ASSEMBLY */
index 1d0fdc3b5d228279b3863bd581f56fc8ed2cbe9c..061b9cf2a77988a6b82eb47f3d1fd1a53e3264a2 100644 (file)
 
 #define SO_ZEROCOPY            0x4035
 
+#define SO_TXTIME              0x4036
+#define SCM_TXTIME             SO_TXTIME
+
 #endif /* _UAPI_ASM_SOCKET_H */
index 4872e77aa96b784d5a1e19bd7f9c4996b8cd0992..dc77c5a51db774a7c691568c010ce0a4500e7286 100644 (file)
 #define __NR_preadv2           (__NR_Linux + 347)
 #define __NR_pwritev2          (__NR_Linux + 348)
 #define __NR_statx             (__NR_Linux + 349)
+#define __NR_io_pgetevents     (__NR_Linux + 350)
 
-#define __NR_Linux_syscalls    (__NR_statx + 1)
+#define __NR_Linux_syscalls    (__NR_io_pgetevents + 1)
 
 
 #define __IGNORE_select                /* newselect */
index e0e1c9775c320b46d85da0f2e6ce22bc2275b9fb..5eb979d04b905420e28f63dd526e6ca13aaa9842 100644 (file)
@@ -154,17 +154,14 @@ int register_parisc_driver(struct parisc_driver *driver)
 {
        /* FIXME: we need this because apparently the sti
         * driver can be registered twice */
-       if(driver->drv.name) {
-               printk(KERN_WARNING 
-                      "BUG: skipping previously registered driver %s\n",
-                      driver->name);
+       if (driver->drv.name) {
+               pr_warn("BUG: skipping previously registered driver %s\n",
+                       driver->name);
                return 1;
        }
 
        if (!driver->probe) {
-               printk(KERN_WARNING 
-                      "BUG: driver %s has no probe routine\n",
-                      driver->name);
+               pr_warn("BUG: driver %s has no probe routine\n", driver->name);
                return 1;
        }
 
@@ -491,12 +488,9 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
 
        dev = create_parisc_device(mod_path);
        if (dev->id.hw_type != HPHW_FAULTY) {
-               printk(KERN_ERR "Two devices have hardware path [%s].  "
-                               "IODC data for second device: "
-                               "%02x%02x%02x%02x%02x%02x\n"
-                               "Rearranging GSC cards sometimes helps\n",
-                       parisc_pathname(dev), iodc_data[0], iodc_data[1],
-                       iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]);
+               pr_err("Two devices have hardware path [%s].  IODC data for second device: %7phN\n"
+                      "Rearranging GSC cards sometimes helps\n",
+                       parisc_pathname(dev), iodc_data);
                return NULL;
        }
 
@@ -528,8 +522,7 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
         * the keyboard controller
         */
        if ((hpa & 0xfff) == 0 && insert_resource(&iomem_resource, &dev->hpa))
-               printk("Unable to claim HPA %lx for device %s\n",
-                               hpa, name);
+               pr_warn("Unable to claim HPA %lx for device %s\n", hpa, name);
 
        return dev;
 }
@@ -875,7 +868,7 @@ static void print_parisc_device(struct parisc_device *dev)
        static int count;
 
        print_pa_hwpath(dev, hw_path);
-       printk(KERN_INFO "%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
+       pr_info("%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
                ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
                dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
 
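The %7phN conversion replaces six hand-rolled %02x specifiers: the %*ph printk extension dumps a small buffer as hex bytes, and the N suffix omits the separators. A usage sketch:

    u8 iodc[7] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde };

    pr_err("IODC data: %7phN\n", iodc);     /* prints "IODC data: 123456789abcde" */

Note that the old format string skipped iodc_data[2]; the new one prints all seven leading bytes.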
index 6308749359e4b7d6ee348062d584f7b747f1a115..fe3f2a49d2b1063a93daa0a9d4077d2978c5bdaf 100644 (file)
        ENTRY_COMP(preadv2)
        ENTRY_COMP(pwritev2)
        ENTRY_SAME(statx)
+       ENTRY_COMP(io_pgetevents)       /* 350 */
 
 
 .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
index 143f90e2f9f3c631616d4af52f0fe3fa08f44af9..2ef83d78eec42bd3ad55a3c2e0f976e081417266 100644 (file)
@@ -25,7 +25,7 @@
 
 /* #define DEBUG 1 */
 #ifdef DEBUG
-#define dbg(x...) printk(x)
+#define dbg(x...) pr_debug(x)
 #else
 #define dbg(x...)
 #endif
@@ -182,7 +182,7 @@ int __init unwind_init(void)
        start = (long)&__start___unwind[0];
        stop = (long)&__stop___unwind[0];
 
-       printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n", 
+       dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
            start, stop,
            (stop - start) / sizeof(struct unwind_table_entry));
 
index bd06a3ccda312a0a645cd0dbff887924f691d2ce..2ea575cb3401248c1cb97f9596c9e7079c3256b2 100644 (file)
@@ -244,6 +244,7 @@ cpu-as-$(CONFIG_4xx)                += -Wa,-m405
 cpu-as-$(CONFIG_ALTIVEC)       += $(call as-option,-Wa$(comma)-maltivec)
 cpu-as-$(CONFIG_E200)          += -Wa,-me200
 cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4
+cpu-as-$(CONFIG_PPC_E500MC)    += $(call as-option,-Wa$(comma)-me500mc)
 
 KBUILD_AFLAGS += $(cpu-as-y)
 KBUILD_CFLAGS += $(cpu-as-y)
index abd01d466de4211953865fc6398fb09aa26efc1e..6b124f73f67ab441488b4e0ca50bbb4a8c450ce0 100644 (file)
@@ -37,12 +37,13 @@ fman0: fman@400000 {
        #size-cells = <1>;
        cell-index = <0>;
        compatible = "fsl,fman";
-       ranges = <0 0x400000 0x100000>;
-       reg = <0x400000 0x100000>;
+       ranges = <0 0x400000 0xfe000>;
+       reg = <0x400000 0xfe000>;
        interrupts = <96 2 0 0>, <16 2 1 1>;
        clocks = <&clockgen 3 0>;
        clock-names = "fmanclk";
        fsl,qman-channel-range = <0x40 0xc>;
+       ptimer-handle = <&ptp_timer0>;
 
        muram@0 {
                compatible = "fsl,fman-muram";
@@ -93,9 +94,10 @@ fman0_oh_0x7: port@87000 {
                reg = <0x87000 0x1000>;
                status = "disabled";
        };
+};
 
-       ptp_timer0: ptp-timer@fe000 {
-               compatible = "fsl,fman-ptp-timer";
-               reg = <0xfe000 0x1000>;
-       };
+ptp_timer0: ptp-timer@4fe000 {
+       compatible = "fsl,fman-ptp-timer";
+       reg = <0x4fe000 0x1000>;
+       interrupts = <96 2 0 0>;
 };
index debea75fd3f0e54b3f8662d61fbe718decafb2ad..b80aaf5f00a1939467ffd4890d7ce5cfab473444 100644 (file)
@@ -37,12 +37,13 @@ fman1: fman@500000 {
        #size-cells = <1>;
        cell-index = <1>;
        compatible = "fsl,fman";
-       ranges = <0 0x500000 0x100000>;
-       reg = <0x500000 0x100000>;
+       ranges = <0 0x500000 0xfe000>;
+       reg = <0x500000 0xfe000>;
        interrupts = <97 2 0 0>, <16 2 1 0>;
        clocks = <&clockgen 3 1>;
        clock-names = "fmanclk";
        fsl,qman-channel-range = <0x60 0xc>;
+       ptimer-handle = <&ptp_timer1>;
 
        muram@0 {
                compatible = "fsl,fman-muram";
@@ -93,9 +94,10 @@ fman1_oh_0x7: port@87000 {
                reg = <0x87000 0x1000>;
                status = "disabled";
        };
+};
 
-       ptp_timer1: ptp-timer@fe000 {
-               compatible = "fsl,fman-ptp-timer";
-               reg = <0xfe000 0x1000>;
-       };
+ptp_timer1: ptp-timer@5fe000 {
+       compatible = "fsl,fman-ptp-timer";
+       reg = <0x5fe000 0x1000>;
+       interrupts = <97 2 0 0>;
 };
index 3a20e0d1a6d26cc20c2bd97afc5c5a220a72c6b6..d3720fdde26ca11ce11e4abdff9f76a0b5b3bc6c 100644 (file)
@@ -37,12 +37,13 @@ fman0: fman@400000 {
        #size-cells = <1>;
        cell-index = <0>;
        compatible = "fsl,fman";
-       ranges = <0 0x400000 0x100000>;
-       reg = <0x400000 0x100000>;
+       ranges = <0 0x400000 0xfe000>;
+       reg = <0x400000 0xfe000>;
        interrupts = <96 2 0 0>, <16 2 1 1>;
        clocks = <&clockgen 3 0>;
        clock-names = "fmanclk";
        fsl,qman-channel-range = <0x800 0x10>;
+       ptimer-handle = <&ptp_timer0>;
 
        muram@0 {
                compatible = "fsl,fman-muram";
@@ -98,9 +99,10 @@ xmdio0: mdio@fd000 {
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xfd000 0x1000>;
        };
+};
 
-       ptp_timer0: ptp-timer@fe000 {
-               compatible = "fsl,fman-ptp-timer";
-               reg = <0xfe000 0x1000>;
-       };
+ptp_timer0: ptp-timer@4fe000 {
+       compatible = "fsl,fman-ptp-timer";
+       reg = <0x4fe000 0x1000>;
+       interrupts = <96 2 0 0>;
 };
index 82750ac944c7be227ff8c3801fffb1f2bdeb197a..ae34c204a5bccd2f39931b1982ca1c4428dbde10 100644 (file)
@@ -37,12 +37,13 @@ fman1: fman@500000 {
        #size-cells = <1>;
        cell-index = <1>;
        compatible = "fsl,fman";
-       ranges = <0 0x500000 0x100000>;
-       reg = <0x500000 0x100000>;
+       ranges = <0 0x500000 0xfe000>;
+       reg = <0x500000 0xfe000>;
        interrupts = <97 2 0 0>, <16 2 1 0>;
        clocks = <&clockgen 3 1>;
        clock-names = "fmanclk";
        fsl,qman-channel-range = <0x820 0x10>;
+       ptimer-handle = <&ptp_timer1>;
 
        muram@0 {
                compatible = "fsl,fman-muram";
@@ -98,9 +99,10 @@ mdio@fd000 {
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xfd000 0x1000>;
        };
+};
 
-       ptp_timer1: ptp-timer@fe000 {
-               compatible = "fsl,fman-ptp-timer";
-               reg = <0xfe000 0x1000>;
-       };
+ptp_timer1: ptp-timer@5fe000 {
+       compatible = "fsl,fman-ptp-timer";
+       reg = <0x5fe000 0x1000>;
+       interrupts = <97 2 0 0>;
 };
index 7f60b60601764007ba7cccfad2394b3db91524af..02f2755842ccab93a7588714ea7272f7f67785ab 100644 (file)
@@ -37,12 +37,13 @@ fman0: fman@400000 {
        #size-cells = <1>;
        cell-index = <0>;
        compatible = "fsl,fman";
-       ranges = <0 0x400000 0x100000>;
-       reg = <0x400000 0x100000>;
+       ranges = <0 0x400000 0xfe000>;
+       reg = <0x400000 0xfe000>;
        interrupts = <96 2 0 0>, <16 2 1 1>;
        clocks = <&clockgen 3 0>;
        clock-names = "fmanclk";
        fsl,qman-channel-range = <0x800 0x10>;
+       ptimer-handle = <&ptp_timer0>;
 
        muram@0 {
                compatible = "fsl,fman-muram";
@@ -86,9 +87,10 @@ xmdio0: mdio@fd000 {
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xfd000 0x1000>;
        };
+};
 
-       ptp_timer0: ptp-timer@fe000 {
-               compatible = "fsl,fman-ptp-timer";
-               reg = <0xfe000 0x1000>;
-       };
+ptp_timer0: ptp-timer@4fe000 {
+       compatible = "fsl,fman-ptp-timer";
+       reg = <0x4fe000 0x1000>;
+       interrupts = <96 2 0 0>;
 };
index 6a6673907e45eeb934e66023e8630fe21d8fd31d..82e44b1a00ae91219f482afa654f2d0440c5aa78 100644 (file)
@@ -108,6 +108,7 @@ static inline void pgtable_free(void *table, unsigned index_size)
 }
 
 #define check_pgt_cache()      do { } while (0)
+#define get_hugepd_cache_index(x)  (x)
 
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
@@ -137,7 +138,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
-       pgtable_page_dtor(table);
        pgtable_free_tlb(tlb, page_address(table), 0);
 }
 #endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */
index af5f2baac80f991951ac77dc3b3eaeb1e72aee46..a069dfcac9a94a94efe66a162cbbff88f1596934 100644 (file)
@@ -49,6 +49,27 @@ static inline int hugepd_ok(hugepd_t hpd)
 }
 #define is_hugepd(hpd)         (hugepd_ok(hpd))
 
+/*
+ * 16M and 16G huge page directory tables are allocated from the slab cache
+ *
+ */
+#define H_16M_CACHE_INDEX (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24)
+#define H_16G_CACHE_INDEX                                                      \
+       (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34)
+
+static inline int get_hugepd_cache_index(int index)
+{
+       switch (index) {
+       case H_16M_CACHE_INDEX:
+               return HTLB_16M_INDEX;
+       case H_16G_CACHE_INDEX:
+               return HTLB_16G_INDEX;
+       default:
+               BUG();
+       }
+       /* should not reach */
+}
+
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int pmd_huge(pmd_t pmd) { return 0; }
 static inline int pud_huge(pud_t pud) { return 0; }
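The cache index is the number of index bits a huge page directory needs: a 16M page consumes 24 address bits, so the directory standing in for the PTE level takes PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24 bits, and the 16G case adds the PUD level and subtracts 34. A worked check, assuming the usual hash-4K values of PAGE_SHIFT = 12, H_PTE_INDEX_SIZE = 9, H_PMD_INDEX_SIZE = 7 and H_PUD_INDEX_SIZE = 9 (illustrative; take the real values from the headers):

    /* H_16M_CACHE_INDEX = 12 + 9 + 7 - 24     = 4 -> slab for 2^4-entry tables */
    /* H_16G_CACHE_INDEX = 12 + 9 + 7 + 9 - 34 = 3 -> slab for 2^3-entry tables */

Both results are positive, which is what the BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0) checks in the pgtable_free() hunk below enforce.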
index fb4b3ba52339e9233207ce7345e2f9d920835f97..d7ee249d6890cb30fcf10ebd608665d57ba2f781 100644 (file)
@@ -45,8 +45,17 @@ static inline int hugepd_ok(hugepd_t hpd)
 {
        return 0;
 }
+
 #define is_hugepd(pdep)                        0
 
+/*
+ * This should never get called
+ */
+static inline int get_hugepd_cache_index(int index)
+{
+       BUG();
+}
+
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int pmd_huge(pmd_t pmd) { return 0; }
 static inline int pud_huge(pud_t pud) { return 0; }
index 63cee159022b51400fbc52dd21ebd31f55f3db67..42aafba7a30834db7643213a3aec583a3cdd1b6a 100644 (file)
@@ -287,6 +287,11 @@ enum pgtable_index {
        PMD_INDEX,
        PUD_INDEX,
        PGD_INDEX,
+       /*
+        * Below are used with 4k page size and hugetlb
+        */
+       HTLB_16M_INDEX,
+       HTLB_16G_INDEX,
 };
 
 extern unsigned long __vmalloc_start;
index 0f571e0ebca19ccdc8b89540324ccc71849b75e5..bd9ba8defd7258ab6e853be0c39d7290f9f02393 100644 (file)
@@ -8,7 +8,7 @@ extern void arch_touch_nmi_watchdog(void);
 static inline void arch_touch_nmi_watchdog(void) {}
 #endif
 
-#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_STACKTRACE)
+#if defined(CONFIG_NMI_IPI) && defined(CONFIG_STACKTRACE)
 extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
                                           bool exclude_self);
 #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
index 1707781d2f208096517859d94f30c6533e0a3771..8825953c225b2e48e9e0cd7938d2185b5e821977 100644 (file)
@@ -109,6 +109,7 @@ static inline void pgtable_free(void *table, unsigned index_size)
 }
 
 #define check_pgt_cache()      do { } while (0)
+#define get_hugepd_cache_index(x)      (x)
 
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
@@ -139,7 +140,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
        tlb_flush_pgtable(tlb, address);
-       pgtable_page_dtor(table);
        pgtable_free_tlb(tlb, page_address(table), 0);
 }
 #endif /* _ASM_POWERPC_PGALLOC_32_H */
index 0e693f322cb2e03a353e3803517820f4c324498b..e2d62d033708c4494a5e95d941b8d34cad3ec3e0 100644 (file)
@@ -141,6 +141,7 @@ static inline void pgtable_free(void *table, int shift)
        }
 }
 
+#define get_hugepd_cache_index(x)      (x)
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
index cfcf6a874cfab3a094d4c931bdc7cade28184665..01b5171ea189994ab394685f8f4645a3fd86594c 100644 (file)
@@ -393,3 +393,4 @@ SYSCALL(pkey_alloc)
 SYSCALL(pkey_free)
 SYSCALL(pkey_mprotect)
 SYSCALL(rseq)
+COMPAT_SYS(io_pgetevents)
index 1e9708632dce30e1093d48dbed2db8d0d90a4e89..c19379f0a32e2b0fe59a9634140582a8afbc291e 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            388
+#define NR_syscalls            389
 
 #define __NR__exit __NR_exit
 
index ac5ba55066dd76a26f133d91623309036bcad4c8..985534d0b448b7ae7b9d4cad7c3f9257d4ce0789 100644 (file)
 #define __NR_pkey_free         385
 #define __NR_pkey_mprotect     386
 #define __NR_rseq              387
+#define __NR_io_pgetevents     388
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index 4be1c0de9406b159eede5503b3a8044645dac7fa..96dd3d871986428dadcbc9bb350c1b876fde8ab4 100644 (file)
@@ -711,7 +711,8 @@ static __init void cpufeatures_cpu_quirks(void)
                cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
                cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
-       } else /* DD2.1 and up have DD2_1 */
+       } else if ((version & 0xffff0000) == 0x004e0000)
+               /* DD2.1 and up have DD2_1 */
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
 
        if ((version & 0xffff0000) == 0x004e0000) {
index 4f861055a8521276c89c71cd67c41425c38c0ac2..d63b488d34d79033fa7229bfeb4d306cf6b56bc0 100644 (file)
@@ -285,9 +285,6 @@ pci_bus_to_hose(int bus)
  * Note that the returned IO or memory base is a physical address
  */
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE3(pciconfig_iobase, long, which,
                unsigned long, bus, unsigned long, devfn)
 {
@@ -313,4 +310,3 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which,
 
        return result;
 }
-#pragma GCC diagnostic pop
index 812171c09f42fecf2c97757e37f7ad45ec9a35d8..dff28f90351245d58f6b77130fb26fcb73351c5d 100644 (file)
@@ -203,9 +203,6 @@ void pcibios_setup_phb_io_space(struct pci_controller *hose)
 #define IOBASE_ISA_IO          3
 #define IOBASE_ISA_MEM         4
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus,
                          unsigned long, in_devfn)
 {
@@ -259,7 +256,6 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus,
 
        return -EOPNOTSUPP;
 }
-#pragma GCC diagnostic pop
 
 #ifdef CONFIG_NUMA
 int pcibus_to_node(struct pci_bus *bus)
index 7fb9f83dcde889f8340daa94ec66cc6b3cb1804b..8afd146bc9c70dc6480e2fff20d6239d327e33d3 100644 (file)
@@ -1051,9 +1051,6 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
 }
 
 /* We assume we are passed big endian arguments */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
 {
        struct rtas_args args;
@@ -1140,7 +1137,6 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
 
        return 0;
 }
-#pragma GCC diagnostic pop
 
 /*
  * Call early during boot, before mem init, to retrieve the RTAS
index 62b1a40d895777a10b3c7279fde05583ae3dc66b..40b44bb53a4efbb8b25c64786262e0123a3da640 100644 (file)
@@ -700,12 +700,19 @@ EXPORT_SYMBOL(check_legacy_ioport);
 static int ppc_panic_event(struct notifier_block *this,
                              unsigned long event, void *ptr)
 {
+       /*
+        * panic does a local_irq_disable, but we really
+        * want interrupts to be hard disabled.
+        */
+       hard_irq_disable();
+
        /*
         * If firmware-assisted dump has been registered then trigger
         * firmware-assisted dump and let firmware handle everything else.
         */
        crash_fadump(NULL, ptr);
-       ppc_md.panic(ptr);  /* May not return */
+       if (ppc_md.panic)
+               ppc_md.panic(ptr);  /* May not return */
        return NOTIFY_DONE;
 }
 
@@ -716,7 +723,8 @@ static struct notifier_block ppc_panic_block = {
 
 void __init setup_panic(void)
 {
-       if (!ppc_md.panic)
+       /* PPC64 always does a hard irq disable in its panic handler */
+       if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
                return;
        atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
 }
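hard_irq_disable() matters here because on 64-bit powerpc local_irq_disable() only soft-masks interrupts in the PACA; the panic path wants MSR[EE] genuinely cleared. For reference, the registration the hunk relies on follows the standard panic-notifier pattern (a generic sketch):

    static int my_panic_event(struct notifier_block *nb,
                              unsigned long event, void *ptr)
    {
            /* runs on the panicking CPU; keep it simple and non-blocking */
            return NOTIFY_DONE;
    }

    static struct notifier_block my_panic_block = {
            .notifier_call  = my_panic_event,
    };

    /* at init time: */
    atomic_notifier_chain_register(&panic_notifier_list, &my_panic_block);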
index 7a7ce8ad455e1533498fc3c7a5d8a853abb4d9cd..225bc5f91049436277e7c45787d8a7370d6dac78 100644 (file)
@@ -387,6 +387,14 @@ void early_setup_secondary(void)
 
 #endif /* CONFIG_SMP */
 
+void panic_smp_self_stop(void)
+{
+       hard_irq_disable();
+       spin_begin();
+       while (1)
+               spin_cpu_relax();
+}
+
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
 static bool use_spinloop(void)
 {
index 17fe4339ba596150e8dc2b0eaf698aee3303ad18..b3e8db376ecde459bb8b5a1cd00b10c9606df289 100644 (file)
@@ -134,7 +134,7 @@ static void do_signal(struct task_struct *tsk)
        /* Re-enable the breakpoints for the signal stack */
        thread_change_pc(tsk, tsk->thread.regs);
 
-       rseq_signal_deliver(tsk->thread.regs);
+       rseq_signal_deliver(&ksig, tsk->thread.regs);
 
        if (is32) {
                if (ksig.ka.sa.sa_flags & SA_SIGINFO)
@@ -170,7 +170,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               rseq_handle_notify_resume(regs);
+               rseq_handle_notify_resume(NULL, regs);
        }
 
        user_enter();
index 5eedbb282d42fcf2caed7f3d09d0227b2e9e0734..e6474a45cef50623be68bc1fbf0b83635275dceb 100644 (file)
@@ -1038,9 +1038,6 @@ static int do_setcontext_tm(struct ucontext __user *ucp,
 }
 #endif
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 #ifdef CONFIG_PPC64
 COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
                       struct ucontext __user *, new_ctx, int, ctx_size)
@@ -1134,7 +1131,6 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
        set_thread_flag(TIF_RESTOREALL);
        return 0;
 }
-#pragma GCC diagnostic pop
 
 #ifdef CONFIG_PPC64
 COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
@@ -1231,9 +1227,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
        return 0;
 }
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 #ifdef CONFIG_PPC32
 SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
                         int, ndbg, struct sig_dbg_op __user *, dbg)
@@ -1337,7 +1330,6 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
        return 0;
 }
 #endif
-#pragma GCC diagnostic pop
 
 /*
  * OK, we're invoking a handler
index d42b600203892d57d7fb3398f7cad38090df9ce6..83d51bf586c7e1ec3697a424a33a1559579147b8 100644 (file)
@@ -625,9 +625,6 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
 /*
  * Handle {get,set,swap}_context operations
  */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
                struct ucontext __user *, new_ctx, long, ctx_size)
 {
@@ -693,7 +690,6 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
        set_thread_flag(TIF_RESTOREALL);
        return 0;
 }
-#pragma GCC diagnostic pop
 
 
 /*
index 5eadfffabe35134f6f34a6acca61c738c4efcbc9..4794d6b4f4d27a4db7f637a309897d64f1ad9e9c 100644 (file)
@@ -600,9 +600,6 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
        nmi_ipi_busy_count--;
        nmi_ipi_unlock();
 
-       /* Remove this CPU */
-       set_cpu_online(smp_processor_id(), false);
-
        spin_begin();
        while (1)
                spin_cpu_relax();
@@ -617,9 +614,6 @@ void smp_send_stop(void)
 
 static void stop_this_cpu(void *dummy)
 {
-       /* Remove this CPU */
-       set_cpu_online(smp_processor_id(), false);
-
        hard_irq_disable();
        spin_begin();
        while (1)
index 07e97f289c5207389ffb817330e5d66a4beb6e70..e2c50b55138f8ab52eecace4c6aad72c382e6bcd 100644 (file)
@@ -196,7 +196,7 @@ save_stack_trace_tsk_reliable(struct task_struct *tsk,
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable);
 #endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
 static void handle_backtrace_ipi(struct pt_regs *regs)
 {
        nmi_cpu_backtrace(regs);
@@ -242,4 +242,4 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
        nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
 }
-#endif /* CONFIG_PPC64 */
+#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */
index 083fa06962fda045cb5f00ac0ea5b61046e3d4c4..466216506eb2f4bfa7b6b94ed89140914b1ea682 100644 (file)
@@ -62,9 +62,6 @@ static inline long do_mmap2(unsigned long addr, size_t len,
        return ret;
 }
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE6(mmap2, unsigned long, addr, size_t, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, pgoff)
@@ -78,7 +75,6 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len,
 {
        return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT);
 }
-#pragma GCC diagnostic pop
 
 #ifdef CONFIG_PPC32
 /*
index 7c5f479c5c00fb0f562801285e3795400edab084..8a9a49c138652ba2b971a265db233988e01aa7b1 100644 (file)
@@ -337,7 +337,8 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
        if (shift >= pdshift)
                hugepd_free(tlb, hugepte);
        else
-               pgtable_free_tlb(tlb, hugepte, pdshift - shift);
+               pgtable_free_tlb(tlb, hugepte,
+                                get_hugepd_cache_index(pdshift - shift));
 }
 
 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
index c1f4ca45c93a488df07d66525f0d935ca342f84c..4afbfbb64bfd0a21254a177f4fa3df3c37bff6ea 100644 (file)
@@ -409,6 +409,18 @@ static inline void pgtable_free(void *table, int index)
        case PUD_INDEX:
                kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
                break;
+#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
+               /* 16M hugepd directory at pud level */
+       case HTLB_16M_INDEX:
+               BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
+               kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
+               break;
+               /* 16G hugepd directory at the pgd level */
+       case HTLB_16G_INDEX:
+               BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
+               kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
+               break;
+#endif
                /* We don't free pgd table via RCU callback */
        default:
                BUG();
index 75cb646a79c383bc39c578a49ddf48a23ee9c44b..9d16ee251fc0131118c375282b2c3e103a2e0b0f 100644 (file)
@@ -186,9 +186,6 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
  * in a 2-bit field won't allow writes to a page that is otherwise
  * write-protected.
  */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
                unsigned long, len, u32 __user *, map)
 {
@@ -272,4 +269,3 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
        up_write(&mm->mmap_sem);
        return err;
 }
-#pragma GCC diagnostic pop
index 67a6e86d3e7efb25e170af7218453230703aa4a5..1135b43a597c5045be9a0425b67a5e5edd17d876 100644 (file)
@@ -689,22 +689,17 @@ EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
 static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
 
-void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-                    unsigned long end)
+static inline void __radix__flush_tlb_range(struct mm_struct *mm,
+                                       unsigned long start, unsigned long end,
+                                       bool flush_all_sizes)
 
 {
-       struct mm_struct *mm = vma->vm_mm;
        unsigned long pid;
        unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
        unsigned long page_size = 1UL << page_shift;
        unsigned long nr_pages = (end - start) >> page_shift;
        bool local, full;
 
-#ifdef CONFIG_HUGETLB_PAGE
-       if (is_vm_hugetlb_page(vma))
-               return radix__flush_hugetlb_tlb_range(vma, start, end);
-#endif
-
        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;
@@ -738,37 +733,64 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                                _tlbie_pid(pid, RIC_FLUSH_TLB);
                }
        } else {
-               bool hflush = false;
+               bool hflush = flush_all_sizes;
+               bool gflush = flush_all_sizes;
                unsigned long hstart, hend;
+               unsigned long gstart, gend;
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-               hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
-               hend = end >> HPAGE_PMD_SHIFT;
-               if (hstart < hend) {
-                       hstart <<= HPAGE_PMD_SHIFT;
-                       hend <<= HPAGE_PMD_SHIFT;
+               if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                        hflush = true;
+
+               if (hflush) {
+                       hstart = (start + PMD_SIZE - 1) & PMD_MASK;
+                       hend = end & PMD_MASK;
+                       if (hstart == hend)
+                               hflush = false;
+               }
+
+               if (gflush) {
+                       gstart = (start + PUD_SIZE - 1) & PUD_MASK;
+                       gend = end & PUD_MASK;
+                       if (gstart == gend)
+                               gflush = false;
                }
-#endif
 
                asm volatile("ptesync": : :"memory");
                if (local) {
                        __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
                        if (hflush)
                                __tlbiel_va_range(hstart, hend, pid,
-                                               HPAGE_PMD_SIZE, MMU_PAGE_2M);
+                                               PMD_SIZE, MMU_PAGE_2M);
+                       if (gflush)
+                               __tlbiel_va_range(gstart, gend, pid,
+                                               PUD_SIZE, MMU_PAGE_1G);
                        asm volatile("ptesync": : :"memory");
                } else {
                        __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
                        if (hflush)
                                __tlbie_va_range(hstart, hend, pid,
-                                               HPAGE_PMD_SIZE, MMU_PAGE_2M);
+                                               PMD_SIZE, MMU_PAGE_2M);
+                       if (gflush)
+                               __tlbie_va_range(gstart, gend, pid,
+                                               PUD_SIZE, MMU_PAGE_1G);
                        fixup_tlbie();
                        asm volatile("eieio; tlbsync; ptesync": : :"memory");
                }
        }
        preempt_enable();
 }
+
+void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                    unsigned long end)
+
+{
+#ifdef CONFIG_HUGETLB_PAGE
+       if (is_vm_hugetlb_page(vma))
+               return radix__flush_hugetlb_tlb_range(vma, start, end);
+#endif
+
+       __radix__flush_tlb_range(vma->vm_mm, start, end, false);
+}
 EXPORT_SYMBOL(radix__flush_tlb_range);
 
 static int radix_get_mmu_psize(int page_size)
@@ -837,6 +859,8 @@ void radix__tlb_flush(struct mmu_gather *tlb)
        int psize = 0;
        struct mm_struct *mm = tlb->mm;
        int page_size = tlb->page_size;
+       unsigned long start = tlb->start;
+       unsigned long end = tlb->end;
 
        /*
         * if page size is not something we understand, do a full mm flush
@@ -847,15 +871,45 @@ void radix__tlb_flush(struct mmu_gather *tlb)
         */
        if (tlb->fullmm) {
                __flush_all_mm(mm, true);
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
+       } else if (mm_tlb_flush_nested(mm)) {
+               /*
+                * If there is a concurrent invalidation that is clearing ptes,
+                * then it's possible this invalidation will miss one of those
+                * cleared ptes and miss flushing the TLB. If this invalidate
+                * returns before the other one flushes TLBs, that can result
+                * in it returning while there are still valid TLBs inside the
+                * range to be invalidated.
+                *
+                * See mm/memory.c:tlb_finish_mmu() for more details.
+                *
+                * The solution to this is to ensure the entire range is always
+                * flushed here. The problem for powerpc is that the flushes
+                * are page size specific, so this "forced flush" would not
+                * do the right thing if there are a mix of page sizes in
+                * the range to be invalidated. So use __flush_tlb_range
+                * which invalidates all possible page sizes in the range.
+                *
+                * A PWC flush is probably not required because the core code
+                * shouldn't free page tables in this path, but accounting
+                * for the possibility makes us a bit more robust.
+                *
+                * need_flush_all is an uncommon case because page table
+                * teardown should be done with exclusive locks held (but
+                * after locks are dropped another invalidate could come
+                * in), so it could be optimized further if necessary.
+                */
+               if (!tlb->need_flush_all)
+                       __radix__flush_tlb_range(mm, start, end, true);
+               else
+                       radix__flush_all_mm(mm);
+#endif
        } else if ( (psize = radix_get_mmu_psize(page_size)) == -1) {
                if (!tlb->need_flush_all)
                        radix__flush_tlb_mm(mm);
                else
                        radix__flush_all_mm(mm);
        } else {
-               unsigned long start = tlb->start;
-               unsigned long end = tlb->end;
-
                if (!tlb->need_flush_all)
                        radix__flush_tlb_range_psize(mm, start, end, psize);
                else
@@ -1043,6 +1097,8 @@ extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
                for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
                        if (sib == cpu)
                                continue;
+                       if (!cpu_possible(sib))
+                               continue;
                        if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
                                flush = true;
                }
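The hstart/hend (and gstart/gend) computations above trim the range to whole 2M (respectively 1G) units: round the start up, round the end down, and skip the huge-page flush when nothing whole remains. A worked example with illustrative addresses:

    /* PMD_SIZE = 2M, flushing [0x12345000, 0x12c00000)                 */
    /* hstart = (0x12345000 + 0x1fffff) & ~0x1fffffUL = 0x12400000      */
    /* hend   =  0x12c00000             & ~0x1fffffUL = 0x12c00000      */
    /* hstart != hend, so the MMU_PAGE_2M tlbie range is issued as well */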
index 7c968e46736faa598861259d5a3781577256a7ff..12e6e4d3060236a05fcd987690b385576d8821cb 100644 (file)
 #define DBG(x...)
 #endif
 
-/* Apparently the RTC stores seconds since 1 Jan 1904 */
+/*
+ * Offset between Unix time (1970-based) and Mac time (1904-based). Cuda and PMU
+ * times wrap in 2040. If we need to handle later times, the read_time functions
+ * need to be changed to interpret wrapped times as post-2040.
+ */
 #define RTC_OFFSET     2082844800
 
 /*
@@ -97,8 +101,11 @@ static time64_t cuda_get_time(void)
        if (req.reply_len != 7)
                printk(KERN_ERR "cuda_get_time: got %d byte reply\n",
                       req.reply_len);
-       now = (req.reply[3] << 24) + (req.reply[4] << 16)
-               + (req.reply[5] << 8) + req.reply[6];
+       now = (u32)((req.reply[3] << 24) + (req.reply[4] << 16) +
+                   (req.reply[5] << 8) + req.reply[6]);
+       /* it's either after year 2040, or the RTC has gone backwards */
+       WARN_ON(now < RTC_OFFSET);
+
        return now - RTC_OFFSET;
 }
 
@@ -106,10 +113,10 @@ static time64_t cuda_get_time(void)
 
 static int cuda_set_rtc_time(struct rtc_time *tm)
 {
-       time64_t nowtime;
+       u32 nowtime;
        struct adb_request req;
 
-       nowtime = rtc_tm_to_time64(tm) + RTC_OFFSET;
+       nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
        if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
                         nowtime >> 24, nowtime >> 16, nowtime >> 8,
                         nowtime) < 0)
@@ -140,8 +147,12 @@ static time64_t pmu_get_time(void)
        if (req.reply_len != 4)
                printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n",
                       req.reply_len);
-       now = (req.reply[0] << 24) + (req.reply[1] << 16)
-               + (req.reply[2] << 8) + req.reply[3];
+       now = (u32)((req.reply[0] << 24) + (req.reply[1] << 16) +
+                   (req.reply[2] << 8) + req.reply[3]);
+
+       /* it's either after year 2040, or the RTC has gone backwards */
+       WARN_ON(now < RTC_OFFSET);
+
        return now - RTC_OFFSET;
 }
 
@@ -149,10 +160,10 @@ static time64_t pmu_get_time(void)
 
 static int pmu_set_rtc_time(struct rtc_time *tm)
 {
-       time64_t nowtime;
+       u32 nowtime;
        struct adb_request req;
 
-       nowtime = rtc_tm_to_time64(tm) + RTC_OFFSET;
+       nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
        if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24,
                        nowtime >> 16, nowtime >> 8, nowtime) < 0)
                return -ENXIO;
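RTC_OFFSET is the exact number of seconds between the Mac epoch (1904-01-01) and the Unix epoch (1970-01-01), which also pins down the 2040 wrap mentioned in the new comment:

    /* 1904..1969: 66 years, 17 of them leap (1904, 1908, ..., 1968)   */
    /* days    = 66 * 365 + 17 = 24107                                 */
    /* seconds = 24107 * 86400 = 2082844800  == RTC_OFFSET             */
    /* the 32-bit counter wraps 2^32 s (~136.2 years) after 1904,      */
    /* i.e. in early February 2040                                     */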
index 0563fd3e84585769f7acd0f093f30a48eaab9f54..480bb02ccacdd07de17ffda81fae140ee748a505 100644 (file)
@@ -6,36 +6,38 @@
 
 struct css_general_char {
        u64 : 12;
-       u32 dynio : 1;   /* bit 12 */
-       u32 : 4;
-       u32 eadm : 1;    /* bit 17 */
-       u32 : 23;
-       u32 aif : 1;     /* bit 41 */
-       u32 : 3;
-       u32 mcss : 1;    /* bit 45 */
-       u32 fcs : 1;     /* bit 46 */
-       u32 : 1;
-       u32 ext_mb : 1;  /* bit 48 */
-       u32 : 7;
-       u32 aif_tdd : 1; /* bit 56 */
-       u32 : 1;
-       u32 qebsm : 1;   /* bit 58 */
-       u32 : 2;
-       u32 aiv : 1;     /* bit 61 */
-       u32 : 5;
-       u32 aif_osa : 1; /* bit 67 */
-       u32 : 12;
-       u32 eadm_rf : 1; /* bit 80 */
-       u32 : 1;
-       u32 cib : 1;     /* bit 82 */
-       u32 : 5;
-       u32 fcx : 1;     /* bit 88 */
-       u32 : 19;
-       u32 alt_ssi : 1; /* bit 108 */
-       u32 : 1;
-       u32 narf : 1;    /* bit 110 */
-       u32 : 12;
-       u32 util_str : 1;/* bit 123 */
+       u64 dynio : 1;   /* bit 12 */
+       u64 : 4;
+       u64 eadm : 1;    /* bit 17 */
+       u64 : 23;
+       u64 aif : 1;     /* bit 41 */
+       u64 : 3;
+       u64 mcss : 1;    /* bit 45 */
+       u64 fcs : 1;     /* bit 46 */
+       u64 : 1;
+       u64 ext_mb : 1;  /* bit 48 */
+       u64 : 7;
+       u64 aif_tdd : 1; /* bit 56 */
+       u64 : 1;
+       u64 qebsm : 1;   /* bit 58 */
+       u64 : 2;
+       u64 aiv : 1;     /* bit 61 */
+       u64 : 2;
+
+       u64 : 3;
+       u64 aif_osa : 1; /* bit 67 */
+       u64 : 12;
+       u64 eadm_rf : 1; /* bit 80 */
+       u64 : 1;
+       u64 cib : 1;     /* bit 82 */
+       u64 : 5;
+       u64 fcx : 1;     /* bit 88 */
+       u64 : 19;
+       u64 alt_ssi : 1; /* bit 108 */
+       u64 : 1;
+       u64 narf : 1;    /* bit 110 */
+       u64 : 12;
+       u64 util_str : 1;/* bit 123 */
 } __packed;
 
 extern struct css_general_char css_general_characteristics;
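Switching every member from u32 to u64 removes an implementation-defined layout hazard: bit-field placement depends on the declared type's allocation unit, and mixing u32 fields into a structure whose documented bit numbers run across 64-bit words can shift fields away from where the architecture defines them. The added blank line splits the padding at bit 64 for the same reason, since a field may not reliably straddle its unit boundary (the old u32 : 5 did). A sketch of the hazard (illustrative, not the css struct; exact placement is implementation-defined):

    struct demo {
            u64 pad : 60;
            u64 f   : 8;    /* does not fit in the remaining 4 bits of the
                             * first u64; a non-packed ABI places it at
                             * bits 64..71 rather than straddling */
    };

With one declared type throughout, the offsets track the documented bit numbers, which one could pin down with BUILD_BUG_ON(sizeof(struct css_general_char) != 16).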
index 3510c0fd06f4004aeccc4d1e21dded8f5ca8a00a..39d901476ee5d351f1e34a66f0828ca9d9fe522b 100644 (file)
 
 #define SO_ZEROCOPY            60
 
+#define SO_TXTIME              61
+#define SCM_TXTIME             SO_TXTIME
+
 #endif /* _ASM_SOCKET_H */
index d2db8acb1a55480895e38fdf142c3d074610230d..5f0234ec8038eb2d11e93b190f3f35e29f29207b 100644 (file)
@@ -1286,6 +1286,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
                goto free_addrs;
        }
        if (bpf_jit_prog(&jit, fp)) {
+               bpf_jit_binary_free(header);
                fp = orig_fp;
                goto free_addrs;
        }
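The added bpf_jit_binary_free() plugs a leak: the image header was allocated before this final pass, so the failure path must release it before falling back to the original program. The general shape of the idiom (a sketch; do_final_pass() is a hypothetical stand-in for bpf_jit_prog()):

    header = bpf_jit_binary_alloc(size, &image_ptr, align, fill_insn);
    if (!header)
            goto free_addrs;                /* nothing extra to undo yet */
    if (do_final_pass(&jit, fp)) {
            bpf_jit_binary_free(header);    /* undo the allocation first */
            fp = orig_fp;
            goto free_addrs;
    }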
index d58520c2e6ff2c77788d3546d17c960fd067714f..7ea35e5601b6bed654363456c4127f28beb7cada 100644 (file)
 
 #define SO_ZEROCOPY            0x003e
 
+#define SO_TXTIME              0x003f
+#define SCM_TXTIME             SO_TXTIME
+
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION             0x5001
 #define SO_SECURITY_ENCRYPTION_TRANSPORT       0x5002
index f0a6ea22429d7384d81f81e38fb39dbfc9e720ed..a08e82856563ddc34079e96b592e5c60edbc30aa 100644 (file)
@@ -258,11 +258,6 @@ archscripts: scripts_basic
 archheaders:
        $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
 
-archprepare:
-ifeq ($(CONFIG_KEXEC_FILE),y)
-       $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
-endif
-
 ###
 # Kernel objects
 
@@ -327,7 +322,6 @@ archclean:
        $(Q)rm -rf $(objtree)/arch/x86_64
        $(Q)$(MAKE) $(clean)=$(boot)
        $(Q)$(MAKE) $(clean)=arch/x86/tools
-       $(Q)$(MAKE) $(clean)=arch/x86/purgatory
 
 define archhelp
   echo  '* bzImage      - Compressed kernel image (arch/x86/boot/bzImage)'
index a8a8642d2b0b802424caf7b4f925edbce7e3284e..e57665b4ba1cbf9cf8ff70901d0434be510609c7 100644 (file)
@@ -118,7 +118,7 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
        void *romimage;
 
        status = efi_call_proto(efi_pci_io_protocol, attributes, pci,
-                               EfiPciIoAttributeOperationGet, 0, 0,
+                               EfiPciIoAttributeOperationGet, 0ULL,
                                &attributes);
        if (status != EFI_SUCCESS)
                return status;
index 92190879b228c82f4ec681aa9c07bccc0e32204a..3b2490b81918128a61f6df1807788436d4f8ceb7 100644 (file)
@@ -164,7 +164,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
                if (cached_flags & _TIF_NOTIFY_RESUME) {
                        clear_thread_flag(TIF_NOTIFY_RESUME);
                        tracehook_notify_resume(regs);
-                       rseq_handle_notify_resume(regs);
+                       rseq_handle_notify_resume(NULL, regs);
                }
 
                if (cached_flags & _TIF_USER_RETURN_NOTIFY)
index 2582881d19ceeeb75a9a90588547914ccdefdbd0..c371bfee137ac976a01716118faf6cf490a3b5aa 100644 (file)
@@ -477,7 +477,7 @@ ENTRY(entry_SYSENTER_32)
         * whereas POPF does not.)
         */
        addl    $PT_EFLAGS-PT_DS, %esp  /* point esp at pt_regs->flags */
-       btr     $X86_EFLAGS_IF_BIT, (%esp)
+       btrl    $X86_EFLAGS_IF_BIT, (%esp)
        popfl
 
        /*
index 9de7f1e1dede7f6e6ebdc66e5f63756a173cdc0a..7d0df78db727296d1c4451e3a930033669f47aa3 100644 (file)
@@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat)
        pushq   %rdx                    /* pt_regs->dx */
        pushq   %rcx                    /* pt_regs->cx */
        pushq   $-ENOSYS                /* pt_regs->ax */
-       pushq   %r8                     /* pt_regs->r8 */
+       pushq   $0                      /* pt_regs->r8  = 0 */
        xorl    %r8d, %r8d              /* nospec   r8 */
-       pushq   %r9                     /* pt_regs->r9 */
+       pushq   $0                      /* pt_regs->r9  = 0 */
        xorl    %r9d, %r9d              /* nospec   r9 */
-       pushq   %r10                    /* pt_regs->r10 */
+       pushq   $0                      /* pt_regs->r10 = 0 */
        xorl    %r10d, %r10d            /* nospec   r10 */
-       pushq   %r11                    /* pt_regs->r11 */
+       pushq   $0                      /* pt_regs->r11 = 0 */
        xorl    %r11d, %r11d            /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
        xorl    %ebx, %ebx              /* nospec   rbx */
@@ -374,13 +374,13 @@ ENTRY(entry_INT80_compat)
        pushq   %rcx                    /* pt_regs->cx */
        xorl    %ecx, %ecx              /* nospec   cx */
        pushq   $-ENOSYS                /* pt_regs->ax */
-       pushq   $0                      /* pt_regs->r8  = 0 */
+       pushq   %r8                     /* pt_regs->r8 */
        xorl    %r8d, %r8d              /* nospec   r8 */
-       pushq   $0                      /* pt_regs->r9  = 0 */
+       pushq   %r9                     /* pt_regs->r9 */
        xorl    %r9d, %r9d              /* nospec   r9 */
-       pushq   $0                      /* pt_regs->r10 = 0 */
+       pushq   %r10                    /* pt_regs->r10 */
        xorl    %r10d, %r10d            /* nospec   r10 */
-       pushq   $0                      /* pt_regs->r11 = 0 */
+       pushq   %r11                    /* pt_regs->r11 */
        xorl    %r11d, %r11d            /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
        xorl    %ebx, %ebx              /* nospec   rbx */
index 042b5e892ed1063769b253bdf35e31171eb55c4d..14de0432d288414bd1437e44b8cb13facc6f12e9 100644 (file)
@@ -38,7 +38,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 {
        unsigned long mask;
 
-       asm ("cmp %1,%2; sbb %0,%0;"
+       asm volatile ("cmp %1,%2; sbb %0,%0;"
                        :"=r" (mask)
                        :"g"(size),"r" (index)
                        :"cc");
index ada6410fd2ecf6fdde039a185ce570b20af53fca..fbd578daa66e97416058e961dd440774fa9ed586 100644 (file)
@@ -184,6 +184,9 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
 
 static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
 {
+       if (!pgtable_l5_enabled())
+               return;
+
        BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
        free_page((unsigned long)p4d);
 }
index 99ecde23c3ec03e02a9aba157e4b31e3c6f53ed7..5715647fc4feca86c8b00e299b347ee602b1b4e6 100644 (file)
@@ -898,7 +898,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
 #define pgd_page(pgd)  pfn_to_page(pgd_pfn(pgd))
 
 /* to find an entry in a page-table-directory. */
-static __always_inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
 {
        if (!pgtable_l5_enabled())
                return (p4d_t *)pgd;
index 0fdcd21dadbd6422bf40f5cbb2361c08c5fafc14..3c5385f9a88fc1e78729647566d819abbd210b42 100644 (file)
@@ -216,7 +216,7 @@ static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
 }
 #endif
 
-static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
+static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
 {
        pgd_t pgd;
 
@@ -230,7 +230,7 @@ static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
        *p4dp = native_make_p4d(native_pgd_val(pgd));
 }
 
-static __always_inline void native_p4d_clear(p4d_t *p4d)
+static inline void native_p4d_clear(p4d_t *p4d)
 {
        native_set_p4d(p4d, native_make_p4d(0));
 }
index 425e6b8b95478248dd3a32122b1aca408691cadf..6aa8499e1f62042a510aaafb2a1ef1e3a89804bd 100644 (file)
 #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK    0x0000001f
 #define VMX_MISC_SAVE_EFER_LMA                 0x00000020
 #define VMX_MISC_ACTIVITY_HLT                  0x00000040
+#define VMX_MISC_ZERO_LEN_INS                  0x40000000
 
 /* VMFUNC functions */
 #define VMX_VMFUNC_EPTP_SWITCHING               0x00000001
@@ -351,11 +352,13 @@ enum vmcs_field {
 #define VECTORING_INFO_VALID_MASK              INTR_INFO_VALID_MASK
 
 #define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
+#define INTR_TYPE_RESERVED              (1 << 8) /* reserved */
 #define INTR_TYPE_NMI_INTR             (2 << 8) /* NMI */
 #define INTR_TYPE_HARD_EXCEPTION       (3 << 8) /* processor exception */
 #define INTR_TYPE_SOFT_INTR             (4 << 8) /* software interrupt */
 #define INTR_TYPE_PRIV_SW_EXCEPTION    (5 << 8) /* ICE breakpoint - undocumented */
 #define INTR_TYPE_SOFT_EXCEPTION       (6 << 8) /* software exception */
+#define INTR_TYPE_OTHER_EVENT           (7 << 8) /* other event */
 
 /* GUEST_INTERRUPTIBILITY_INFO flags. */
 #define GUEST_INTR_STATE_STI           0x00000001
index efaf2d4f9c3c7983221298c2ecb37ce367345b1e..d492752f79e1b9f120de025a9fa89b692e720705 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/delay.h>
 #include <linux/crash_dump.h>
 #include <linux/reboot.h>
+#include <linux/memory.h>
 
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
@@ -392,6 +393,51 @@ extern int uv_hub_info_version(void)
 }
 EXPORT_SYMBOL(uv_hub_info_version);
 
+/* Default UV memory block size is 2GB */
+static unsigned long mem_block_size = (2UL << 30);
+
+/* Kernel parameter to specify UV mem block size */
+static int parse_mem_block_size(char *ptr)
+{
+       unsigned long size = memparse(ptr, NULL);
+
+       /* Size will be rounded down by set_block_size() below */
+       mem_block_size = size;
+       return 0;
+}
+early_param("uv_memblksize", parse_mem_block_size);
+
+static __init int adj_blksize(u32 lgre)
+{
+       unsigned long base = (unsigned long)lgre << UV_GAM_RANGE_SHFT;
+       unsigned long size;
+
+       for (size = mem_block_size; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1)
+               if (IS_ALIGNED(base, size))
+                       break;
+
+       if (size >= mem_block_size)
+               return 0;
+
+       mem_block_size = size;
+       return 1;
+}
+
+static __init void set_block_size(void)
+{
+       unsigned int order = ffs(mem_block_size);
+
+       if (order) {
+               /* adjust for ffs return of 1..64 */
+               set_memory_block_size_order(order - 1);
+               pr_info("UV: mem_block_size set to 0x%lx\n", mem_block_size);
+       } else {
+               /* bad or zero value, default to 1UL << 31 (2GB) */
+               pr_err("UV: mem_block_size error with 0x%lx\n", mem_block_size);
+               set_memory_block_size_order(31);
+       }
+}
+
 /* Build GAM range lookup table: */
 static __init void build_uv_gr_table(void)
 {
@@ -1180,23 +1226,30 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
                                        << UV_GAM_RANGE_SHFT);
                int order = 0;
                char suffix[] = " KMGTPE";
+               int flag = ' ';
 
                while (size > 9999 && order < sizeof(suffix)) {
                        size /= 1024;
                        order++;
                }
 
+               /* adjust max block size to current range start */
+               if (gre->type == 1 || gre->type == 2)
+                       if (adj_blksize(lgre))
+                               flag = '*';
+
                if (!index) {
                        pr_info("UV: GAM Range Table...\n");
-                       pr_info("UV:  # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
+                       pr_info("UV:  # %20s %14s %6s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
                }
-               pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d   %04x  %02x %02x\n",
+               pr_info("UV: %2d: 0x%014lx-0x%014lx%c %5lu%c %3d   %04x  %02x %02x\n",
                        index++,
                        (unsigned long)lgre << UV_GAM_RANGE_SHFT,
                        (unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
-                       size, suffix[order],
+                       flag, size, suffix[order],
                        gre->type, gre->nasid, gre->sockid, gre->pnode);
 
+               /* update to next range start */
                lgre = gre->limit;
                if (sock_min > gre->sockid)
                        sock_min = gre->sockid;
@@ -1427,6 +1480,7 @@ static void __init uv_system_init_hub(void)
 
        build_socket_tables();
        build_uv_gr_table();
+       set_block_size();
        uv_init_hub_info(&hub_info);
        uv_possible_blades = num_possible_nodes();
        if (!_node_to_pnode)
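
Taken together, the UV hunks let `uv_memblksize=` on the kernel command line override the default 2 GB memory block size, with adj_blksize() halving the value until it divides each GAM range base. memparse() accepts the usual K/M/G suffixes, so `uv_memblksize=512m` requests a 512 MB block size. The rounding rule, distilled into a standalone hypothetical helper:

    /* Distilled model of adj_blksize(): halve the block size until the
     * range base address is aligned to it, but never below min_blk. */
    static unsigned long fit_block_size(unsigned long base, unsigned long blk,
                                        unsigned long min_blk)
    {
            unsigned long size;

            for (size = blk; size > min_blk; size >>= 1)
                    if ((base & (size - 1)) == 0)   /* IS_ALIGNED(base, size) */
                            break;
            return size;
    }
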
index cd0fda1fff6d3800fbbbf59a19711eba2df0f96c..404df26b7de89f03d495234c81247198dfadbad5 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm/pgtable.h>
 #include <asm/set_memory.h>
 #include <asm/intel-family.h>
+#include <asm/hypervisor.h>
 
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
@@ -664,6 +665,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");
 
+               if (hypervisor_is_type(X86_HYPER_XEN_PV))
+                       return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
+
                break;
 
        case X86_BUG_SPECTRE_V1:
index 38354c66df81144b7d2998ee42fce7a6b15485cd..0c5fcbd998cf11badefad906a2122400a3512d58 100644 (file)
@@ -671,7 +671,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
                        num_sharing_cache = ((eax >> 14) & 0xfff) + 1;
 
                if (num_sharing_cache) {
-                       int bits = get_count_order(num_sharing_cache) - 1;
+                       int bits = get_count_order(num_sharing_cache);
 
                        per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
                }
index 0df7151cfef42cb908c9d76f0b4e78db1620f615..eb4cb3efd20e4cd5ec31d90c1dcf88b62b94622b 100644 (file)
@@ -1,3 +1,6 @@
+/* cpu_feature_enabled() cannot be used this early */
+#define USE_EARLY_PGTABLE_L5
+
 #include <linux/bootmem.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
index 5bbd06f38ff68f58d1efc980db0fd9fc0af7d89a..f34d89c01edc5c761e0df331da1331f8a0f98f3a 100644 (file)
@@ -160,6 +160,11 @@ static struct severity {
                SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
                USER
                ),
+       MCESEV(
+               PANIC, "Data load in unrecoverable area of kernel",
+               SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
+               KERNEL
+               ),
 #endif
        MCESEV(
                PANIC, "Action required: unknown MCACOD",
index e4cf6ff1c2e1d341bb5ca890cd8dba266ce2aa18..c102ad51025e865c74e004fa4dc69f9c89a2d034 100644 (file)
@@ -772,23 +772,25 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
                          struct pt_regs *regs)
 {
-       int i, ret = 0;
        char *tmp;
+       int i;
 
        for (i = 0; i < mca_cfg.banks; i++) {
                m->status = mce_rdmsrl(msr_ops.status(i));
-               if (m->status & MCI_STATUS_VAL) {
-                       __set_bit(i, validp);
-                       if (quirk_no_way_out)
-                               quirk_no_way_out(i, m, regs);
-               }
+               if (!(m->status & MCI_STATUS_VAL))
+                       continue;
+
+               __set_bit(i, validp);
+               if (quirk_no_way_out)
+                       quirk_no_way_out(i, m, regs);
 
                if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+                       mce_read_aux(m, i);
                        *msg = tmp;
-                       ret = 1;
+                       return 1;
                }
        }
-       return ret;
+       return 0;
 }
 
 /*
@@ -1205,13 +1207,18 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                lmce = m.mcgstatus & MCG_STATUS_LMCES;
 
        /*
+        * Local machine check may already know that we have to panic.
+        * Broadcast machine check begins rendezvous in mce_start().
         * Go through all banks in exclusion of the other CPUs. This way we
         * don't report duplicated events on shared banks because the first one
-        * to see it will clear it. If this is a Local MCE, then no need to
-        * perform rendezvous.
+        * to see it will clear it.
         */
-       if (!lmce)
+       if (lmce) {
+               if (no_way_out)
+                       mce_panic("Fatal local machine check", &m, msg);
+       } else {
                order = mce_start(&no_way_out);
+       }
 
        for (i = 0; i < cfg->banks; i++) {
                __clear_bit(i, toclear);
@@ -1287,12 +1294,17 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                        no_way_out = worst >= MCE_PANIC_SEVERITY;
        } else {
                /*
-                * Local MCE skipped calling mce_reign()
-                * If we found a fatal error, we need to panic here.
+                * If there was a fatal machine check we should have
+                * already called mce_panic earlier in this function.
+                * Since we re-read the banks, we might have found
+                * something new. Check again to see if we found a
+                * fatal error. We call "mce_severity()" again to
+                * make sure we have the right "msg".
                 */
-                if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
-                       mce_panic("Machine check from unknown source",
-                               NULL, NULL);
+               if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
+                       mce_severity(&m, cfg->tolerant, &msg, true);
+                       mce_panic("Local fatal machine check!", &m, msg);
+               }
        }
 
        /*
index 1c2cfa0644aa979c97cc01a42925a44c25f9f852..97ccf4c3b45bec517605813b1f24518b10466002 100644 (file)
@@ -190,8 +190,11 @@ static void save_microcode_patch(void *data, unsigned int size)
                        p = memdup_patch(data, size);
                        if (!p)
                                pr_err("Error allocating buffer %p\n", data);
-                       else
+                       else {
                                list_replace(&iter->plist, &p->plist);
+                               kfree(iter->data);
+                               kfree(iter);
+                       }
                }
        }
 
index d1f25c83144752272401afe8c8aec313d40298ae..c88c23c658c1e99faad3daa236448bc4208901d7 100644 (file)
@@ -1248,6 +1248,7 @@ void __init e820__memblock_setup(void)
 {
        int i;
        u64 end;
+       u64 addr = 0;
 
        /*
         * The bootstrap memblock region count maximum is 128 entries
@@ -1264,13 +1265,21 @@ void __init e820__memblock_setup(void)
                struct e820_entry *entry = &e820_table->entries[i];
 
                end = entry->addr + entry->size;
+               if (addr < entry->addr)
+                       memblock_reserve(addr, entry->addr - addr);
+               addr = end;
                if (end != (resource_size_t)end)
                        continue;
 
+               /*
+                * all !E820_TYPE_RAM ranges (including gap ranges) are put
+                * into memblock.reserved to make sure that struct pages in
+                * such regions are not left uninitialized after bootup.
+                */
                if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
-                       continue;
-
-               memblock_add(entry->addr, entry->size);
+                       memblock_reserve(entry->addr, entry->size);
+               else
+                       memblock_add(entry->addr, entry->size);
        }
 
        /* Throw away partial pages: */
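
The rewritten loop changes the memblock policy: E820 RAM (and RESERVED_KERN) ranges still go into memblock.memory via memblock_add(), while every other type, plus any gap between consecutive entries, is now memblock_reserve()d so their struct pages get initialized at boot. A distilled model of the walk (hypothetical types; the real code also skips entries whose end overflows resource_size_t):

    struct range { unsigned long long addr, size; int is_ram; };

    static void register_ranges(const struct range *tbl, int n)
    {
            unsigned long long cursor = 0;
            int i;

            for (i = 0; i < n; i++) {
                    /* Reserve any hole between the previous entry and this one. */
                    if (cursor < tbl[i].addr)
                            memblock_reserve(cursor, tbl[i].addr - cursor);
                    cursor = tbl[i].addr + tbl[i].size;

                    if (tbl[i].is_ram)
                            memblock_add(tbl[i].addr, tbl[i].size);
                    else
                            memblock_reserve(tbl[i].addr, tbl[i].size);
            }
    }
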
index a21d6ace648e3006045f5bd13578f3b29d4ea0bf..8047379e575ad39cb47cdbb055131e9bb094bb4d 100644 (file)
@@ -44,7 +44,7 @@ static unsigned int __initdata next_early_pgt;
 pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
 
 #ifdef CONFIG_X86_5LEVEL
-unsigned int __pgtable_l5_enabled __initdata;
+unsigned int __pgtable_l5_enabled __ro_after_init;
 unsigned int pgdir_shift __ro_after_init = 39;
 EXPORT_SYMBOL(pgdir_shift);
 unsigned int ptrs_per_p4d __ro_after_init = 1;
index 697a4ce0430827c89be2cbd86caedfac97e884f7..736348ead4218a0007b715efbc1d56bd1bb73e65 100644 (file)
@@ -645,12 +645,19 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
 /* Skylake */
 static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
 {
-       u32 capid0;
+       u32 capid0, capid5;
 
        pci_read_config_dword(pdev, 0x84, &capid0);
+       pci_read_config_dword(pdev, 0x98, &capid5);
 
-       if ((capid0 & 0xc0) == 0xc0)
+       /*
+        * CAPID0{7:6} indicate whether this is an advanced RAS SKU
+        * CAPID5{8:5} indicate that various NVDIMM usage modes are
+        * enabled, so memory machine check recovery is also enabled.
+        */
+       if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
                static_branch_inc(&mcsafe_key);
+
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
index 445ca11ff8634eb27fb93f93fe362fc4bffaf588..92a3b312a53c465bbde5f006b5707b62671a49ae 100644 (file)
@@ -692,7 +692,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
         * Increment event counter and perform fixup for the pre-signal
         * frame.
         */
-       rseq_signal_deliver(regs);
+       rseq_signal_deliver(ksig, regs);
 
        /* Set up the stack frame */
        if (is_ia32_frame(ksig)) {
index a535dd64de6397b02b3f53cd685584ebf7ebf445..e6db475164edec4f33e6f056cde5cbdfbe51a556 100644 (file)
@@ -835,16 +835,18 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
        char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
                                                "simd exception";
 
-       if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
-               return;
        cond_local_irq_enable(regs);
 
        if (!user_mode(regs)) {
-               if (!fixup_exception(regs, trapnr)) {
-                       task->thread.error_code = error_code;
-                       task->thread.trap_nr = trapnr;
+               if (fixup_exception(regs, trapnr))
+                       return;
+
+               task->thread.error_code = error_code;
+               task->thread.trap_nr = trapnr;
+
+               if (notify_die(DIE_TRAP, str, regs, error_code,
+                                       trapnr, SIGFPE) != NOTIFY_STOP)
                        die(str, regs, error_code);
-               }
                return;
        }
 
index 58d8d800875d0c6a3789a0406fec1eed366eecfc..deb576b23b7cf49817533d00555d0dc976c42486 100644 (file)
@@ -293,7 +293,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
        insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
        /* has the side-effect of processing the entire instruction */
        insn_get_length(insn);
-       if (WARN_ON_ONCE(!insn_complete(insn)))
+       if (!insn_complete(insn))
                return -ENOEXEC;
 
        if (is_prefix_bad(insn))
index 559a12b6184de38c67ef4f2001963600f41f8753..1689f433f3a081382795ae16120710329b73a6ad 100644 (file)
@@ -1705,6 +1705,17 @@ static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
                MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
 }
 
+static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
+{
+       return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
+}
+
+static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
+{
+       return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
+                       CPU_BASED_MONITOR_TRAP_FLAG;
+}
+
 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
 {
        return vmcs12->cpu_based_vm_exec_control & bit;
@@ -11620,6 +11631,62 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
            !nested_cr3_valid(vcpu, vmcs12->host_cr3))
                return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
 
+       /*
+        * From the Intel SDM, volume 3:
+        * Fields relevant to VM-entry event injection must be set properly.
+        * These fields are the VM-entry interruption-information field, the
+        * VM-entry exception error code, and the VM-entry instruction length.
+        */
+       if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
+               u32 intr_info = vmcs12->vm_entry_intr_info_field;
+               u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
+               u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
+               bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
+               bool should_have_error_code;
+               bool urg = nested_cpu_has2(vmcs12,
+                                          SECONDARY_EXEC_UNRESTRICTED_GUEST);
+               bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
+
+               /* VM-entry interruption-info field: interruption type */
+               if (intr_type == INTR_TYPE_RESERVED ||
+                   (intr_type == INTR_TYPE_OTHER_EVENT &&
+                    !nested_cpu_supports_monitor_trap_flag(vcpu)))
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry interruption-info field: vector */
+               if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
+                   (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
+                   (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry interruption-info field: deliver error code */
+               should_have_error_code =
+                       intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
+                       x86_exception_has_error_code(vector);
+               if (has_error_code != should_have_error_code)
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry exception error code */
+               if (has_error_code &&
+                   vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry interruption-info field: reserved bits */
+               if (intr_info & INTR_INFO_RESVD_BITS_MASK)
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry instruction length */
+               switch (intr_type) {
+               case INTR_TYPE_SOFT_EXCEPTION:
+               case INTR_TYPE_SOFT_INTR:
+               case INTR_TYPE_PRIV_SW_EXCEPTION:
+                       if ((vmcs12->vm_entry_instruction_len > 15) ||
+                           (vmcs12->vm_entry_instruction_len == 0 &&
+                            !nested_cpu_has_zero_length_injection(vcpu)))
+                               return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+               }
+       }
+
        return 0;
 }
 
index 331993c49dae9bd852c759afecbb3c6c17477e15..257f27620bc272e3312295714a120de07963441f 100644 (file)
@@ -110,6 +110,15 @@ static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static inline bool x86_exception_has_error_code(unsigned int vector)
+{
+       static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
+                       BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
+                       BIT(PF_VECTOR) | BIT(AC_VECTOR);
+
+       return (1U << vector) & exception_has_error_code;
+}
+
 static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
index 9a84a0d08727b7452ebea1e2e35b5ad3eb0b6e79..2aafa6ab6103d150ad26e097221a972f5d16363f 100644 (file)
@@ -641,11 +641,6 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
        return 0;
 }
 
-static const char nx_warning[] = KERN_CRIT
-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
-static const char smep_warning[] = KERN_CRIT
-"unable to execute userspace code (SMEP?) (uid: %d)\n";
-
 static void
 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
                unsigned long address)
@@ -664,20 +659,18 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
                pte = lookup_address_in_pgd(pgd, address, &level);
 
                if (pte && pte_present(*pte) && !pte_exec(*pte))
-                       printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
+                       pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
+                               from_kuid(&init_user_ns, current_uid()));
                if (pte && pte_present(*pte) && pte_exec(*pte) &&
                                (pgd_flags(*pgd) & _PAGE_USER) &&
                                (__read_cr4() & X86_CR4_SMEP))
-                       printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
+                       pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
+                               from_kuid(&init_user_ns, current_uid()));
        }
 
-       printk(KERN_ALERT "BUG: unable to handle kernel ");
-       if (address < PAGE_SIZE)
-               printk(KERN_CONT "NULL pointer dereference");
-       else
-               printk(KERN_CONT "paging request");
-
-       printk(KERN_CONT " at %px\n", (void *) address);
+       pr_alert("BUG: unable to handle kernel %s at %px\n",
+                address < PAGE_SIZE ? "NULL pointer dereference" : "paging request",
+                (void *)address);
 
        dump_pagetable(address);
 }
index 045f492d5f68260a581f44c210aa3753dc4bc225..a688617c727e1ec3558e158156c98b4632b0d9a2 100644 (file)
@@ -1350,16 +1350,28 @@ int kern_addr_valid(unsigned long addr)
 /* Amount of ram needed to start using large blocks */
 #define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)
 
+/* Adjustable memory block size */
+static unsigned long set_memory_block_size;
+int __init set_memory_block_size_order(unsigned int order)
+{
+       unsigned long size = 1UL << order;
+
+       if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
+               return -EINVAL;
+
+       set_memory_block_size = size;
+       return 0;
+}
+
 static unsigned long probe_memory_block_size(void)
 {
        unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
        unsigned long bz;
 
-       /* If this is UV system, always set 2G block size */
-       if (is_uv_system()) {
-               bz = MAX_BLOCK_SIZE;
+       /* If memory block size has been set, then use it */
+       bz = set_memory_block_size;
+       if (bz)
                goto done;
-       }
 
        /* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
        if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
index e01f7ceb9e7a17436eb71634c5467bbb20a2a2de..77873ce700ae7d703a3385719282dbd31a092ed9 100644 (file)
@@ -166,14 +166,14 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
                pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
                set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
 
-               if (!(pgd_val(*pgd) & _PAGE_PRESENT))
+               if (!pgd_present(*pgd))
                        continue;
 
                for (i = 0; i < PTRS_PER_P4D; i++) {
                        p4d = p4d_offset(pgd,
                                         pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
 
-                       if (!(p4d_val(*p4d) & _PAGE_PRESENT))
+                       if (!p4d_present(*p4d))
                                continue;
 
                        pud = (pud_t *)p4d_page_vaddr(*p4d);
index c9081c6671f0b7a05ecfaaf206e7e1ed2b1f456a..3b5318505c69c487f8cfc9c46c93c526197caef6 100644 (file)
@@ -64,6 +64,13 @@ struct shared_info xen_dummy_shared_info;
 __read_mostly int xen_have_vector_callback;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
 
+/*
+ * NB: needs to live in .data because it's used by xen_prepare_pvh which runs
+ * before clearing the bss.
+ */
+uint32_t xen_start_flags __attribute__((section(".data"))) = 0;
+EXPORT_SYMBOL(xen_start_flags);
+
 /*
  * Point at some empty memory to start with. We map the real shared_info
  * page as soon as fixmap is up and running.
index 357969a3697cc7af6e08c12144ec06f43a8841ad..8d4e2e1ae60bc93d3391dfc8c12b10fc0557f242 100644 (file)
@@ -1203,6 +1203,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
                return;
 
        xen_domain_type = XEN_PV_DOMAIN;
+       xen_start_flags = xen_start_info->flags;
 
        xen_setup_features();
 
index aa1c6a6831a94dd383e11c575a2a0c91f32136b5..c85d1a88f47693232369411588cfc19084086b25 100644 (file)
@@ -97,6 +97,7 @@ void __init xen_prepare_pvh(void)
        }
 
        xen_pvh = 1;
+       xen_start_flags = pvh_start_info.flags;
 
        msr = cpuid_ebx(xen_cpuid_base() + 2);
        pfn = __pa(hypercall_page);
index 2e20ae2fa2d6c3b865f2c745ad9896a752954907..e3b18ad49889afc5ae35d2e2796aecd108a93819 100644 (file)
@@ -32,6 +32,7 @@
 #include <xen/interface/vcpu.h>
 #include <xen/interface/xenpmu.h>
 
+#include <asm/spec-ctrl.h>
 #include <asm/xen/interface.h>
 #include <asm/xen/hypercall.h>
 
@@ -70,6 +71,8 @@ static void cpu_bringup(void)
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);
 
+       speculative_store_bypass_ht_init();
+
        xen_setup_cpu_clockevents();
 
        notify_cpu_starting(cpu);
@@ -250,6 +253,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
        }
        set_cpu_sibling_map(0);
 
+       speculative_store_bypass_ht_init();
+
        xen_pmu_init(0);
 
        if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
index 75a07b8119a96b9eea2258c7d235972552265f46..1de07a7f76806984e504bc03638df7e0610a3455 100644 (file)
 
 #define SO_ZEROCOPY            60
 
+#define SO_TXTIME              61
+#define SCM_TXTIME             SO_TXTIME
+
 #endif /* _XTENSA_SOCKET_H */
index 9710e275f23079b8b7548ee935ab653036e82c5d..67eff5eddc49190fe8be721edd7031e8b6284a1e 100644 (file)
@@ -1807,9 +1807,6 @@ void bio_endio(struct bio *bio)
        if (!bio_integrity_endio(bio))
                return;
 
-       if (WARN_ONCE(bio->bi_next, "driver left bi_next not NULL"))
-               bio->bi_next = NULL;
-
        /*
         * Need to have a real endio function for chained bios, otherwise
         * various corner cases will break (like stacking block devices that
index cf0ee764b908b384f69be9efbb9d7a1352eb7a52..f84a9b7b6f5aa167c5559079f095e3d3dff28f0d 100644 (file)
@@ -273,10 +273,6 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
        bio_advance(bio, nbytes);
 
        /* don't actually finish bio if it's part of flush sequence */
-       /*
-        * XXX this code looks suspicious - it's not consistent with advancing
-        * req->bio in caller
-        */
        if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
                bio_endio(bio);
 }
@@ -3081,10 +3077,8 @@ bool blk_update_request(struct request *req, blk_status_t error,
                struct bio *bio = req->bio;
                unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
 
-               if (bio_bytes == bio->bi_iter.bi_size) {
+               if (bio_bytes == bio->bi_iter.bi_size)
                        req->bio = bio->bi_next;
-                       bio->bi_next = NULL;
-               }
 
                /* Completion has already been traced */
                bio_clear_flag(bio, BIO_TRACE_COMPLETION);
@@ -3479,6 +3473,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
        dst->cpu = src->cpu;
        dst->__sector = blk_rq_pos(src);
        dst->__data_len = blk_rq_bytes(src);
+       if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
+               dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
+               dst->special_vec = src->special_vec;
+       }
        dst->nr_phys_segments = src->nr_phys_segments;
        dst->ioprio = src->ioprio;
        dst->extra_len = src->extra_len;
index ffa622366922fed04e9dbd2606ffa294b25a697b..1c4532e9293800662d92b809d78637e06607fd90 100644 (file)
@@ -356,7 +356,7 @@ static const char *const blk_mq_rq_state_name_array[] = {
 
 static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
 {
-       if (WARN_ON_ONCE((unsigned int)rq_state >
+       if (WARN_ON_ONCE((unsigned int)rq_state >=
                         ARRAY_SIZE(blk_mq_rq_state_name_array)))
                return "(?)";
        return blk_mq_rq_state_name_array[rq_state];
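
The `>` to `>=` change is the classic array-bound off-by-one: an array of N names has valid indices 0..N-1, so index N must already be rejected. The same fix recurs in the Opal response_get_string()/response_get_u64() hunks below. In miniature, as a runnable userspace test:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *const names[] = { "idle", "in_flight", "complete" };

    static const char *name_of(unsigned int state)
    {
            /* ">=" rejects state == 3 as well as anything larger; the old
             * ">" let state == 3 read one element past the end. */
            if (state >= ARRAY_SIZE(names))
                    return "(?)";
            return names[state];
    }

    int main(void)
    {
            printf("%s %s\n", name_of(1), name_of(3));   /* in_flight (?) */
            return 0;
    }
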
index 70c65bb6c0131c84130fae44808acb51cf427ace..95919268564b162ed291a683dd5c27668cad0834 100644 (file)
@@ -781,7 +781,6 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
                WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
        }
 
-       req->rq_flags &= ~RQF_TIMED_OUT;
        blk_add_timer(req);
 }
 
@@ -1076,6 +1075,9 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
 
 #define BLK_MQ_RESOURCE_DELAY  3               /* ms units */
 
+/*
+ * Returns true if we did some work AND can potentially do more.
+ */
 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
                             bool got_budget)
 {
@@ -1206,8 +1208,17 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
                        blk_mq_run_hw_queue(hctx, true);
                else if (needs_restart && (ret == BLK_STS_RESOURCE))
                        blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
+
+               return false;
        }
 
+       /*
+        * If the host/device is unable to accept more work, inform the
+        * caller of that.
+        */
+       if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
+               return false;
+
        return (queued + errors) != 0;
 }
 
index 01e2b353a2b9aadc2b5d20fe227739d1d0ae296b..15c1f5e12eb89460bc42eb7f5807eaa03254e51d 100644 (file)
@@ -144,6 +144,7 @@ void __blk_complete_request(struct request *req)
 
        local_irq_restore(flags);
 }
+EXPORT_SYMBOL(__blk_complete_request);
 
 /**
  * blk_complete_request - end I/O on a request
index 4b8a48d48ba13394cf0ae7a7dc0016696ae5efd6..f2cfd56e1606ed9d8e1da979a1e1e6cdcb506a38 100644 (file)
@@ -210,6 +210,7 @@ void blk_add_timer(struct request *req)
        if (!req->timeout)
                req->timeout = q->rq_timeout;
 
+       req->rq_flags &= ~RQF_TIMED_OUT;
        blk_rq_set_deadline(req, jiffies + req->timeout);
 
        /*
index 945f4b8610e0c7d85242b500141d4bc1c0f671a2..e0de4dd448b3c7238e8656b572de72206302bf87 100644 (file)
@@ -877,7 +877,7 @@ static size_t response_get_string(const struct parsed_resp *resp, int n,
                return 0;
        }
 
-       if (n > resp->num) {
+       if (n >= resp->num) {
                pr_debug("Response has %d tokens. Can't access %d\n",
                         resp->num, n);
                return 0;
@@ -916,7 +916,7 @@ static u64 response_get_u64(const struct parsed_resp *resp, int n)
                return 0;
        }
 
-       if (n > resp->num) {
+       if (n >= resp->num) {
                pr_debug("Response has %d tokens. Can't access %d\n",
                         resp->num, n);
                return 0;
index 150d82da8e996d8c28be4ae3e4bd64a0331c9c08..1efd6fa0dc608c2a3d598b56c798f3e772a2bdbc 100644 (file)
@@ -1,3 +1,3 @@
 #include <linux/kernel.h>
 
-extern const char __initdata *const blacklist_hashes[];
+extern const char __initconst *const blacklist_hashes[];
index 49fa8582138b2df45e087f3a31a80ac5d5bbdc2a..314c52c967e5882a72b26adf821fd92a2bef5a02 100644 (file)
@@ -1060,12 +1060,19 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
 }
 EXPORT_SYMBOL_GPL(af_alg_async_cb);
 
-__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events)
+/**
+ * af_alg_poll - poll system call handler
+ */
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
+                        poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct af_alg_ctx *ctx = ask->private;
-       __poll_t mask = 0;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        if (!ctx->more || ctx->used)
                mask |= EPOLLIN | EPOLLRDNORM;
@@ -1075,7 +1082,7 @@ __poll_t af_alg_poll_mask(struct socket *sock, __poll_t events)
 
        return mask;
 }
-EXPORT_SYMBOL_GPL(af_alg_poll_mask);
+EXPORT_SYMBOL_GPL(af_alg_poll);
 
 /**
  * af_alg_alloc_areq - allocate struct af_alg_async_req
index 825524f274384fdfd2a569be01e593d8f41a72b2..c40a8c7ee8aedcb0f6adb3afb1e0bb60a233d68c 100644 (file)
@@ -375,7 +375,7 @@ static struct proto_ops algif_aead_ops = {
        .sendmsg        =       aead_sendmsg,
        .sendpage       =       af_alg_sendpage,
        .recvmsg        =       aead_recvmsg,
-       .poll_mask      =       af_alg_poll_mask,
+       .poll           =       af_alg_poll,
 };
 
 static int aead_check_key(struct socket *sock)
@@ -471,7 +471,7 @@ static struct proto_ops algif_aead_ops_nokey = {
        .sendmsg        =       aead_sendmsg_nokey,
        .sendpage       =       aead_sendpage_nokey,
        .recvmsg        =       aead_recvmsg_nokey,
-       .poll_mask      =       af_alg_poll_mask,
+       .poll           =       af_alg_poll,
 };
 
 static void *aead_bind(const char *name, u32 type, u32 mask)
index 4c04eb9888adf82f68a18d17c9d6e73adc74aa90..cfdaab2b7d766d517e239687bf2232e09a749991 100644 (file)
@@ -206,7 +206,7 @@ static struct proto_ops algif_skcipher_ops = {
        .sendmsg        =       skcipher_sendmsg,
        .sendpage       =       af_alg_sendpage,
        .recvmsg        =       skcipher_recvmsg,
-       .poll_mask      =       af_alg_poll_mask,
+       .poll           =       af_alg_poll,
 };
 
 static int skcipher_check_key(struct socket *sock)
@@ -302,7 +302,7 @@ static struct proto_ops algif_skcipher_ops_nokey = {
        .sendmsg        =       skcipher_sendmsg_nokey,
        .sendpage       =       skcipher_sendpage_nokey,
        .recvmsg        =       skcipher_recvmsg_nokey,
-       .poll_mask      =       af_alg_poll_mask,
+       .poll           =       af_alg_poll,
 };
 
 static void *skcipher_bind(const char *name, u32 type, u32 mask)
index 7d81e6bb461a330a225658dde9b002b3b24e26bc..b6cabac4b62ba6b920cb5947c56db5839711bcc7 100644 (file)
@@ -249,6 +249,15 @@ int x509_note_signature(void *context, size_t hdrlen,
                return -EINVAL;
        }
 
+       if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) {
+               /* Discard the BIT STRING metadata */
+               if (vlen < 1 || *(const u8 *)value != 0)
+                       return -EBADMSG;
+
+               value++;
+               vlen--;
+       }
+
        ctx->cert->raw_sig = value;
        ctx->cert->raw_sig_size = vlen;
        return 0;
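
Background for the hunk above: in DER, a BIT STRING payload starts with one metadata octet giving the count of unused bits in the final byte, and for an RSA signature that octet must be zero; skipping it makes raw_sig point at the signature proper. A byte-level illustration (values invented for the example):

    /* DER BIT STRING wrapping a 256-byte RSA signature:
     *
     *   03 82 01 01 00 9a 3f ...
     *   |  |.......|  |  `-- signature bytes (what raw_sig should reference)
     *   |  |          `----- unused-bits octet; must be 0, else -EBADMSG
     *   |  `---------------- length 0x0101 = 257 (1 metadata + 256 signature)
     *   `------------------- BIT STRING tag
     */
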
index 9fbcde307daf90b554ac5e96da627f0f77eb24e9..5eede3749e646b425614aa86de9143c82545fcc6 100644 (file)
@@ -274,8 +274,9 @@ static void crypto_morus640_decrypt_chunk(struct morus640_state *state, u8 *dst,
                union morus640_block_in tail;
 
                memcpy(tail.bytes, src, size);
+               memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
 
-               crypto_morus640_load_a(&m, src);
+               crypto_morus640_load_a(&m, tail.bytes);
                crypto_morus640_core(state, &m);
                crypto_morus640_store_a(tail.bytes, &m);
                memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
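
The bug fixed in this hunk was an out-of-bounds read: for a partial final block the code copied only `size` bytes into the stack buffer, then loaded a full block straight from `src` anyway. Padding the copy with zeroes and loading from it keeps every access inside the buffer. The pattern, distilled (hypothetical names, fixed 16-byte block):

    #include <string.h>

    #define BLOCK_SIZE 16

    /* Process a trailing partial block without reading past src + size. */
    static void process_tail(unsigned char *dst, const unsigned char *src,
                             size_t size,
                             void (*process_block)(unsigned char b[BLOCK_SIZE]))
    {
            unsigned char tail[BLOCK_SIZE];

            memcpy(tail, src, size);                   /* only what exists */
            memset(tail + size, 0, BLOCK_SIZE - size); /* pad; no over-read */
            process_block(tail);                       /* work on the copy */
            memcpy(dst, tail, size);                   /* emit size bytes */
    }
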
index 264ec12c0b9c334a16d743651771a18b616616a0..7f6735d9003f13c1e4adb7ffde8d8be5cc700fe1 100644 (file)
@@ -152,7 +152,7 @@ static SHA3_INLINE void keccakf_round(u64 st[25])
        st[24] ^= bc[ 4];
 }
 
-static void __optimize("O3") keccakf(u64 st[25])
+static void keccakf(u64 st[25])
 {
        int round;
 
index 38a286975c31e152206b3e55b28473a2763a717a..f8fecfec5df9b85be3e0d6c78a54087952140fdc 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
 #include <linux/pwm.h>
+#include <linux/suspend.h>
 #include <linux/delay.h>
 
 #include "internal.h"
@@ -946,9 +947,10 @@ static void lpss_iosf_exit_d3_state(void)
        mutex_unlock(&lpss_iosf_mutex);
 }
 
-static int acpi_lpss_suspend(struct device *dev, bool wakeup)
+static int acpi_lpss_suspend(struct device *dev, bool runtime)
 {
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+       bool wakeup = runtime || device_may_wakeup(dev);
        int ret;
 
        if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
@@ -961,13 +963,14 @@ static int acpi_lpss_suspend(struct device *dev, bool wakeup)
         * wrong status for devices being about to be powered off. See
         * lpss_iosf_enter_d3_state() for further information.
         */
-       if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+       if ((runtime || !pm_suspend_via_firmware()) &&
+           lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
                lpss_iosf_enter_d3_state();
 
        return ret;
 }
 
-static int acpi_lpss_resume(struct device *dev)
+static int acpi_lpss_resume(struct device *dev, bool runtime)
 {
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;
@@ -976,7 +979,8 @@ static int acpi_lpss_resume(struct device *dev)
         * This call is kept first to be in symmetry with
         * acpi_lpss_runtime_suspend() one.
         */
-       if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+       if ((runtime || !pm_resume_via_firmware()) &&
+           lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
                lpss_iosf_exit_d3_state();
 
        ret = acpi_dev_resume(dev);
@@ -1000,12 +1004,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
                return 0;
 
        ret = pm_generic_suspend_late(dev);
-       return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
+       return ret ? ret : acpi_lpss_suspend(dev, false);
 }
 
 static int acpi_lpss_resume_early(struct device *dev)
 {
-       int ret = acpi_lpss_resume(dev);
+       int ret = acpi_lpss_resume(dev, false);
 
        return ret ? ret : pm_generic_resume_early(dev);
 }
@@ -1020,7 +1024,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
 
 static int acpi_lpss_runtime_resume(struct device *dev)
 {
-       int ret = acpi_lpss_resume(dev);
+       int ret = acpi_lpss_resume(dev, true);
 
        return ret ? ret : pm_generic_runtime_resume(dev);
 }
index bb94cf0731feb92b89b78cec274668fe204ed1e4..442a9e24f4397674041fa9f557bd90fef7763297 100644 (file)
@@ -2037,6 +2037,17 @@ static inline void acpi_ec_query_exit(void)
        }
 }
 
+static const struct dmi_system_id acpi_ec_no_wakeup[] = {
+       {
+               .ident = "Thinkpad X1 Carbon 6th",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20KGS3JF01"),
+               },
+       },
+       { },
+};
+
 int __init acpi_ec_init(void)
 {
        int result;
@@ -2047,6 +2058,15 @@ int __init acpi_ec_init(void)
        if (result)
                return result;
 
+       /*
+        * Disable EC wakeup on following systems to prevent periodic
+        * wakeup from EC GPE.
+        */
+       if (dmi_check_system(acpi_ec_no_wakeup)) {
+               ec_no_wakeup = true;
+               pr_debug("Disabling EC wakeup on suspend-to-idle\n");
+       }
+
        /* Drivers must be started after acpi_ec_query_init() */
        dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
        /*
index 7ca41bf023c9f354cad85617f0cceab228640d65..8df9abfa947b0dca4719c674fd645d187ade242d 100644 (file)
@@ -45,6 +45,8 @@
 #include <linux/uaccess.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 
+#include "acpica/accommon.h"
+#include "acpica/acnamesp.h"
 #include "internal.h"
 
 #define _COMPONENT             ACPI_OS_SERVICES
@@ -1490,6 +1492,76 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
 }
 EXPORT_SYMBOL(acpi_check_region);
 
+static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
+                                             void *_res, void **return_value)
+{
+       struct acpi_mem_space_context **mem_ctx;
+       union acpi_operand_object *handler_obj;
+       union acpi_operand_object *region_obj2;
+       union acpi_operand_object *region_obj;
+       struct resource *res = _res;
+       acpi_status status;
+
+       region_obj = acpi_ns_get_attached_object(handle);
+       if (!region_obj)
+               return AE_OK;
+
+       handler_obj = region_obj->region.handler;
+       if (!handler_obj)
+               return AE_OK;
+
+       if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+               return AE_OK;
+
+       if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
+               return AE_OK;
+
+       region_obj2 = acpi_ns_get_secondary_object(region_obj);
+       if (!region_obj2)
+               return AE_OK;
+
+       mem_ctx = (void *)&region_obj2->extra.region_context;
+
+       if (!(mem_ctx[0]->address >= res->start &&
+             mem_ctx[0]->address < res->end))
+               return AE_OK;
+
+       status = handler_obj->address_space.setup(region_obj,
+                                                 ACPI_REGION_DEACTIVATE,
+                                                 NULL, (void **)mem_ctx);
+       if (ACPI_SUCCESS(status))
+               region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
+
+       return status;
+}
+
+/**
+ * acpi_release_memory - Release any mappings done to a memory region
+ * @handle: Handle to namespace node
+ * @res: Memory resource
+ * @level: A level that terminates the search
+ *
+ * Walks through @handle and unmaps all SystemMemory Operation Regions that
+ * overlap with @res and that have already been activated (mapped).
+ *
+ * This is a helper that allows drivers to place special requirements on a
+ * memory region that may overlap with Operation Regions, primarily allowing
+ * them to safely map the region as non-cached memory.
+ *
+ * The unmapped Operation Regions will be automatically remapped next time they
+ * are called, so the drivers do not need to do anything else.
+ */
+acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
+                               u32 level)
+{
+       if (!(res->flags & IORESOURCE_MEM))
+               return AE_TYPE;
+
+       return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
+                                  acpi_deactivate_mem_region, NULL, res, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_release_memory);
+
 /*
  * Let drivers know whether the resource checks are effective
  */
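
A hedged sketch of how a driver might use the new helper before remapping device memory as write-combining; the PCI plumbing is illustrative, and the motivating users were drivers whose BARs can overlap AML SystemMemory Operation Regions:

    /* Hypothetical fragment: drop ACPI's cached mappings of a BAR before
     * ioremap_wc(), so the range is not mapped cached and WC at once. */
    static void __iomem *map_bar_wc(struct pci_dev *pdev, int bar)
    {
            struct resource *res = &pdev->resource[bar];

            if (ACPI_FAILURE(acpi_release_memory(ACPI_HANDLE(&pdev->dev),
                                                 res, 1)))
                    return NULL;

            return ioremap_wc(res->start, resource_size(res));
    }
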
index ff81a576347e5154c10c997717548be69e81bbab..82532c299bb5964a429e81353b9c5f94d9bb5ed2 100644 (file)
@@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev)
        skb_queue_head_init(&iadev->rx_dma_q);  
        iadev->rx_free_desc_qhead = NULL;   
 
-       iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL);
+       iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
        if (!iadev->rx_open) {
                printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
                dev->number);  
index a8d2eb0ceb8d8f78788182f81f8e1e9f9dc8fbbb..e89146ddede693a400b0d7019f14ca92aa4dff8e 100644 (file)
@@ -1385,14 +1385,12 @@ static void zatm_close(struct atm_vcc *vcc)
 
 static int zatm_open(struct atm_vcc *vcc)
 {
-       struct zatm_dev *zatm_dev;
        struct zatm_vcc *zatm_vcc;
        short vpi = vcc->vpi;
        int vci = vcc->vci;
        int error;
 
        DPRINTK(">zatm_open\n");
-       zatm_dev = ZATM_DEV(vcc->dev);
        if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
                vcc->dev_data = NULL;
        if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC)
@@ -1483,6 +1481,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
                                        return -EFAULT;
                                if (pool < 0 || pool > ZATM_LAST_POOL)
                                        return -EINVAL;
+                               pool = array_index_nospec(pool,
+                                                         ZATM_LAST_POOL + 1);
                                if (copy_from_user(&info,
                                    &((struct zatm_pool_req __user *) arg)->info,
                                    sizeof(info))) return -EFAULT;
index b074f242a43594fc3d3a383a9dce5d126e8f3a78..704f442958103545aa89ad0e986130aa6ebc5b06 100644 (file)
@@ -8,10 +8,7 @@ obj-y                  := component.o core.o bus.o dd.o syscore.o \
                           topology.o container.o property.o cacheinfo.o \
                           devcon.o
 obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
-obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
 obj-y                  += power/
-obj-$(CONFIG_HAS_DMA)  += dma-mapping.o
-obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_ISA_BUS_API)      += isa.o
 obj-y                          += firmware_loader/
 obj-$(CONFIG_NUMA)     += node.o
index 36622b52e419db9573c5cfb31f03bf740324e961..df3e1a44707acc74010cf5ce6fab815c4f744896 100644 (file)
@@ -236,6 +236,13 @@ struct device_link *device_link_add(struct device *consumer,
                        link->rpm_active = true;
                }
                pm_runtime_new_link(consumer);
+               /*
+                * If the link is being added by the consumer driver at probe
+                * time, balance the decrementation of the supplier's runtime PM
+                * usage counter after consumer probe in driver_probe_device().
+                */
+               if (consumer->links.status == DL_DEV_PROBING)
+                       pm_runtime_get_noresume(supplier);
        }
        get_device(supplier);
        link->supplier = supplier;
@@ -255,12 +262,12 @@ struct device_link *device_link_add(struct device *consumer,
                        switch (consumer->links.status) {
                        case DL_DEV_PROBING:
                                /*
-                                * Balance the decrementation of the supplier's
-                                * runtime PM usage counter after consumer probe
-                                * in driver_probe_device().
+                                * Some callers expect the link creation during
+                                * consumer driver probe to resume the supplier
+                                * even without DL_FLAG_RPM_ACTIVE.
                                 */
                                if (flags & DL_FLAG_PM_RUNTIME)
-                                       pm_runtime_get_sync(supplier);
+                                       pm_runtime_resume(supplier);
 
                                link->status = DL_STATE_CONSUMER_PROBE;
                                break;
index 4925af5c4cf039e6cc07918967aa6995353e4bd8..c298de8a8308b89569ad4e2d750f9c4c721ed20d 100644 (file)
@@ -2487,10 +2487,9 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
  * power domain corresponding to a DT node's "required-opps" property.
  *
  * @dev: Device for which the performance-state needs to be found.
- * @opp_node: DT node where the "required-opps" property is present. This can be
+ * @np: DT node where the "required-opps" property is present. This can be
  *     the device node itself (if it doesn't have an OPP table) or a node
  *     within the OPP table of a device (if device has an OPP table).
- * @state: Pointer to return performance state.
  *
  * Returns performance state corresponding to the "required-opps" property of
  * a DT node. This calls platform specific genpd->opp_to_performance_state()
@@ -2499,7 +2498,7 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
  * Returns performance state on success and 0 on failure.
  */
 unsigned int of_genpd_opp_to_performance_state(struct device *dev,
-                                              struct device_node *opp_node)
+                                              struct device_node *np)
 {
        struct generic_pm_domain *genpd;
        struct dev_pm_opp *opp;
@@ -2514,7 +2513,7 @@ unsigned int of_genpd_opp_to_performance_state(struct device *dev,
 
        genpd_lock(genpd);
 
-       opp = of_dev_pm_opp_find_required_opp(&genpd->dev, opp_node);
+       opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np);
        if (IS_ERR(opp)) {
                dev_err(dev, "Failed to find required OPP: %ld\n",
                        PTR_ERR(opp));
index a47e4987ee467ed04578b47499c29def872525f6..d146fedc38bb26535e3960963058a9635e5d7f7b 100644 (file)
@@ -1244,8 +1244,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
        _drbd_start_io_acct(device, req);
 
        /* process discards always from our submitter thread */
-       if ((bio_op(bio) & REQ_OP_WRITE_ZEROES) ||
-           (bio_op(bio) & REQ_OP_DISCARD))
+       if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
+           bio_op(bio) == REQ_OP_DISCARD)
                goto queue_for_submitter_thread;
 
        if (rw == WRITE && req->private_bio && req->i.size
index 3b7083b8ecbb3b0ffcad0d2879954780075333ee..74a05561b620a3be51bd30a09f1eeaa1b9168876 100644 (file)
@@ -76,6 +76,7 @@ struct link_dead_args {
 #define NBD_HAS_CONFIG_REF             4
 #define NBD_BOUND                      5
 #define NBD_DESTROY_ON_DISCONNECT      6
+#define NBD_DISCONNECT_ON_CLOSE        7
 
 struct nbd_config {
        u32 flags;
@@ -138,6 +139,7 @@ static void nbd_config_put(struct nbd_device *nbd);
 static void nbd_connect_reply(struct genl_info *info, int index);
 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
 static void nbd_dead_link_work(struct work_struct *work);
+static void nbd_disconnect_and_put(struct nbd_device *nbd);
 
 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
 {
@@ -1305,6 +1307,12 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
 static void nbd_release(struct gendisk *disk, fmode_t mode)
 {
        struct nbd_device *nbd = disk->private_data;
+       struct block_device *bdev = bdget_disk(disk, 0);
+
+       if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
+                       bdev->bd_openers == 0)
+               nbd_disconnect_and_put(nbd);
+
        nbd_config_put(nbd);
        nbd_put(nbd);
 }
@@ -1705,6 +1713,10 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
                                &config->runtime_flags);
                        put_dev = true;
                }
+               if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+                       set_bit(NBD_DISCONNECT_ON_CLOSE,
+                               &config->runtime_flags);
+               }
        }
 
        if (info->attrs[NBD_ATTR_SOCKETS]) {
@@ -1749,6 +1761,17 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
        return ret;
 }
 
+static void nbd_disconnect_and_put(struct nbd_device *nbd)
+{
+       mutex_lock(&nbd->config_lock);
+       nbd_disconnect(nbd);
+       nbd_clear_sock(nbd);
+       mutex_unlock(&nbd->config_lock);
+       if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
+                              &nbd->config->runtime_flags))
+               nbd_config_put(nbd);
+}
+
 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
 {
        struct nbd_device *nbd;
@@ -1781,13 +1804,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
                nbd_put(nbd);
                return 0;
        }
-       mutex_lock(&nbd->config_lock);
-       nbd_disconnect(nbd);
-       nbd_clear_sock(nbd);
-       mutex_unlock(&nbd->config_lock);
-       if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
-                              &nbd->config->runtime_flags))
-               nbd_config_put(nbd);
+       nbd_disconnect_and_put(nbd);
        nbd_config_put(nbd);
        nbd_put(nbd);
        return 0;
@@ -1798,7 +1815,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
        struct nbd_device *nbd = NULL;
        struct nbd_config *config;
        int index;
-       int ret = -EINVAL;
+       int ret = 0;
        bool put_dev = false;
 
        if (!netlink_capable(skb, CAP_SYS_ADMIN))
@@ -1838,6 +1855,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
            !nbd->task_recv) {
                dev_err(nbd_to_dev(nbd),
                        "not configured, cannot reconfigure\n");
+               ret = -EINVAL;
                goto out;
        }
 
@@ -1862,6 +1880,14 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
                                               &config->runtime_flags))
                                refcount_inc(&nbd->refs);
                }
+
+               if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+                       set_bit(NBD_DISCONNECT_ON_CLOSE,
+                                       &config->runtime_flags);
+               } else {
+                       clear_bit(NBD_DISCONNECT_ON_CLOSE,
+                                       &config->runtime_flags);
+               }
        }
 
        if (info->attrs[NBD_ATTR_SOCKETS]) {
index 7948049f6c4321b02e1611383dae1be86a7748f1..042c778e5a4e0bf2009c38a6b1cf37bc5d23ce89 100644 (file)
@@ -1365,7 +1365,7 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
 static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
 {
        pr_info("null: rq %p timed out\n", rq);
-       blk_mq_complete_request(rq);
+       __blk_complete_request(rq);
        return BLK_EH_DONE;
 }
 
index 14d159e2042d5c488c1e23b3247508aab0a2ebff..2dc33e65d2d0c957199f1e3c1bf8028d4e09ca88 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/unaligned/le_struct.h>
+#include <asm/unaligned.h>
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
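
The headers under linux/unaligned/ are arch-specific backends; <asm/unaligned.h> is the portable entry point that selects the right one, and is what drivers should include for the get_unaligned_* helpers. A minimal sketch of the intended usage, with a hypothetical event-buffer parser:

#include <asm/unaligned.h>
#include <linux/types.h>

/* Hypothetical helper: pull a little-endian u16 out of a raw, possibly
 * unaligned buffer. get_unaligned_le16() compiles to a plain load on
 * arches that allow unaligned access and to byte loads elsewhere. */
static u16 example_parse_opcode(const u8 *buf)
{
	return get_unaligned_le16(buf);
}
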
index 91bb98c42a1ca76376ae0db4b43fd7a89fca27b0..aaf9e5afaad435e2342a15fc963aa91367079957 100644 (file)
@@ -516,11 +516,18 @@ EXPORT_SYMBOL_GPL(hwrng_register);
 
 void hwrng_unregister(struct hwrng *rng)
 {
+       int err;
+
        mutex_lock(&rng_mutex);
 
        list_del(&rng->list);
-       if (current_rng == rng)
-               enable_best_rng();
+       if (current_rng == rng) {
+               err = enable_best_rng();
+               if (err) {
+                       drop_current_rng();
+                       cur_rng_set_by_user = 0;
+               }
+       }
 
        if (list_empty(&rng_list)) {
                mutex_unlock(&rng_mutex);
index a8fb0020ba5ccfb9f4b72b689544299815fab60a..cd888d4ee605e0b9cf5f4970d34989d4b72f3b72 100644 (file)
@@ -402,7 +402,8 @@ static struct poolinfo {
 /*
  * Static global variables
  */
-static DECLARE_WAIT_QUEUE_HEAD(random_wait);
+static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
+static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
 static struct fasync_struct *fasync;
 
 static DEFINE_SPINLOCK(random_ready_list_lock);
@@ -721,8 +722,8 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
 
                /* should we wake readers? */
                if (entropy_bits >= random_read_wakeup_bits &&
-                   wq_has_sleeper(&random_wait)) {
-                       wake_up_interruptible_poll(&random_wait, POLLIN);
+                   wq_has_sleeper(&random_read_wait)) {
+                       wake_up_interruptible(&random_read_wait);
                        kill_fasync(&fasync, SIGIO, POLL_IN);
                }
                /* If the input pool is getting full, send some
@@ -1396,7 +1397,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
        trace_debit_entropy(r->name, 8 * ibytes);
        if (ibytes &&
            (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
-               wake_up_interruptible_poll(&random_wait, POLLOUT);
+               wake_up_interruptible(&random_write_wait);
                kill_fasync(&fasync, SIGIO, POLL_OUT);
        }
 
@@ -1838,7 +1839,7 @@ _random_read(int nonblock, char __user *buf, size_t nbytes)
                if (nonblock)
                        return -EAGAIN;
 
-               wait_event_interruptible(random_wait,
+               wait_event_interruptible(random_read_wait,
                        ENTROPY_BITS(&input_pool) >=
                        random_read_wakeup_bits);
                if (signal_pending(current))
@@ -1875,17 +1876,14 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
        return ret;
 }
 
-static struct wait_queue_head *
-random_get_poll_head(struct file *file, __poll_t events)
-{
-       return &random_wait;
-}
-
 static __poll_t
-random_poll_mask(struct file *file, __poll_t events)
+random_poll(struct file *file, poll_table * wait)
 {
-       __poll_t mask = 0;
+       __poll_t mask;
 
+       poll_wait(file, &random_read_wait, wait);
+       poll_wait(file, &random_write_wait, wait);
+       mask = 0;
        if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
                mask |= EPOLLIN | EPOLLRDNORM;
        if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
@@ -1992,8 +1990,7 @@ static int random_fasync(int fd, struct file *filp, int on)
 const struct file_operations random_fops = {
        .read  = random_read,
        .write = random_write,
-       .get_poll_head  = random_get_poll_head,
-       .poll_mask  = random_poll_mask,
+       .poll  = random_poll,
        .unlocked_ioctl = random_ioctl,
        .fasync = random_fasync,
        .llseek = noop_llseek,
@@ -2326,7 +2323,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
         * We'll be woken up again once below random_write_wakeup_thresh,
         * or when the calling thread is about to terminate.
         */
-       wait_event_interruptible(random_wait, kthread_should_stop() ||
+       wait_event_interruptible(random_write_wait, kthread_should_stop() ||
                        ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
        mix_pool_bytes(poolp, buffer, count);
        credit_entropy_bits(poolp, entropy);
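
With the wait queue split into dedicated read and write queues, the driver goes back to a classic .poll callback that registers on both via poll_wait() and computes the ready mask itself. From userspace the result looks like this minimal sketch (the thresholds are the read/write wakeup limits visible under /proc/sys/kernel/random/):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd = { .events = POLLIN | POLLOUT };

	pfd.fd = open("/dev/random", O_RDWR);
	if (pfd.fd < 0)
		return 1;
	if (poll(&pfd, 1, 5000) > 0) {
		if (pfd.revents & POLLIN)
			printf("readable: pool above the read wakeup threshold\n");
		if (pfd.revents & POLLOUT)
			printf("writable: pool below the write wakeup threshold\n");
	}
	close(pfd.fd);
	return 0;
}
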
index e5cdc3af684cbbe2370406c79af63df143776cd3..2717f88c79040a1ec26e2dd0d01f5d2dd25d9734 100644 (file)
@@ -304,8 +304,10 @@ static int __init stm32_timer_init(struct device_node *node)
 
        to->private_data = kzalloc(sizeof(struct stm32_timer_private),
                                   GFP_KERNEL);
-       if (!to->private_data)
+       if (!to->private_data) {
+               ret = -ENOMEM;
                goto deinit;
+       }
 
        rstc = of_reset_control_get(node, NULL);
        if (!IS_ERR(rstc)) {
index e718b8c69a566713362f16c7d6795a86f4c79912..eeb7d31cbda5b2794afad4c9fd0ab65c8849240d 100644 (file)
@@ -19,6 +19,7 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/list.h>
@@ -239,7 +240,7 @@ void cn_del_callback(struct cb_id *id)
 }
 EXPORT_SYMBOL_GPL(cn_del_callback);
 
-static int cn_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
 {
        struct cn_queue_dev *dev = cdev.cbdev;
        struct cn_callback_entry *cbq;
index 1de5ec8d5ea3e9995e3ffd413f728df078c25f8f..ece120da33538d2333c9f0082196b3080d364e2a 100644 (file)
@@ -294,6 +294,7 @@ struct pstate_funcs {
 static struct pstate_funcs pstate_funcs __read_mostly;
 
 static int hwp_active __read_mostly;
+static int hwp_mode_bdw __read_mostly;
 static bool per_cpu_limits __read_mostly;
 static bool hwp_boost __read_mostly;
 
@@ -1413,7 +1414,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
        cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
        cpu->pstate.scaling = pstate_funcs.get_scaling();
        cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
-       cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+
+       if (hwp_active && !hwp_mode_bdw) {
+               unsigned int phy_max, current_max;
+
+               intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+               cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
+       } else {
+               cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+       }
 
        if (pstate_funcs.get_aperf_mperf_shift)
                cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
@@ -2467,28 +2476,36 @@ static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
 static inline void intel_pstate_request_control_from_smm(void) {}
 #endif /* CONFIG_ACPI */
 
+#define INTEL_PSTATE_HWP_BROADWELL     0x01
+
+#define ICPU_HWP(model, hwp_mode) \
+       { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
+
 static const struct x86_cpu_id hwp_support_ids[] __initconst = {
-       { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
+       ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
+       ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL),
+       ICPU_HWP(X86_MODEL_ANY, 0),
        {}
 };
 
 static int __init intel_pstate_init(void)
 {
+       const struct x86_cpu_id *id;
        int rc;
 
        if (no_load)
                return -ENODEV;
 
-       if (x86_match_cpu(hwp_support_ids)) {
+       id = x86_match_cpu(hwp_support_ids);
+       if (id) {
                copy_cpu_funcs(&core_funcs);
                if (!no_hwp) {
                        hwp_active++;
+                       hwp_mode_bdw = id->driver_data;
                        intel_pstate.attr = hwp_cpufreq_attrs;
                        goto hwp_cpu_matched;
                }
        } else {
-               const struct x86_cpu_id *id;
-
                id = x86_match_cpu(intel_pstate_cpu_ids);
                if (!id)
                        return -ENODEV;
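
The ICPU_HWP() entries carry a per-model flag in .driver_data, and intel_pstate_init() now keeps the entry x86_match_cpu() returns so it can read that flag back. The generic shape of the pattern, as a kernel-side sketch with a hypothetical model number:

#include <asm/cpu_device_id.h>
#include <linux/errno.h>
#include <linux/init.h>

static const struct x86_cpu_id example_ids[] __initconst = {
	/* vendor, family, model (hypothetical), feature, driver_data */
	{ X86_VENDOR_INTEL, 6, 0x4f, X86_FEATURE_HWP, 1 },
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP, 0 },
	{}
};

static int __init example_init(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(example_ids);

	if (!id)
		return -ENODEV;
	/* id->driver_data carries the per-model quirk flag. */
	return 0;
}
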
index d049fe4b80c48e00d169f3835bb7b70b8022a879..29389accf3e97df7a5700b49e980c88de0974583 100644 (file)
@@ -42,6 +42,8 @@ enum _msm8996_version {
        NUM_OF_MSM8996_VERSIONS,
 };
 
+struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
+
 static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
 {
        size_t len;
@@ -74,7 +76,6 @@ static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
 static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
 {
        struct opp_table *opp_tables[NR_CPUS] = {0};
-       struct platform_device *cpufreq_dt_pdev;
        enum _msm8996_version msm8996_version;
        struct nvmem_cell *speedbin_nvmem;
        struct device_node *np;
@@ -86,8 +87,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
        int ret;
 
        cpu_dev = get_cpu_device(0);
-       if (NULL == cpu_dev)
-               ret = -ENODEV;
+       if (!cpu_dev)
+               return -ENODEV;
 
        msm8996_version = qcom_cpufreq_kryo_get_msm_id();
        if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
@@ -96,8 +97,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
        }
 
        np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
-       if (IS_ERR(np))
-               return PTR_ERR(np);
+       if (!np)
+               return -ENOENT;
 
        ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
        if (!ret) {
@@ -115,6 +116,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
 
        speedbin = nvmem_cell_read(speedbin_nvmem, &len);
        nvmem_cell_put(speedbin_nvmem);
+       if (IS_ERR(speedbin))
+               return PTR_ERR(speedbin);
 
        switch (msm8996_version) {
        case MSM8996_V3:
@@ -127,6 +130,7 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
                BUG();
                break;
        }
+       kfree(speedbin);
 
        for_each_possible_cpu(cpu) {
                cpu_dev = get_cpu_device(cpu);
@@ -162,8 +166,15 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
        return ret;
 }
 
+static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
+{
+       platform_device_unregister(cpufreq_dt_pdev);
+       return 0;
+}
+
 static struct platform_driver qcom_cpufreq_kryo_driver = {
        .probe = qcom_cpufreq_kryo_probe,
+       .remove = qcom_cpufreq_kryo_remove,
        .driver = {
                .name = "qcom-cpufreq-kryo",
        },
@@ -198,8 +209,9 @@ static int __init qcom_cpufreq_kryo_init(void)
        if (unlikely(ret < 0))
                return ret;
 
-       ret = PTR_ERR_OR_ZERO(platform_device_register_simple(
-               "qcom-cpufreq-kryo", -1, NULL, 0));
+       kryo_cpufreq_pdev = platform_device_register_simple(
+               "qcom-cpufreq-kryo", -1, NULL, 0);
+       ret = PTR_ERR_OR_ZERO(kryo_cpufreq_pdev);
        if (0 == ret)
                return 0;
 
@@ -208,5 +220,12 @@ static int __init qcom_cpufreq_kryo_init(void)
 }
 module_init(qcom_cpufreq_kryo_init);
 
+static void __exit qcom_cpufreq_kryo_exit(void)
+{
+       platform_device_unregister(kryo_cpufreq_pdev);
+       platform_driver_unregister(&qcom_cpufreq_kryo_driver);
+}
+module_exit(qcom_cpufreq_kryo_exit);
+
 MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver");
 MODULE_LICENSE("GPL v2");
index 2bb6f0380758829077a789f8eb26932e108a360c..0997e166ea57755c4d0178702995cce489b06d7e 100644 (file)
@@ -1673,7 +1673,7 @@ static void chtls_timewait(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
 
        tp->rcv_nxt++;
-       tp->rx_opt.ts_recent_stamp = get_seconds();
+       tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
        tp->srtt_us = 0;
        tcp_time_wait(sk, TCP_TIME_WAIT, 0);
 }
index 00c7aab8e7d0f5861e778dc4d26affe5c1234603..afebbd87c4aa1d22ca179f558552cb2f410fcc0a 100644 (file)
@@ -1548,15 +1548,14 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                        tp->urg_data = 0;
 
                if ((avail + offset) >= skb->len) {
-                       if (likely(skb))
-                               chtls_free_skb(sk, skb);
-                       buffers_freed++;
                        if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
                                tp->copied_seq += skb->len;
                                hws->rcvpld = skb->hdr_len;
                        } else {
                                tp->copied_seq += hws->rcvpld;
                        }
+                       chtls_free_skb(sk, skb);
+                       buffers_freed++;
                        hws->copied_seq = 0;
                        if (copied >= target &&
                            !skb_peek(&sk->sk_receive_queue))
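
The old ordering freed the skb and then read skb->len and hdr_len out of it, a use-after-free; the fix moves chtls_free_skb() after the last access. The bug pattern in miniature, as a userspace sketch:

#include <stdlib.h>

struct pkt { int len; };

static int consume(struct pkt *p)
{
	int len;

	/* buggy order:  free(p); len = p->len;  -- use after free */
	len = p->len;	/* fixed: read the fields first ... */
	free(p);	/* ... then release the packet */
	return len;
}
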
index 903d9c473749c24d636f573aba798a9680df2909..45276abf03aa2bd52aa9af56b8cbd45a4b1e5135 100644 (file)
@@ -86,6 +86,7 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 {
        struct dax_device *dax_dev;
        bool dax_enabled = false;
+       struct request_queue *q;
        pgoff_t pgoff;
        int err, id;
        void *kaddr;
@@ -99,6 +100,13 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
                return false;
        }
 
+       q = bdev_get_queue(bdev);
+       if (!q || !blk_queue_dax(q)) {
+               pr_debug("%s: error: request queue doesn't support dax\n",
+                               bdevname(bdev, buf));
+               return false;
+       }
+
        err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
        if (err) {
                pr_debug("%s: error: unaligned partition for dax\n",
index 951b6c79f166a7d2b4ec14e12096559df9aed684..624a11cb07e23b775d097d930997cc6ebd171b06 100644 (file)
@@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name,               0444, DMI_PRODUCT_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(product_version,     0444, DMI_PRODUCT_VERSION);
 DEFINE_DMI_ATTR_WITH_SHOW(product_serial,      0400, DMI_PRODUCT_SERIAL);
 DEFINE_DMI_ATTR_WITH_SHOW(product_uuid,                0400, DMI_PRODUCT_UUID);
+DEFINE_DMI_ATTR_WITH_SHOW(product_sku,         0444, DMI_PRODUCT_SKU);
 DEFINE_DMI_ATTR_WITH_SHOW(product_family,      0444, DMI_PRODUCT_FAMILY);
 DEFINE_DMI_ATTR_WITH_SHOW(board_vendor,                0444, DMI_BOARD_VENDOR);
 DEFINE_DMI_ATTR_WITH_SHOW(board_name,          0444, DMI_BOARD_NAME);
@@ -193,6 +194,7 @@ static void __init dmi_id_init_attr_table(void)
        ADD_DMI_ATTR(product_serial,    DMI_PRODUCT_SERIAL);
        ADD_DMI_ATTR(product_uuid,      DMI_PRODUCT_UUID);
        ADD_DMI_ATTR(product_family,    DMI_PRODUCT_FAMILY);
+       ADD_DMI_ATTR(product_sku,       DMI_PRODUCT_SKU);
        ADD_DMI_ATTR(board_vendor,      DMI_BOARD_VENDOR);
        ADD_DMI_ATTR(board_name,        DMI_BOARD_NAME);
        ADD_DMI_ATTR(board_version,     DMI_BOARD_VERSION);
index 54e66adef2525179e49ecfe9fc04e253ecc18e51..f2483548cde92d692f748d6a9c7da0cbf98274a3 100644 (file)
@@ -447,6 +447,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
                dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
                dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
                dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
+               dmi_save_ident(dm, DMI_PRODUCT_SKU, 25);
                dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
                break;
        case 2:         /* Base Board Information */
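
With SMBIOS field 25 saved by dmi_scan and exported by dmi-id, the SKU becomes a regular sysfs attribute alongside product_name and friends. A minimal userspace sketch reading it (path as created by the dmi-id driver):

#include <stdio.h>

int main(void)
{
	char sku[128];
	FILE *f = fopen("/sys/class/dmi/id/product_sku", "r");

	if (!f)
		return 1;
	if (fgets(sku, sizeof(sku), f))
		printf("SKU: %s", sku);
	fclose(f);
	return 0;
}
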
index caa37a6dd9d4eca506e3a0c2fed3c636fbd05d2b..a90b0b8fc69a18abb62d10c3f046a88a7300fd5b 100644 (file)
@@ -64,7 +64,7 @@ static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
        efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
        efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
        efi_status_t status;
-       efi_physical_addr_t log_location, log_last_entry;
+       efi_physical_addr_t log_location = 0, log_last_entry = 0;
        struct linux_efi_tpm_eventlog *log_tbl = NULL;
        unsigned long first_entry_addr, last_entry_addr;
        size_t log_size, last_entry_size;
index 3317d1536f4fc352247756e3c650d72c9236916b..6e5284e6c028d7624a60cd630419d1e1507fbc14 100644 (file)
@@ -2158,10 +2158,18 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
        switch (asic_type) {
 #if defined(CONFIG_DRM_AMD_DC)
        case CHIP_BONAIRE:
-       case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
+               /*
+                * We have systems in the wild with these ASICs that require
+                * LVDS and VGA support, which DC does not provide.
+                *
+                * Fall back to the non-DC driver here by default so as not to
+                * cause regressions.
+                */
+               return amdgpu_dc > 0;
+       case CHIP_HAWAII:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS10:
index 39ec6b8890a1bf200053900b7998e5f33d703a32..e74d620d9699f8a54c273b8b233c81f3efa40f9c 100644 (file)
@@ -376,7 +376,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;
 
-       if (ring != &adev->uvd.inst[ring->me].ring) {
+       if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
                ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
                ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
        } else {
index 5e4e1bd9038379fe62666e44318162adfc544fea..3526efa8960e3de2042f944db341dc36bc5bddb2 100644 (file)
@@ -762,8 +762,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
                adev->vram_pin_size += amdgpu_bo_size(bo);
-               if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-                       adev->invisible_pin_size += amdgpu_bo_size(bo);
+               adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
        } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                adev->gart_pin_size += amdgpu_bo_size(bo);
        }
@@ -790,25 +789,22 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
-       for (i = 0; i < bo->placement.num_placement; i++) {
-               bo->placements[i].lpfn = 0;
-               bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-       }
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-       if (unlikely(r)) {
-               dev_err(adev->dev, "%p validate failed for unpin\n", bo);
-               goto error;
-       }
 
        if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
                adev->vram_pin_size -= amdgpu_bo_size(bo);
-               if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-                       adev->invisible_pin_size -= amdgpu_bo_size(bo);
+               adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
        } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
                adev->gart_pin_size -= amdgpu_bo_size(bo);
        }
 
-error:
+       for (i = 0; i < bo->placement.num_placement; i++) {
+               bo->placements[i].lpfn = 0;
+               bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+       }
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+       if (unlikely(r))
+               dev_err(adev->dev, "%p validate failed for unpin\n", bo);
+
        return r;
 }
 
index e969c879d87e66c686c0345839da07b39391e3e2..e5da4654b630dd7030704496ee5260e7676324ec 100644 (file)
@@ -73,6 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
 
+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
 
index bcf68f80bbf058b9cfb8f7a1239f82259f95774b..3ff08e326838f381a91d542e3e2ee7484b46c5b3 100644 (file)
@@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
        unsigned version_major, version_minor, family_id;
        int i, j, r;
 
-       INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
+       INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
 
        switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
        void *ptr;
        int i, j;
 
+       cancel_delayed_work_sync(&adev->uvd.idle_work);
+
        for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
                if (adev->uvd.inst[j].vcpu_bo == NULL)
                        continue;
 
-               cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
-
                /* only valid for physical mode */
                if (adev->asic_type < CHIP_POLARIS10) {
                        for (i = 0; i < adev->uvd.max_handles; ++i)
@@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
        struct amdgpu_device *adev =
-               container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+               container_of(work, struct amdgpu_device, uvd.idle_work.work);
        unsigned fences = 0, i, j;
 
        for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
@@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
                                                               AMD_CG_STATE_GATE);
                }
        } else {
-               schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+               schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
        }
 }
 
@@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
        if (amdgpu_sriov_vf(adev))
                return;
 
-       set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+       set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
        if (set_clocks) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_uvd(adev, true);
@@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
        if (!amdgpu_sriov_vf(ring->adev))
-               schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+               schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 }
 
 /**
index b1579fba134c189777d59242d4f9e2dcd97a8378..8b23a1b00c76c95cf2b12c8aff1440415b059560 100644 (file)
@@ -44,7 +44,6 @@ struct amdgpu_uvd_inst {
        void                    *saved_bo;
        atomic_t                handles[AMDGPU_MAX_UVD_HANDLES];
        struct drm_file         *filp[AMDGPU_MAX_UVD_HANDLES];
-       struct delayed_work     idle_work;
        struct amdgpu_ring      ring;
        struct amdgpu_ring      ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
        struct amdgpu_irq_src   irq;
@@ -62,6 +61,7 @@ struct amdgpu_uvd {
        bool                    address_64_bit;
        bool                    use_ctx_buf;
        struct amdgpu_uvd_inst          inst[AMDGPU_MAX_UVD_INSTANCES];
+       struct delayed_work     idle_work;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
index 127e87b470ff4da368c8c1feb0576f0b6cb0c62c..1b4ad9b2a7550189d45f4ac86b7e73bc7cc5966b 100644 (file)
@@ -52,7 +52,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
-       unsigned version_major, version_minor, family_id;
+       unsigned char fw_check;
        int r;
 
        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
@@ -83,12 +83,33 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 
        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
-       family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
-       version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
-       version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
-       DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
-               version_major, version_minor, family_id);
 
+       /* Bits 20-23 hold the encode major version and are non-zero under the
+        * new naming convention. Under the old convention this field was part
+        * of the version minor and DRM_DISABLED_FLAG. Since the latest version
+        * minor is 0x5B and DRM_DISABLED_FLAG is zero there, the field is
+        * always zero so far; these four bits tell which convention is present.
+        */
+       fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
+       if (fw_check) {
+               unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
+
+               fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
+               enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
+               enc_major = fw_check;
+               dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
+               vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
+               DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
+                       enc_major, enc_minor, dec_ver, vep, fw_rev);
+       } else {
+               unsigned int version_major, version_minor, family_id;
+
+               family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+               version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+               version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+               DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
+                       version_major, version_minor, family_id);
+       }
 
        bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
                  +  AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
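
A worked decode under the new convention, using a hypothetical ucode_version word and the same shifts as the hunk above:

#include <stdio.h>

int main(void)
{
	/* Hypothetical firmware version word. Bit layout per the comment
	 * above: 28-31 VEP, 24-27 DEC, 20-23 ENC major (the fw_check
	 * field), 12-19 ENC minor, 0-11 revision. */
	unsigned int v = 0x2115402a;
	unsigned int fw_check = (v >> 20) & 0xf;

	if (fw_check)	/* 1 here, so the new naming convention */
		printf("ENC %u.%u DEC %u VEP %u rev %u\n",
		       fw_check, (v >> 12) & 0xff,
		       (v >> 24) & 0xf, (v >> 28) & 0xf, v & 0xfff);
	return 0;
}
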
index b0eb2f537392d192d84d3884bd685f7084e9e220..edf16b2b957a4a4df2624d331f7d758500d888c6 100644 (file)
@@ -1463,7 +1463,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                        uint64_t count;
 
                        max_entries = min(max_entries, 16ull * 1024ull);
-                       for (count = 1; count < max_entries; ++count) {
+                       for (count = 1;
+                            count < max_entries / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+                            ++count) {
                                uint64_t idx = pfn + count;
 
                                if (pages_addr[idx] !=
@@ -1476,7 +1478,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                dma_addr = pages_addr;
                        } else {
                                addr = pages_addr[pfn];
-                               max_entries = count;
+                               max_entries = count * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
                        }
 
                } else if (flags & AMDGPU_PTE_VALID) {
@@ -1491,7 +1493,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                if (r)
                        return r;
 
-               pfn += last - start + 1;
+               pfn += (last - start + 1) / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
                if (nodes && nodes->size == pfn) {
                        pfn = 0;
                        ++nodes;
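
The mapping loop counts in GPU pages while pages_addr[] is indexed by CPU page; with the driver's 4096-byte GPU page, a 64 KiB-page kernel has 16 GPU pages per CPU page, and the fix converts at each boundary instead of assuming a 1:1 ratio. A sketch of the arithmetic:

#include <stdio.h>

#define PAGE_SIZE		65536	/* e.g. a 64 KiB-page arm64/ppc64 kernel */
#define AMDGPU_GPU_PAGE_SIZE	4096	/* as defined by the driver */

int main(void)
{
	unsigned long ratio = PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE;
	unsigned long gpu_pages = 256;	/* a 1 MiB mapping */

	printf("ratio %lu: %lu GPU pages span %lu CPU pages\n",
	       ratio, gpu_pages, gpu_pages / ratio);
	return 0;
}
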
index 9aca653bec07714874297e327eb950225f5ac555..b6333f92ba4565e9b5949f48e643f2d47daaa2a0 100644 (file)
@@ -96,6 +96,38 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
                adev->gmc.visible_vram_size : end) - start;
 }
 
+/**
+ * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
+ *
+ * @bo: &amdgpu_bo buffer object (must be in VRAM)
+ *
+ * Returns:
+ * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
+ */
+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
+{
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct ttm_mem_reg *mem = &bo->tbo.mem;
+       struct drm_mm_node *nodes = mem->mm_node;
+       unsigned pages = mem->num_pages;
+       u64 usage = 0;
+
+       if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
+               return 0;
+
+       if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+               return amdgpu_bo_size(bo);
+
+       while (nodes && pages) {
+               usage += nodes->size << PAGE_SHIFT;
+               usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
+               pages -= nodes->size;
+               ++nodes;
+       }
+
+       return usage;
+}
+
 /**
  * amdgpu_vram_mgr_new - allocate new ranges
  *
@@ -135,7 +167,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
                num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
        }
 
-       nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+       nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
+                              GFP_KERNEL | __GFP_ZERO);
        if (!nodes)
                return -ENOMEM;
 
@@ -190,7 +223,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
                drm_mm_remove_node(&nodes[i]);
        spin_unlock(&mgr->lock);
 
-       kfree(nodes);
+       kvfree(nodes);
        return r == -ENOSPC ? 0 : r;
 }
 
@@ -229,7 +262,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
        atomic64_sub(usage, &mgr->usage);
        atomic64_sub(vis_usage, &mgr->vis_usage);
 
-       kfree(mem->mm_node);
+       kvfree(mem->mm_node);
        mem->mm_node = NULL;
 }
 
index f9add85157e7355432aab9d0d14728f8906774f9..3a8d6356afc2f5ed3b6306243ff1b02df4d4ce88 100644 (file)
@@ -3928,10 +3928,11 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
        if (acrtc->base.state->event)
                prepare_flip_isr(acrtc);
 
+       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
        surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
        surface_updates->flip_addr = &addr;
 
-
        dc_commit_updates_for_stream(adev->dm.dc,
                                             surface_updates,
                                             1,
@@ -3944,9 +3945,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
                         __func__,
                         addr.address.grph.addr.high_part,
                         addr.address.grph.addr.low_part);
-
-
-       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 }
 
 /*
@@ -4206,6 +4204,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        struct drm_connector *connector;
        struct drm_connector_state *old_con_state, *new_con_state;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+       int crtc_disable_count = 0;
 
        drm_atomic_helper_update_legacy_modeset_state(dev, state);
 
@@ -4410,6 +4409,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
                bool modeset_needed;
 
+               if (old_crtc_state->active && !new_crtc_state->active)
+                       crtc_disable_count++;
+
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
                modeset_needed = modeset_required(
@@ -4463,11 +4465,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
         * so we can put the GPU into runtime suspend if we're not driving any
         * displays anymore
         */
+       for (i = 0; i < crtc_disable_count; i++)
+               pm_runtime_put_autosuspend(dev->dev);
        pm_runtime_mark_last_busy(dev->dev);
-       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-               if (old_crtc_state->active && !new_crtc_state->active)
-                       pm_runtime_put_autosuspend(dev->dev);
-       }
 }
 
 
index dbe4b1f66784961ea028b3fcfee564e830617f80..22364875a943e5e32e7e13d5fe2ef79d579f2824 100644 (file)
@@ -1090,7 +1090,7 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
 static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 {
        struct amdgpu_device *adev = hwmgr->adev;
-       int result;
+       int result = 0;
        uint32_t num_se = 0;
        uint32_t count, data;
 
index 8d20faa198cf199ff65d604ca9f4f1effd633c86..0a788d76ed5f02f6aed755447783874280b5cf7a 100644 (file)
@@ -278,7 +278,6 @@ static int malidp_init(struct drm_device *drm)
 
 static void malidp_fini(struct drm_device *drm)
 {
-       drm_atomic_helper_shutdown(drm);
        drm_mode_config_cleanup(drm);
 }
 
@@ -646,6 +645,7 @@ static int malidp_bind(struct device *dev)
        malidp_de_irq_fini(drm);
        drm->irq_enabled = false;
 irq_init_fail:
+       drm_atomic_helper_shutdown(drm);
        component_unbind_all(dev, drm);
 bind_fail:
        of_node_put(malidp->crtc.port);
@@ -681,6 +681,7 @@ static void malidp_unbind(struct device *dev)
        malidp_se_irq_fini(drm);
        malidp_de_irq_fini(drm);
        drm->irq_enabled = false;
+       drm_atomic_helper_shutdown(drm);
        component_unbind_all(dev, drm);
        of_node_put(malidp->crtc.port);
        malidp->crtc.port = NULL;
index d789b46dc817335dd2d509ee456d850897762b88..069783e715f1777829b3d992b2add45b8fb693da 100644 (file)
@@ -634,7 +634,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
                                .vsync_irq = MALIDP500_DE_IRQ_VSYNC,
                        },
                        .se_irq_map = {
-                               .irq_mask = MALIDP500_SE_IRQ_CONF_MODE,
+                               .irq_mask = MALIDP500_SE_IRQ_CONF_MODE |
+                                           MALIDP500_SE_IRQ_GLOBAL,
                                .vsync_irq = 0,
                        },
                        .dc_irq_map = {
index 7a44897c50fea784bf516db7f17866ee31f413fa..29409a65d864760e674f787cb5279cdbff5b91a7 100644 (file)
@@ -23,6 +23,7 @@
 
 /* Layer specific register offsets */
 #define MALIDP_LAYER_FORMAT            0x000
+#define   LAYER_FORMAT_MASK            0x3f
 #define MALIDP_LAYER_CONTROL           0x004
 #define   LAYER_ENABLE                 (1 << 0)
 #define   LAYER_FLOWCFG_MASK           7
@@ -235,8 +236,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
        if (state->rotation & MALIDP_ROTATED_MASK) {
                int val;
 
-               val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h,
-                                                    state->crtc_w,
+               val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w,
+                                                    state->crtc_h,
                                                     fb->format->format);
                if (val < 0)
                        return val;
@@ -337,7 +338,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
        dest_w = plane->state->crtc_w;
        dest_h = plane->state->crtc_h;
 
-       malidp_hw_write(mp->hwdev, ms->format, mp->layer->base);
+       val = malidp_hw_read(mp->hwdev, mp->layer->base);
+       val = (val & ~LAYER_FORMAT_MASK) | ms->format;
+       malidp_hw_write(mp->hwdev, val, mp->layer->base);
 
        for (i = 0; i < ms->n_planes; i++) {
                /* calculate the offset for the layer's plane registers */
index 73c875db45f4346afd5a25408e9264c466401138..47e0992f39083161d46c5d1759f0c06f5cf2c0de 100644 (file)
@@ -839,7 +839,7 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
                        return ret;
        }
 
-       if (desc->layout.xstride && desc->layout.pstride) {
+       if (desc->layout.xstride[0] && desc->layout.pstride[0]) {
                int ret;
 
                ret = drm_plane_create_rotation_property(&plane->base,
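
layout.xstride and layout.pstride are arrays embedded in the descriptor, so the old condition evaluated each array's address, which is never NULL, and the rotation property was registered unconditionally. A userspace sketch of the decay behaviour (struct simplified):

#include <stdio.h>

struct layout { int xstride[2]; int pstride[2]; };

int main(void)
{
	struct layout l = { {0, 0}, {0, 0} };
	struct layout *desc = &l;

	printf("array test:   %d\n", desc->xstride ? 1 : 0);	/* always 1 */
	printf("element test: %d\n", desc->xstride[0] ? 1 : 0);	/* 0 here */
	return 0;
}
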
index 7ab36042a822cf6cfec2fc440ef3fbe6e018fa3d..250effa0e6b83184e4ff60986b10ecaa1334eef1 100644 (file)
 
 #define SII8620_BURST_BUF_LEN 288
 #define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
-#define MHL1_MAX_LCLK 225000
-#define MHL3_MAX_LCLK 600000
+
+#define MHL1_MAX_PCLK 75000
+#define MHL1_MAX_PCLK_PP_MODE 150000
+#define MHL3_MAX_PCLK 200000
+#define MHL3_MAX_PCLK_PP_MODE 300000
 
 enum sii8620_mode {
        CM_DISCONNECTED,
@@ -80,6 +83,9 @@ struct sii8620 {
        u8 devcap[MHL_DCAP_SIZE];
        u8 xdevcap[MHL_XDC_SIZE];
        u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
+       bool feature_complete;
+       bool devcap_read;
+       bool sink_detected;
        struct edid *edid;
        unsigned int gen2_write_burst:1;
        enum sii8620_mt_state mt_state;
@@ -476,7 +482,7 @@ static void sii8620_update_array(u8 *dst, u8 *src, int count)
        }
 }
 
-static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
+static void sii8620_identify_sink(struct sii8620 *ctx)
 {
        static const char * const sink_str[] = {
                [SINK_NONE] = "NONE",
@@ -487,7 +493,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
        char sink_name[20];
        struct device *dev = ctx->dev;
 
-       if (ret < 0)
+       if (!ctx->sink_detected || !ctx->devcap_read)
                return;
 
        sii8620_fetch_edid(ctx);
@@ -496,6 +502,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
                sii8620_mhl_disconnected(ctx);
                return;
        }
+       sii8620_set_upstream_edid(ctx);
 
        if (drm_detect_hdmi_monitor(ctx->edid))
                ctx->sink_type = SINK_HDMI;
@@ -508,53 +515,6 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
                 sink_str[ctx->sink_type], sink_name);
 }
 
-static void sii8620_hsic_init(struct sii8620 *ctx)
-{
-       if (!sii8620_is_mhl3(ctx))
-               return;
-
-       sii8620_write(ctx, REG_FCGC,
-               BIT_FCGC_HSIC_HOSTMODE | BIT_FCGC_HSIC_ENABLE);
-       sii8620_setbits(ctx, REG_HRXCTRL3,
-               BIT_HRXCTRL3_HRX_STAY_RESET | BIT_HRXCTRL3_STATUS_EN, ~0);
-       sii8620_setbits(ctx, REG_TTXNUMB, MSK_TTXNUMB_TTX_NUMBPS, 4);
-       sii8620_setbits(ctx, REG_TRXCTRL, BIT_TRXCTRL_TRX_FROM_SE_COC, ~0);
-       sii8620_setbits(ctx, REG_HTXCTRL, BIT_HTXCTRL_HTX_DRVCONN1, 0);
-       sii8620_setbits(ctx, REG_KEEPER, MSK_KEEPER_MODE, VAL_KEEPER_MODE_HOST);
-       sii8620_write_seq_static(ctx,
-               REG_TDMLLCTL, 0,
-               REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST |
-                       BIT_UTSRST_KEEPER_SRST | BIT_UTSRST_FC_SRST,
-               REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST,
-               REG_HRXINTL, 0xff,
-               REG_HRXINTH, 0xff,
-               REG_TTXINTL, 0xff,
-               REG_TTXINTH, 0xff,
-               REG_TRXINTL, 0xff,
-               REG_TRXINTH, 0xff,
-               REG_HTXINTL, 0xff,
-               REG_HTXINTH, 0xff,
-               REG_FCINTR0, 0xff,
-               REG_FCINTR1, 0xff,
-               REG_FCINTR2, 0xff,
-               REG_FCINTR3, 0xff,
-               REG_FCINTR4, 0xff,
-               REG_FCINTR5, 0xff,
-               REG_FCINTR6, 0xff,
-               REG_FCINTR7, 0xff
-       );
-}
-
-static void sii8620_edid_read(struct sii8620 *ctx, int ret)
-{
-       if (ret < 0)
-               return;
-
-       sii8620_set_upstream_edid(ctx);
-       sii8620_hsic_init(ctx);
-       sii8620_enable_hpd(ctx);
-}
-
 static void sii8620_mr_devcap(struct sii8620 *ctx)
 {
        u8 dcap[MHL_DCAP_SIZE];
@@ -570,6 +530,8 @@ static void sii8620_mr_devcap(struct sii8620 *ctx)
                 dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L],
                 dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]);
        sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+       ctx->devcap_read = true;
+       sii8620_identify_sink(ctx);
 }
 
 static void sii8620_mr_xdevcap(struct sii8620 *ctx)
@@ -807,6 +769,7 @@ static void sii8620_burst_rx_all(struct sii8620 *ctx)
 static void sii8620_fetch_edid(struct sii8620 *ctx)
 {
        u8 lm_ddc, ddc_cmd, int3, cbus;
+       unsigned long timeout;
        int fetched, i;
        int edid_len = EDID_LENGTH;
        u8 *edid;
@@ -856,23 +819,31 @@ static void sii8620_fetch_edid(struct sii8620 *ctx)
                        REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
                );
 
-               do {
-                       int3 = sii8620_readb(ctx, REG_INTR3);
+               int3 = 0;
+               timeout = jiffies + msecs_to_jiffies(200);
+               for (;;) {
                        cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
-
-                       if (int3 & BIT_DDC_CMD_DONE)
-                               break;
-
-                       if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
+                       if (~cbus & BIT_CBUS_STATUS_CBUS_CONNECTED) {
+                               kfree(edid);
+                               edid = NULL;
+                               goto end;
+                       }
+                       if (int3 & BIT_DDC_CMD_DONE) {
+                               if (sii8620_readb(ctx, REG_DDC_DOUT_CNT)
+                                   >= FETCH_SIZE)
+                                       break;
+                       } else {
+                               int3 = sii8620_readb(ctx, REG_INTR3);
+                       }
+                       if (time_is_before_jiffies(timeout)) {
+                               ctx->error = -ETIMEDOUT;
+                               dev_err(ctx->dev, "timeout during EDID read\n");
                                kfree(edid);
                                edid = NULL;
                                goto end;
                        }
-               } while (1);
-
-               sii8620_readb(ctx, REG_DDC_STATUS);
-               while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
                        usleep_range(10, 20);
+               }
 
                sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
                if (fetched + FETCH_SIZE == EDID_LENGTH) {
@@ -971,8 +942,17 @@ static int sii8620_hw_on(struct sii8620 *ctx)
        ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
        if (ret)
                return ret;
+
        usleep_range(10000, 20000);
-       return clk_prepare_enable(ctx->clk_xtal);
+       ret = clk_prepare_enable(ctx->clk_xtal);
+       if (ret)
+               return ret;
+
+       msleep(100);
+       gpiod_set_value(ctx->gpio_reset, 0);
+       msleep(100);
+
+       return 0;
 }
 
 static int sii8620_hw_off(struct sii8620 *ctx)
@@ -982,17 +962,6 @@ static int sii8620_hw_off(struct sii8620 *ctx)
        return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
 }
 
-static void sii8620_hw_reset(struct sii8620 *ctx)
-{
-       usleep_range(10000, 20000);
-       gpiod_set_value(ctx->gpio_reset, 0);
-       usleep_range(5000, 20000);
-       gpiod_set_value(ctx->gpio_reset, 1);
-       usleep_range(10000, 20000);
-       gpiod_set_value(ctx->gpio_reset, 0);
-       msleep(300);
-}
-
 static void sii8620_cbus_reset(struct sii8620 *ctx)
 {
        sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
@@ -1048,20 +1017,11 @@ static void sii8620_stop_video(struct sii8620 *ctx)
 
 static void sii8620_set_format(struct sii8620 *ctx)
 {
-       u8 out_fmt;
-
        if (sii8620_is_mhl3(ctx)) {
                sii8620_setbits(ctx, REG_M3_P0CTRL,
                                BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
                                ctx->use_packed_pixel ? ~0 : 0);
        } else {
-               if (ctx->use_packed_pixel)
-                       sii8620_write_seq_static(ctx,
-                               REG_VID_MODE, BIT_VID_MODE_M1080P,
-                               REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
-                               REG_MHLTX_CTL6, 0x60
-                       );
-               else
                        sii8620_write_seq_static(ctx,
                                REG_VID_MODE, 0,
                                REG_MHL_TOP_CTL, 1,
@@ -1069,15 +1029,9 @@ static void sii8620_set_format(struct sii8620 *ctx)
                        );
        }
 
-       if (ctx->use_packed_pixel)
-               out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) |
-                       BIT_TPI_OUTPUT_CSCMODE709;
-       else
-               out_fmt = VAL_TPI_FORMAT(RGB, FULL);
-
        sii8620_write_seq(ctx,
                REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
-               REG_TPI_OUTPUT, out_fmt,
+               REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
        );
 }
 
@@ -1216,7 +1170,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
                int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
                int i;
 
-               for (i = 0; i < ARRAY_SIZE(clk_spec); ++i)
+               for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
                        if (clk < clk_spec[i].max_clk)
                                break;
 
@@ -1534,6 +1488,16 @@ static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
        );
 }
 
+static void sii8620_hpd_unplugged(struct sii8620 *ctx)
+{
+       sii8620_disable_hpd(ctx);
+       ctx->sink_type = SINK_NONE;
+       ctx->sink_detected = false;
+       ctx->feature_complete = false;
+       kfree(ctx->edid);
+       ctx->edid = NULL;
+}
+
 static void sii8620_disconnect(struct sii8620 *ctx)
 {
        sii8620_disable_gen2_write_burst(ctx);
@@ -1561,7 +1525,7 @@ static void sii8620_disconnect(struct sii8620 *ctx)
                REG_MHL_DP_CTL6, 0x2A,
                REG_MHL_DP_CTL7, 0x03
        );
-       sii8620_disable_hpd(ctx);
+       sii8620_hpd_unplugged(ctx);
        sii8620_write_seq_static(ctx,
                REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
                REG_MHL_COC_CTL1, 0x07,
@@ -1609,10 +1573,8 @@ static void sii8620_disconnect(struct sii8620 *ctx)
        memset(ctx->xstat, 0, sizeof(ctx->xstat));
        memset(ctx->devcap, 0, sizeof(ctx->devcap));
        memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
+       ctx->devcap_read = false;
        ctx->cbus_status = 0;
-       ctx->sink_type = SINK_NONE;
-       kfree(ctx->edid);
-       ctx->edid = NULL;
        sii8620_mt_cleanup(ctx);
 }
 
@@ -1703,9 +1665,6 @@ static void sii8620_status_changed_path(struct sii8620 *ctx)
                sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
                                      MHL_DST_LM_CLK_MODE_NORMAL
                                      | MHL_DST_LM_PATH_ENABLED);
-               if (!sii8620_is_mhl3(ctx))
-                       sii8620_mt_read_devcap(ctx, false);
-               sii8620_mt_set_cont(ctx, sii8620_sink_detected);
        } else {
                sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
                                      MHL_DST_LM_CLK_MODE_NORMAL);
@@ -1722,9 +1681,14 @@ static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
        sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
        sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
 
-       if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+       if (ctx->stat[MHL_DST_CONNECTED_RDY] & st[MHL_DST_CONNECTED_RDY] &
+           MHL_DST_CONN_DCAP_RDY) {
                sii8620_status_dcap_ready(ctx);
 
+               if (!sii8620_is_mhl3(ctx))
+                       sii8620_mt_read_devcap(ctx, false);
+       }
+
        if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
                sii8620_status_changed_path(ctx);
 }
@@ -1808,8 +1772,11 @@ static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
        }
        if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ)
                sii8620_send_features(ctx);
-       if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE)
-               sii8620_edid_read(ctx, 0);
+       if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) {
+               ctx->feature_complete = true;
+               if (ctx->edid)
+                       sii8620_enable_hpd(ctx);
+       }
 }
 
 static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
@@ -1884,6 +1851,15 @@ static void sii8620_irq_msc(struct sii8620 *ctx)
        if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
                sii8620_msc_mr_write_stat(ctx);
 
+       if (stat & BIT_CBUS_HPD_CHG) {
+               if (ctx->cbus_status & BIT_CBUS_STATUS_CBUS_HPD) {
+                       ctx->sink_detected = true;
+                       sii8620_identify_sink(ctx);
+               } else {
+                       sii8620_hpd_unplugged(ctx);
+               }
+       }
+
        if (stat & BIT_CBUS_MSC_MR_SET_INT)
                sii8620_msc_mr_set_int(ctx);
 
@@ -1931,14 +1907,6 @@ static void sii8620_irq_edid(struct sii8620 *ctx)
                ctx->mt_state = MT_STATE_DONE;
 }
 
-static void sii8620_scdt_high(struct sii8620 *ctx)
-{
-       sii8620_write_seq_static(ctx,
-               REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
-               REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
-       );
-}
-
 static void sii8620_irq_scdt(struct sii8620 *ctx)
 {
        u8 stat = sii8620_readb(ctx, REG_INTR5);
@@ -1946,53 +1914,13 @@ static void sii8620_irq_scdt(struct sii8620 *ctx)
        if (stat & BIT_INTR_SCDT_CHANGE) {
                u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
 
-               if (cstat & BIT_TMDS_CSTAT_P3_SCDT) {
-                       if (ctx->sink_type == SINK_HDMI)
-                               /* enable infoframe interrupt */
-                               sii8620_scdt_high(ctx);
-                       else
-                               sii8620_start_video(ctx);
-               }
+               if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
+                       sii8620_start_video(ctx);
        }
 
        sii8620_write(ctx, REG_INTR5, stat);
 }
 
-static void sii8620_new_vsi(struct sii8620 *ctx)
-{
-       u8 vsif[11];
-
-       sii8620_write(ctx, REG_RX_HDMI_CTRL2,
-                     VAL_RX_HDMI_CTRL2_DEFVAL |
-                     BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
-       sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
-                        ARRAY_SIZE(vsif));
-}
-
-static void sii8620_new_avi(struct sii8620 *ctx)
-{
-       sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
-       sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
-                        ARRAY_SIZE(ctx->avif));
-}
-
-static void sii8620_irq_infr(struct sii8620 *ctx)
-{
-       u8 stat = sii8620_readb(ctx, REG_INTR8)
-               & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
-
-       sii8620_write(ctx, REG_INTR8, stat);
-
-       if (stat & BIT_CEA_NEW_VSI)
-               sii8620_new_vsi(ctx);
-
-       if (stat & BIT_CEA_NEW_AVI)
-               sii8620_new_avi(ctx);
-
-       if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
-               sii8620_start_video(ctx);
-}
-
 static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret)
 {
        if (ret < 0)
@@ -2043,11 +1971,11 @@ static void sii8620_irq_ddc(struct sii8620 *ctx)
 
        if (stat & BIT_DDC_CMD_DONE) {
                sii8620_write(ctx, REG_INTR3_MASK, 0);
-               if (sii8620_is_mhl3(ctx))
+               if (sii8620_is_mhl3(ctx) && !ctx->feature_complete)
                        sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE),
                                           MHL_INT_RC_FEAT_REQ);
                else
-                       sii8620_edid_read(ctx, 0);
+                       sii8620_enable_hpd(ctx);
        }
        sii8620_write(ctx, REG_INTR3, stat);
 }
@@ -2074,7 +2002,6 @@ static irqreturn_t sii8620_irq_thread(int irq, void *data)
                { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
                { BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc },
                { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
-               { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
        };
        struct sii8620 *ctx = data;
        u8 stats[LEN_FAST_INTR_STAT];
@@ -2112,7 +2039,6 @@ static void sii8620_cable_in(struct sii8620 *ctx)
                dev_err(dev, "Error powering on, %d.\n", ret);
                return;
        }
-       sii8620_hw_reset(ctx);
 
        sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
        ret = sii8620_clear_error(ctx);
@@ -2268,17 +2194,43 @@ static void sii8620_detach(struct drm_bridge *bridge)
        rc_unregister_device(ctx->rc_dev);
 }
 
+static int sii8620_is_packing_required(struct sii8620 *ctx,
+                                      const struct drm_display_mode *mode)
+{
+       int max_pclk, max_pclk_pp_mode;
+
+       if (sii8620_is_mhl3(ctx)) {
+               max_pclk = MHL3_MAX_PCLK;
+               max_pclk_pp_mode = MHL3_MAX_PCLK_PP_MODE;
+       } else {
+               max_pclk = MHL1_MAX_PCLK;
+               max_pclk_pp_mode = MHL1_MAX_PCLK_PP_MODE;
+       }
+
+       if (mode->clock < max_pclk)
+               return 0;
+       else if (mode->clock < max_pclk_pp_mode)
+               return 1;
+       else
+               return -1;
+}
+
 static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge,
                                         const struct drm_display_mode *mode)
 {
        struct sii8620 *ctx = bridge_to_sii8620(bridge);
+       int pack_required = sii8620_is_packing_required(ctx, mode);
        bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] &
                        MHL_DCAP_VID_LINK_PPIXEL;
-       unsigned int max_pclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK :
-                                                      MHL1_MAX_LCLK;
-       max_pclk /= can_pack ? 2 : 3;
 
-       return (mode->clock > max_pclk) ? MODE_CLOCK_HIGH : MODE_OK;
+       switch (pack_required) {
+       case 0:
+               return MODE_OK;
+       case 1:
+               return (can_pack) ? MODE_OK : MODE_CLOCK_HIGH;
+       default:
+               return MODE_CLOCK_HIGH;
+       }
 }
 
 static bool sii8620_mode_fixup(struct drm_bridge *bridge,
@@ -2286,43 +2238,16 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
                               struct drm_display_mode *adjusted_mode)
 {
        struct sii8620 *ctx = bridge_to_sii8620(bridge);
-       int max_lclk;
-       bool ret = true;
 
        mutex_lock(&ctx->lock);
 
-       max_lclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : MHL1_MAX_LCLK;
-       if (max_lclk > 3 * adjusted_mode->clock) {
-               ctx->use_packed_pixel = 0;
-               goto end;
-       }
-       if ((ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) &&
-           max_lclk > 2 * adjusted_mode->clock) {
-               ctx->use_packed_pixel = 1;
-               goto end;
-       }
-       ret = false;
-end:
-       if (ret) {
-               u8 vic = drm_match_cea_mode(adjusted_mode);
-
-               if (!vic) {
-                       union hdmi_infoframe frm;
-                       u8 mhl_vic[] = { 0, 95, 94, 93, 98 };
-
-                       /* FIXME: We need the connector here */
-                       drm_hdmi_vendor_infoframe_from_display_mode(
-                               &frm.vendor.hdmi, NULL, adjusted_mode);
-                       vic = frm.vendor.hdmi.vic;
-                       if (vic >= ARRAY_SIZE(mhl_vic))
-                               vic = 0;
-                       vic = mhl_vic[vic];
-               }
-               ctx->video_code = vic;
-               ctx->pixel_clock = adjusted_mode->clock;
-       }
+       ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode);
+       ctx->video_code = drm_match_cea_mode(adjusted_mode);
+       ctx->pixel_clock = adjusted_mode->clock;
+
        mutex_unlock(&ctx->lock);
-       return ret;
+
+       return true;
 }
 
 static const struct drm_bridge_funcs sii8620_bridge_funcs = {
index b553a6f2ff0eb27dec7ad0aaeeb891992fe19ab8..7af748ed1c58dddfae7cb578760be3344273901c 100644 (file)
@@ -369,13 +369,6 @@ EXPORT_SYMBOL(drm_dev_exit);
  */
 void drm_dev_unplug(struct drm_device *dev)
 {
-       drm_dev_unregister(dev);
-
-       mutex_lock(&drm_global_mutex);
-       if (dev->open_count == 0)
-               drm_dev_put(dev);
-       mutex_unlock(&drm_global_mutex);
-
        /*
         * After synchronizing, any critical read section is guaranteed to see
         * the new value of ->unplugged, and any critical section which might
@@ -384,6 +377,13 @@ void drm_dev_unplug(struct drm_device *dev)
         */
        dev->unplugged = true;
        synchronize_srcu(&drm_unplug_srcu);
+
+       drm_dev_unregister(dev);
+
+       mutex_lock(&drm_global_mutex);
+       if (dev->open_count == 0)
+               drm_dev_put(dev);
+       mutex_unlock(&drm_global_mutex);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
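
The reordering above relies on the SRCU read-side helpers exported earlier in this file; a minimal sketch of the reader pattern that synchronize_srcu() waits for (drm_dev_enter()/drm_dev_exit() are the existing helpers; the I/O in the middle is a placeholder):

	int idx;

	if (!drm_dev_enter(dev, &idx))	/* fails once dev->unplugged is set */
		return -ENODEV;
	/* ... device I/O that must not race with the unplug ... */
	drm_dev_exit(idx);

With the unregister moved after synchronize_srcu(), any reader entering afterwards sees ->unplugged and bails out before the device is torn down.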
 
index 34c125e2d90c094c98759e127d2f93780cd65788..52f3b91d14fd00f7751c874600a4763eadaee8bf 100644 (file)
@@ -340,14 +340,21 @@ struct drm_i915_file_private {
 
        unsigned int bsd_engine;
 
-/* Client can have a maximum of 3 contexts banned before
- * it is denied of creating new contexts. As one context
- * ban needs 4 consecutive hangs, and more if there is
- * progress in between, this is a last resort stop gap measure
- * to limit the badly behaving clients access to gpu.
+/*
+ * Every context ban increments the per-client ban score; hangs in
+ * short succession also increment it. Once the ban threshold is
+ * reached, the client is considered banned and submitting more work
+ * will fail. This is a stop-gap measure to limit a badly behaving
+ * client's access to the GPU. Note that unbannable contexts never
+ * increment the client ban score.
+ */
-#define I915_MAX_CLIENT_CONTEXT_BANS 3
-       atomic_t context_bans;
+#define I915_CLIENT_SCORE_HANG_FAST    1
+#define   I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
+#define I915_CLIENT_SCORE_CONTEXT_BAN   3
+#define I915_CLIENT_SCORE_BANNED       9
+       /** ban_score: Accumulated score of all ctx bans and fast hangs. */
+       atomic_t ban_score;
+       unsigned long hang_timestamp;
 };
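
With these macro values, the arithmetic behind the new scheme works out as follows (a worked sketch, not code from the patch):

	/*
	 * one context ban      -> +3 (I915_CLIENT_SCORE_CONTEXT_BAN)
	 * one hang within 60 s -> +1 (I915_CLIENT_SCORE_HANG_FAST)
	 *
	 * So three context bans (3 * 3 = 9), or e.g. two bans plus three
	 * fast hangs (2 * 3 + 3 * 1 = 9), reach I915_CLIENT_SCORE_BANNED
	 * and make client_is_banned() below return true.
	 */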
 
 /* Interface history:
@@ -2238,9 +2245,6 @@ static inline struct scatterlist *____sg_next(struct scatterlist *sg)
  **/
 static inline struct scatterlist *__sg_next(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        return sg_is_last(sg) ? NULL : ____sg_next(sg);
 }
 
index 3704f4c0c2c970c31b0c6031050b20126e7ae2a9..d44ad7bc1e945bce0351cac4b75762d0e9cb2413 100644 (file)
@@ -2933,32 +2933,54 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
        return 0;
 }
 
+static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
+                                       const struct i915_gem_context *ctx)
+{
+       unsigned int score;
+       unsigned long prev_hang;
+
+       if (i915_gem_context_is_banned(ctx))
+               score = I915_CLIENT_SCORE_CONTEXT_BAN;
+       else
+               score = 0;
+
+       prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
+       if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
+               score += I915_CLIENT_SCORE_HANG_FAST;
+
+       if (score) {
+               atomic_add(score, &file_priv->ban_score);
+
+               DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
+                                ctx->name, score,
+                                atomic_read(&file_priv->ban_score));
+       }
+}
+
 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
 {
-       bool banned;
+       unsigned int score;
+       bool banned, bannable;
 
        atomic_inc(&ctx->guilty_count);
 
-       banned = false;
-       if (i915_gem_context_is_bannable(ctx)) {
-               unsigned int score;
+       bannable = i915_gem_context_is_bannable(ctx);
+       score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
+       banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
 
-               score = atomic_add_return(CONTEXT_SCORE_GUILTY,
-                                         &ctx->ban_score);
-               banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+       DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
+                        ctx->name, atomic_read(&ctx->guilty_count),
+                        score, yesno(banned && bannable));
 
-               DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
-                                ctx->name, score, yesno(banned));
-       }
-       if (!banned)
+       /* Cool contexts don't accumulate client ban score */
+       if (!bannable)
                return;
 
-       i915_gem_context_set_banned(ctx);
-       if (!IS_ERR_OR_NULL(ctx->file_priv)) {
-               atomic_inc(&ctx->file_priv->context_bans);
-               DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
-                                ctx->name, atomic_read(&ctx->file_priv->context_bans));
-       }
+       if (banned)
+               i915_gem_context_set_banned(ctx);
+
+       if (!IS_ERR_OR_NULL(ctx->file_priv))
+               i915_gem_client_mark_guilty(ctx->file_priv, ctx);
 }
 
 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
@@ -5736,6 +5758,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
        INIT_LIST_HEAD(&file_priv->mm.request_list);
 
        file_priv->bsd_engine = -1;
+       file_priv->hang_timestamp = jiffies;
 
        ret = i915_gem_context_open(i915, file);
        if (ret)
index 33f8a4b3c98170f2857e15255e4fc23ae8bbb49e..060335d3d9e0b44d19c9b3147e2e1496d1e8e571 100644 (file)
@@ -652,7 +652,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 
 static bool client_is_banned(struct drm_i915_file_private *file_priv)
 {
-       return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
+       return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
index f627a8c47c58a36f6ff92f17a4d6d672b28bc00b..22df17c8ca9b0fc75dc29189c6b7117002c5ad17 100644 (file)
@@ -489,7 +489,9 @@ eb_validate_vma(struct i915_execbuffer *eb,
 }
 
 static int
-eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
+eb_add_vma(struct i915_execbuffer *eb,
+          unsigned int i, unsigned batch_idx,
+          struct i915_vma *vma)
 {
        struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
        int err;
@@ -522,6 +524,24 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
        eb->flags[i] = entry->flags;
        vma->exec_flags = &eb->flags[i];
 
+       /*
+        * SNA is doing fancy tricks with compressing batch buffers, which leads
+        * to negative relocation deltas. Usually that works out ok since the
+        * relocate address is still positive, except when the batch is placed
+        * very low in the GTT. Ensure this doesn't happen.
+        *
+        * Note that actual hangs have only been observed on gen7, but for
+        * paranoia do it everywhere.
+        */
+       if (i == batch_idx) {
+               if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
+                       eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+               if (eb->reloc_cache.has_fence)
+                       eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+
+               eb->batch = vma;
+       }
+
        err = 0;
        if (eb_pin_vma(eb, entry, vma)) {
                if (entry->offset != vma->node.start) {
@@ -716,7 +736,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 {
        struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
        struct drm_i915_gem_object *obj;
-       unsigned int i;
+       unsigned int i, batch;
        int err;
 
        if (unlikely(i915_gem_context_is_closed(eb->ctx)))
@@ -728,6 +748,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
        INIT_LIST_HEAD(&eb->relocs);
        INIT_LIST_HEAD(&eb->unbound);
 
+       batch = eb_batch_index(eb);
+
        for (i = 0; i < eb->buffer_count; i++) {
                u32 handle = eb->exec[i].handle;
                struct i915_lut_handle *lut;
@@ -770,33 +792,16 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
                lut->handle = handle;
 
 add_vma:
-               err = eb_add_vma(eb, i, vma);
+               err = eb_add_vma(eb, i, batch, vma);
                if (unlikely(err))
                        goto err_vma;
 
                GEM_BUG_ON(vma != eb->vma[i]);
                GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
+               GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
+                          eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
        }
 
-       /* take note of the batch buffer before we might reorder the lists */
-       i = eb_batch_index(eb);
-       eb->batch = eb->vma[i];
-       GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]);
-
-       /*
-        * SNA is doing fancy tricks with compressing batch buffers, which leads
-        * to negative relocation deltas. Usually that works out ok since the
-        * relocate address is still positive, except when the batch is placed
-        * very low in the GTT. Ensure this doesn't happen.
-        *
-        * Note that actual hangs have only been observed on gen7, but for
-        * paranoia do it everywhere.
-        */
-       if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
-               eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
-       if (eb->reloc_cache.has_fence)
-               eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
-
        eb->args->flags |= __EXEC_VALIDATED;
        return eb_reserve(eb);
 
index f9bc3aaa90d0f5de110e893415be0a6ee1c40448..4a02747ac65877ce5e7aa24a4363ecedbc7e024e 100644 (file)
@@ -1893,9 +1893,17 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
 
                /*
                 * Clear the PIPE*STAT regs before the IIR
+                *
+                * Toggle the enable bits to make sure we get an
+                * edge in the ISR pipe event bit if we don't clear
+                * all the enabled status bits. Otherwise the edge
+                * triggered IIR on i965/g4x wouldn't notice that
+                * an interrupt is still pending.
                 */
-               if (pipe_stats[pipe])
-                       I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
+               if (pipe_stats[pipe]) {
+                       I915_WRITE(reg, pipe_stats[pipe]);
+                       I915_WRITE(reg, enable_mask);
+               }
        }
        spin_unlock(&dev_priv->irq_lock);
 }
index f11bb213ec0784e4c50db5bd0ea0647a5419e0e1..7720569f20244114e027dca6b810b182771cfcee 100644 (file)
@@ -2425,12 +2425,17 @@ enum i915_power_well_id {
 #define _3D_CHICKEN    _MMIO(0x2084)
 #define  _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB     (1 << 10)
 #define _3D_CHICKEN2   _MMIO(0x208c)
+
+#define FF_SLICE_CHICKEN       _MMIO(0x2088)
+#define  FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX      (1 << 1)
+
 /* Disables pipelining of read flushes past the SF-WIZ interface.
  * Required on all Ironlake steppings according to the B-Spec, but the
  * particular danger of not doing so is not specified.
  */
 # define _3D_CHICKEN2_WM_READ_PIPELINED                        (1 << 14)
 #define _3D_CHICKEN3   _MMIO(0x2090)
+#define  _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX           (1 << 12)
 #define  _3D_CHICKEN_SF_DISABLE_OBJEND_CULL            (1 << 10)
 #define  _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE       (1 << 5)
 #define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL         (1 << 5)
index de0e22322c76ed649c2f36266e65247ed9d02c28..072b326d5ee0a77868de83818a334c68077c762e 100644 (file)
@@ -304,6 +304,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
        int max_dotclk = dev_priv->max_dotclk_freq;
        int max_clock;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        if (mode->clock < 25000)
                return MODE_CLOCK_LOW;
 
@@ -337,6 +340,12 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config,
                                     struct drm_connector_state *conn_state)
 {
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        return true;
 }
 
@@ -344,6 +353,12 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
                                   struct intel_crtc_state *pipe_config,
                                   struct drm_connector_state *conn_state)
 {
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        pipe_config->has_pch_encoder = true;
 
        return true;
@@ -354,6 +369,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
                                   struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
 
        pipe_config->has_pch_encoder = true;
 
index dee3a8e659f1d6c9dbe2040abd6e2ba42020070a..2cc6faa1daa8d464d5fa2c7a063ef7d6cb3965cc 100644 (file)
@@ -14469,12 +14469,22 @@ static enum drm_mode_status
 intel_mode_valid(struct drm_device *dev,
                 const struct drm_display_mode *mode)
 {
+       /*
+        * Can't reject DBLSCAN here because Xorg ddxen can add piles
+        * of DBLSCAN modes to the output's mode list when they detect
+        * the scaling mode property on the connector. And they don't
+        * ask the kernel to validate those modes in any way until
+        * modeset time, at which point the client gets a protocol error.
+        * So in order to not upset those clients we silently ignore the
+        * DBLSCAN flag on such connectors. For other connectors we will
+        * reject modes with the DBLSCAN flag in encoder->compute_config().
+        * And we always reject DBLSCAN modes in connector->mode_valid()
+        * as we never want such modes on the connector's mode list.
+        */
+
        if (mode->vscan > 1)
                return MODE_NO_VSCAN;
 
-       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return MODE_NO_DBLESCAN;
-
        if (mode->flags & DRM_MODE_FLAG_HSKEW)
                return MODE_H_ILLEGAL;
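
The comment above describes a two-level strategy that the remaining display hunks in this series apply connector by connector; each encoder/connector pair gains the same two guards (<connector> and <encoder> are placeholders):

	/* in <connector>_mode_valid(): keep DBLSCAN modes off the list */
	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	/* in <encoder>_compute_config(): reject them at modeset time */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return false;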
 
index 8320f0e8e3bef8587b94a908bf87f3eecdf63fc5..16faea30114ac01f2744b69150d7a26d7ca578c3 100644 (file)
@@ -420,6 +420,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
        int max_rate, mode_rate, max_lanes, max_link_clock;
        int max_dotclk;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
 
        if (intel_dp_is_edp(intel_dp) && fixed_mode) {
@@ -1862,7 +1865,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                                                conn_state->scaling_mode);
        }
 
-       if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
+       if (HAS_GMCH_DISPLAY(dev_priv) &&
            adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
                return false;
 
@@ -2782,16 +2788,6 @@ static void intel_disable_dp(struct intel_encoder *encoder,
 static void g4x_disable_dp(struct intel_encoder *encoder,
                           const struct intel_crtc_state *old_crtc_state,
                           const struct drm_connector_state *old_conn_state)
-{
-       intel_disable_dp(encoder, old_crtc_state, old_conn_state);
-
-       /* disable the port before the pipe on g4x */
-       intel_dp_link_down(encoder, old_crtc_state);
-}
-
-static void ilk_disable_dp(struct intel_encoder *encoder,
-                          const struct intel_crtc_state *old_crtc_state,
-                          const struct drm_connector_state *old_conn_state)
 {
        intel_disable_dp(encoder, old_crtc_state, old_conn_state);
 }
@@ -2807,13 +2803,19 @@ static void vlv_disable_dp(struct intel_encoder *encoder,
        intel_disable_dp(encoder, old_crtc_state, old_conn_state);
 }
 
-static void ilk_post_disable_dp(struct intel_encoder *encoder,
+static void g4x_post_disable_dp(struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = encoder->port;
 
+       /*
+        * Bspec does not list a specific disable sequence for g4x DP.
+        * Follow the ilk+ sequence (disable pipe before the port) for
+        * g4x DP, as it does not suffer from the underruns seen with the
+        * normal g4x modeset sequence (disable pipe after the port).
+        */
        intel_dp_link_down(encoder, old_crtc_state);
 
        /* Only ilk+ has port A */
@@ -6337,7 +6339,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
-       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+       if (!HAS_GMCH_DISPLAY(dev_priv))
                connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;
 
@@ -6436,15 +6438,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->disable = vlv_disable_dp;
                intel_encoder->post_disable = vlv_post_disable_dp;
-       } else if (INTEL_GEN(dev_priv) >= 5) {
-               intel_encoder->pre_enable = g4x_pre_enable_dp;
-               intel_encoder->enable = g4x_enable_dp;
-               intel_encoder->disable = ilk_disable_dp;
-               intel_encoder->post_disable = ilk_post_disable_dp;
        } else {
                intel_encoder->pre_enable = g4x_pre_enable_dp;
                intel_encoder->enable = g4x_enable_dp;
                intel_encoder->disable = g4x_disable_dp;
+               intel_encoder->post_disable = g4x_post_disable_dp;
        }
 
        intel_dig_port->dp.output_reg = output_reg;
index 9e6956c0868835a9bcdf156c45d151ee2479b99a..5890500a3a8b6640e587070e89d805b06c99baf7 100644 (file)
@@ -48,6 +48,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
        bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
                                           DP_DPCD_QUIRK_LIMITED_M_N);
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        pipe_config->has_pch_encoder = false;
        bpp = 24;
        if (intel_dp->compliance.test_data.bpc) {
@@ -366,6 +369,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
        if (!intel_dp)
                return MODE_ERROR;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);
 
index cf39ca90d887872ddb2de5e011041f785fda3996..f349b39201993c88f633c1154c9fc9054c90449c 100644 (file)
@@ -326,6 +326,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
                                                conn_state->scaling_mode);
        }
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        /* DSI uses short packets for sync events, so clear mode flags for DSI */
        adjusted_mode->flags = 0;
 
@@ -1266,6 +1269,9 @@ intel_dsi_mode_valid(struct drm_connector *connector,
 
        DRM_DEBUG_KMS("\n");
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        if (fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;
index a70d767313aa10e198338e2e7472592827dfa347..61d908e0df0e2d75175f6f6300d0114b66f3833d 100644 (file)
@@ -219,6 +219,9 @@ intel_dvo_mode_valid(struct drm_connector *connector,
        int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
        int target_clock = mode->clock;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        /* XXX: Validate clock range */
 
        if (fixed_mode) {
@@ -254,6 +257,9 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
        if (fixed_mode)
                intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        return true;
 }
 
index ee929f31f7db712d0f1b306571de962480758314..d8cb53ef435134b67e2f08fc5c8470f6697436d0 100644 (file)
@@ -1557,6 +1557,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
        bool force_dvi =
                READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        clock = mode->clock;
 
        if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
@@ -1677,6 +1680,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        int desired_bpp;
        bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
 
        if (pipe_config->has_hdmi_sink)
index 15434cad543001317be875f1a266e3fef6636042..7c4c8fb1dae465bdaba562ed536e68918b7eb6b8 100644 (file)
@@ -1545,11 +1545,21 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
        batch = gen8_emit_flush_coherentl3_wa(engine, batch);
 
+       *batch++ = MI_LOAD_REGISTER_IMM(3);
+
        /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
-       *batch++ = MI_LOAD_REGISTER_IMM(1);
        *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
        *batch++ = _MASKED_BIT_DISABLE(
                        GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
+
+       /* BSpec: 11391 */
+       *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
+       *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
+
+       /* BSpec: 11299 */
+       *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
+       *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
+
        *batch++ = MI_NOOP;
 
        /* WaClearSlmSpaceAtContextSwitch:kbl */
@@ -2641,10 +2651,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
        context_size += LRC_HEADER_PAGES * PAGE_SIZE;
 
        ctx_obj = i915_gem_object_create(ctx->i915, context_size);
-       if (IS_ERR(ctx_obj)) {
-               ret = PTR_ERR(ctx_obj);
-               goto error_deref_obj;
-       }
+       if (IS_ERR(ctx_obj))
+               return PTR_ERR(ctx_obj);
 
        vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
        if (IS_ERR(vma)) {
index d278f24ba6ae58bf4a704dc57610a53e9b042d25..48f618dc9abbb9de03ddf594cddf67d154a872e1 100644 (file)
@@ -380,6 +380,8 @@ intel_lvds_mode_valid(struct drm_connector *connector,
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
        if (mode->hdisplay > fixed_mode->hdisplay)
                return MODE_PANEL;
        if (mode->vdisplay > fixed_mode->vdisplay)
@@ -429,6 +431,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
        intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
                               adjusted_mode);
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        if (HAS_PCH_SPLIT(dev_priv)) {
                pipe_config->has_pch_encoder = true;
 
index 25005023c243cb0526baf01f9c90d90dd3df4da2..26975df4e593b9a899c060cee398ffbdb7b5052b 100644 (file)
@@ -1160,6 +1160,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
                                                           adjusted_mode);
        }
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        /*
         * Make the CRTC code factor in the SDVO pixel multiplier.  The
         * SDVO device will factor out the multiplier during mode_set.
@@ -1621,6 +1624,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
        int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        if (intel_sdvo->pixel_clock_min > mode->clock)
                return MODE_CLOCK_LOW;
 
index 885fc3809f7f904e8bc82e0eeee9cddd3dcef3f4..b55b5c157e384158d6c55dab6a693c2e9e5fea21 100644 (file)
@@ -850,6 +850,9 @@ intel_tv_mode_valid(struct drm_connector *connector,
        const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
        int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        if (mode->clock > max_dotclk)
                return MODE_CLOCK_HIGH;
 
@@ -877,16 +880,21 @@ intel_tv_compute_config(struct intel_encoder *encoder,
                        struct drm_connector_state *conn_state)
 {
        const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
 
        if (!tv_mode)
                return false;
 
-       pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
+       adjusted_mode->crtc_clock = tv_mode->clock;
        DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
        pipe_config->pipe_bpp = 8*3;
 
        /* TV has its own notion of sync and other mode flags, so clear them. */
-       pipe_config->base.adjusted_mode.flags = 0;
+       adjusted_mode->flags = 0;
 
        /*
         * FIXME: We don't check whether the input mode is actually what we want
index 32b1a6cdecfc05133147e6ff85c959f4668362f1..d3443125e66164a863fb41bfdb435a1ce13340b6 100644 (file)
@@ -197,8 +197,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
        priv->io_base = regs;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi");
-       if (!res)
-               return -EINVAL;
+       if (!res) {
+               ret = -EINVAL;
+               goto free_drm;
+       }
        /* Simply ioremap since it may be a shared register zone */
        regs = devm_ioremap(dev, res->start, resource_size(res));
        if (!regs) {
@@ -215,8 +217,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
        }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
-       if (!res)
-               return -EINVAL;
+       if (!res) {
+               ret = -EINVAL;
+               goto free_drm;
+       }
        /* Simply ioremap since it may be a shared register zone */
        regs = devm_ioremap(dev, res->start, resource_size(res));
        if (!regs) {
index 291c08117ab65337f7a9d8567cec08207cd555db..397143b639c64ba6dbd7c1144c0314aae954339f 100644 (file)
@@ -132,7 +132,7 @@ curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
 
        nvif_object_map(&wndw->wimm.base.user, NULL, 0);
        wndw->immd = func;
-       wndw->ctxdma.parent = &disp->core->chan.base.user;
+       wndw->ctxdma.parent = NULL;
        return 0;
 }
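
Why NULL: the prepare_fb hunk that follows keys off this value.

	/*
	 * Note: a NULL ctxdma.parent now means "this window carries no
	 * ctxdma object"; nv50_wndw_prepare_fb() in the next hunk tests
	 * exactly this before allocating one, so cursor updates no longer
	 * create per-framebuffer ctxdmas.
	 */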
 
index 224963b533a69163b39bed8cbf175e637befd591..c5a9bc1af5af79038d938de1f8cb3a85dc35dac8 100644 (file)
@@ -444,14 +444,17 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
        if (ret)
                return ret;
 
-       ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
-       if (IS_ERR(ctxdma)) {
-               nouveau_bo_unpin(fb->nvbo);
-               return PTR_ERR(ctxdma);
+       if (wndw->ctxdma.parent) {
+               ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
+               if (IS_ERR(ctxdma)) {
+                       nouveau_bo_unpin(fb->nvbo);
+                       return PTR_ERR(ctxdma);
+               }
+
+               asyw->image.handle[0] = ctxdma->object.handle;
        }
 
        asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
-       asyw->image.handle[0] = ctxdma->object.handle;
        asyw->image.offset[0] = fb->nvbo->bo.offset;
 
        if (wndw->func->prepare) {
index b8cda94492412c820c2d44ec0f40afa298f163b0..768207fbbae3d8d23e287428ccca34bcdfedbda2 100644 (file)
@@ -623,7 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
        struct qxl_cursor_cmd *cmd;
        struct qxl_cursor *cursor;
        struct drm_gem_object *obj;
-       struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
+       struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
        int ret;
        void *user_ptr;
        int size = 64*64*4;
@@ -677,7 +677,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
                                                           cursor_bo, 0);
                cmd->type = QXL_CURSOR_SET;
 
-               qxl_bo_unref(&qcrtc->cursor_bo);
+               old_cursor_bo = qcrtc->cursor_bo;
                qcrtc->cursor_bo = cursor_bo;
                cursor_bo = NULL;
        } else {
@@ -697,6 +697,9 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
        qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
        qxl_release_fence_buffer_objects(release);
 
+       if (old_cursor_bo)
+               qxl_bo_unref(&old_cursor_bo);
+
        qxl_bo_unref(&cursor_bo);
 
        return;
index 08747fc3ee713d6ba796b946103334b302a48758..8232b39e16ca700d17ebfae9ab207a0a3c4d5c78 100644 (file)
@@ -17,7 +17,6 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_modes.h>
 #include <drm/drm_of.h>
-#include <drm/drm_panel.h>
 
 #include <uapi/drm/drm_mode.h>
 
@@ -418,9 +417,6 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
 static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
                                     const struct drm_display_mode *mode)
 {
-       struct drm_panel *panel = tcon->panel;
-       struct drm_connector *connector = panel->connector;
-       struct drm_display_info display_info = connector->display_info;
        unsigned int bp, hsync, vsync;
        u8 clk_delay;
        u32 val = 0;
@@ -478,27 +474,6 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
        if (mode->flags & DRM_MODE_FLAG_PVSYNC)
                val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
 
-       /*
-        * On A20 and similar SoCs, the only way to achieve Positive Edge
-        * (Rising Edge), is setting dclk clock phase to 2/3(240°).
-        * By default TCON works in Negative Edge(Falling Edge),
-        * this is why phase is set to 0 in that case.
-        * Unfortunately there's no way to logically invert dclk through
-        * IO_POL register.
-        * The only acceptable way to work, triple checked with scope,
-        * is using clock phase set to 0° for Negative Edge and set to 240°
-        * for Positive Edge.
-        * On A33 and similar SoCs there would be a 90° phase option,
-        * but it divides also dclk by 2.
-        * Following code is a way to avoid quirks all around TCON
-        * and DOTCLOCK drivers.
-        */
-       if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
-               clk_set_phase(tcon->dclk, 240);
-
-       if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
-               clk_set_phase(tcon->dclk, 0);
-
        regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
                           SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
                           val);
index 7b8e17b03cb864a7bc0ab0cbd114594b01f28c5c..6bf4da7ad63a51f3b9aa6713552c96be6042bba2 100644 (file)
@@ -124,6 +124,8 @@ static const struct hid_device_id hammer_devices[] = {
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WHISKERS) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, hammer_devices);
index a85634fe033f01bd6f9a2b41c67d027c0b55ccc1..c7981ddd8776377faa9a238b8e58d6162054b9c6 100644 (file)
 #define USB_DEVICE_ID_GOOGLE_TOUCH_ROSE        0x5028
 #define USB_DEVICE_ID_GOOGLE_STAFF     0x502b
 #define USB_DEVICE_ID_GOOGLE_WAND      0x502d
+#define USB_DEVICE_ID_GOOGLE_WHISKERS  0x5030
 
 #define USB_VENDOR_ID_GOTOP            0x08f2
 #define USB_DEVICE_ID_SUPER_Q2         0x007f
index cb86cc834201c89f660daa3509722f6fa72cb98c..0422ec2b13d208d98acdf22c5eb97b6393c5f530 100644 (file)
@@ -573,7 +573,7 @@ static bool steam_is_valve_interface(struct hid_device *hdev)
 
 static int steam_client_ll_parse(struct hid_device *hdev)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
 
        return hid_parse_report(hdev, steam->hdev->dev_rdesc,
                        steam->hdev->dev_rsize);
@@ -590,7 +590,7 @@ static void steam_client_ll_stop(struct hid_device *hdev)
 
 static int steam_client_ll_open(struct hid_device *hdev)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
        int ret;
 
        ret = hid_hw_open(steam->hdev);
@@ -605,7 +605,7 @@ static int steam_client_ll_open(struct hid_device *hdev)
 
 static void steam_client_ll_close(struct hid_device *hdev)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
 
        mutex_lock(&steam->mutex);
        steam->client_opened = false;
@@ -623,7 +623,7 @@ static int steam_client_ll_raw_request(struct hid_device *hdev,
                                size_t count, unsigned char report_type,
                                int reqtype)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
 
        return hid_hw_raw_request(steam->hdev, reportnum, buf, count,
                        report_type, reqtype);
@@ -710,7 +710,7 @@ static int steam_probe(struct hid_device *hdev,
                ret = PTR_ERR(steam->client_hdev);
                goto client_hdev_fail;
        }
-       hid_set_drvdata(steam->client_hdev, steam);
+       steam->client_hdev->driver_data = steam;
 
        /*
         * With the real steam controller interface, do not connect hidraw.
index 582e449be9feeeebd5924fea4a28aab4a5c2e8a2..a2c53ea3b5edfce82eeb2ef4b4e2392f5c7fb98f 100644 (file)
@@ -205,8 +205,7 @@ static void ish_remove(struct pci_dev *pdev)
        kfree(ishtp_dev);
 }
 
-#ifdef CONFIG_PM
-static struct device *ish_resume_device;
+static struct device __maybe_unused *ish_resume_device;
 
 /* 50ms to get resume response */
 #define WAIT_FOR_RESUME_ACK_MS         50
@@ -220,7 +219,7 @@ static struct device *ish_resume_device;
  * in that case a simple resume message is enough, others we need
  * a reset sequence.
  */
-static void ish_resume_handler(struct work_struct *work)
+static void __maybe_unused ish_resume_handler(struct work_struct *work)
 {
        struct pci_dev *pdev = to_pci_dev(ish_resume_device);
        struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -262,7 +261,7 @@ static void ish_resume_handler(struct work_struct *work)
  *
  * Return: 0 to the pm core
  */
-static int ish_suspend(struct device *device)
+static int __maybe_unused ish_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -288,7 +287,7 @@ static int ish_suspend(struct device *device)
        return 0;
 }
 
-static DECLARE_WORK(resume_work, ish_resume_handler);
+static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler);
 /**
  * ish_resume() - ISH resume callback
  * @device:    device pointer
@@ -297,7 +296,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler);
  *
  * Return: 0 to the pm core
  */
-static int ish_resume(struct device *device)
+static int __maybe_unused ish_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -311,21 +310,14 @@ static int ish_resume(struct device *device)
        return 0;
 }
 
-static const struct dev_pm_ops ish_pm_ops = {
-       .suspend = ish_suspend,
-       .resume = ish_resume,
-};
-#define ISHTP_ISH_PM_OPS       (&ish_pm_ops)
-#else
-#define ISHTP_ISH_PM_OPS       NULL
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
 
 static struct pci_driver ish_driver = {
        .name = KBUILD_MODNAME,
        .id_table = ish_pci_tbl,
        .probe = ish_probe,
        .remove = ish_remove,
-       .driver.pm = ISHTP_ISH_PM_OPS,
+       .driver.pm = &ish_pm_ops,
 };
 
 module_pci_driver(ish_driver);
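
For reference, SIMPLE_DEV_PM_OPS() replaces the removed #ifdef CONFIG_PM block; a simplified sketch of what the macro expands to (see include/linux/pm.h for the authoritative definition):

	static const struct dev_pm_ops ish_pm_ops = {
	#ifdef CONFIG_PM_SLEEP
		.suspend  = ish_suspend,
		.resume   = ish_resume,
		.freeze   = ish_suspend,
		.thaw     = ish_resume,
		.poweroff = ish_suspend,
		.restore  = ish_resume,
	#endif
	};

Together with the __maybe_unused annotations above, the callbacks compile (and are discarded) even when power management is disabled.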
index c101369b51de88b927fdf2295f3bb664ed415899..d6797535fff97217b477cf7c2009397dcce2d1ec 100644 (file)
@@ -395,6 +395,14 @@ static void wacom_usage_mapping(struct hid_device *hdev,
                }
        }
 
+       /* 2nd-generation Intuos Pro Large has incorrect Y maximum */
+       if (hdev->vendor == USB_VENDOR_ID_WACOM &&
+           hdev->product == 0x0358 &&
+           WACOM_PEN_FIELD(field) &&
+           wacom_equivalent_usage(usage->hid) == HID_GD_Y) {
+               field->logical_maximum = 43200;
+       }
+
        switch (usage->hid) {
        case HID_GD_X:
                features->x_max = field->logical_maximum;
index bf3bb7e1adab8579cf7647a24cdba12c9c828050..9d3ef879dc51e1aa08848649cfaec435a6f882fa 100644 (file)
@@ -1074,6 +1074,13 @@ static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
                        DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
                },
        },
+       {
+               .ident = "Dell XPS13 9333",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
+               },
+       },
        { }
 };
 
index e88c019619481d16ab7690ecc8a862f94733e309..33d51281272bb066762d80b46161b4f7f44113b6 100644 (file)
@@ -394,12 +394,16 @@ static const char * const hwmon_power_attr_templates[] = {
        [hwmon_power_cap_hyst] = "power%d_cap_hyst",
        [hwmon_power_cap_max] = "power%d_cap_max",
        [hwmon_power_cap_min] = "power%d_cap_min",
+       [hwmon_power_min] = "power%d_min",
        [hwmon_power_max] = "power%d_max",
+       [hwmon_power_lcrit] = "power%d_lcrit",
        [hwmon_power_crit] = "power%d_crit",
        [hwmon_power_label] = "power%d_label",
        [hwmon_power_alarm] = "power%d_alarm",
        [hwmon_power_cap_alarm] = "power%d_cap_alarm",
+       [hwmon_power_min_alarm] = "power%d_min_alarm",
        [hwmon_power_max_alarm] = "power%d_max_alarm",
+       [hwmon_power_lcrit_alarm] = "power%d_lcrit_alarm",
        [hwmon_power_crit_alarm] = "power%d_crit_alarm",
 };
 
index 155d4d1d1585af4aa7debc37163072af4979bd02..f9d1349c328698aa510e57d10c6b7b475999d16f 100644 (file)
@@ -4175,7 +4175,7 @@ static int nct6775_probe(struct platform_device *pdev)
         * The temperature is already monitored if the respective bit in <mask>
         * is set.
         */
-       for (i = 0; i < 32; i++) {
+       for (i = 0; i < 31; i++) {
                if (!(data->temp_mask & BIT(i + 1)))
                        continue;
                if (!reg_temp_alternate[i])
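
The tightened bound avoids an out-of-range shift; a short note assuming the usual BIT() definition from include/linux/bits.h:

	/*
	 * BIT(x) is (1UL << (x)). With the old bound the final iteration
	 * evaluated BIT(32) -- an out-of-range shift for a 32-bit
	 * unsigned long, i.e. undefined behaviour on 32-bit builds. With
	 * i < 31 the argument i + 1 stays within 1..31.
	 */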
index 4a34f311e1ff4df2cd6cdfcff7ea1662c95a06eb..6ec65adaba49569ab7b9775f856859a0fcfbd967 100644 (file)
@@ -647,10 +647,10 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap,
        if (bit_adap->getscl == NULL)
                adap->quirks = &i2c_bit_quirk_no_clk_stretch;
 
-       /* Bring bus to a known state. Looks like STOP if bus is not free yet */
-       setscl(bit_adap, 1);
-       udelay(bit_adap->udelay);
-       setsda(bit_adap, 1);
+       /*
+        * We tried forcing SCL/SDA to an initial state here. But that caused a
+        * regression, sadly. Check Bugzilla #200045 for details.
+        */
 
        ret = add_adapter(adap);
        if (ret < 0)
index 005e6e0330c278276a0d602fcfebdc3429218cfd..66f85bbf35917161cc36e4ffb308d78b8401c0cb 100644 (file)
@@ -279,9 +279,9 @@ static int i2c_gpio_probe(struct platform_device *pdev)
         * required for an I2C bus.
         */
        if (pdata->scl_is_open_drain)
-               gflags = GPIOD_OUT_LOW;
+               gflags = GPIOD_OUT_HIGH;
        else
-               gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
+               gflags = GPIOD_OUT_HIGH_OPEN_DRAIN;
        priv->scl = i2c_gpio_get_desc(dev, "scl", 1, gflags);
        if (IS_ERR(priv->scl))
                return PTR_ERR(priv->scl);
index f3f683041e7f9199ad5799ef5c8fd83f59fc9856..51970bae3c4a5a4d08f03ae558816fd9c264996b 100644 (file)
@@ -465,15 +465,18 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
 
        status = i2c_transfer(adapter, msg, num);
        if (status < 0)
-               return status;
-       if (status != num)
-               return -EIO;
+               goto cleanup;
+       if (status != num) {
+               status = -EIO;
+               goto cleanup;
+       }
+       status = 0;
 
        /* Check PEC if last message is a read */
        if (i && (msg[num-1].flags & I2C_M_RD)) {
                status = i2c_smbus_check_pec(partial_pec, &msg[num-1]);
                if (status < 0)
-                       return status;
+                       goto cleanup;
        }
 
        if (read_write == I2C_SMBUS_READ)
@@ -499,12 +502,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
                        break;
                }
 
+cleanup:
        if (msg[0].flags & I2C_M_DMA_SAFE)
                kfree(msg[0].buf);
        if (msg[1].flags & I2C_M_DMA_SAFE)
                kfree(msg[1].buf);
 
-       return 0;
+       return status;
 }
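
Why the new label exists, as a descriptive note:

	/*
	 * msg[0].buf / msg[1].buf may be bounce buffers allocated earlier
	 * for I2C_M_DMA_SAFE messages. The early "return status" /
	 * "return -EIO" paths used to skip the two kfree() calls and leak
	 * them; funnelling every exit through the cleanup label closes
	 * that leak while still returning 0 on success.
	 */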
 
 /**
index 7e3d82cff3d5f2537608c0a21d9bf277767e7bd8..c149c9c360fc4f265ce1e406e1dd8ba7ae5615d8 100644 (file)
@@ -1053,7 +1053,7 @@ static irqreturn_t mma8452_interrupt(int irq, void *p)
        if (src < 0)
                return IRQ_NONE;
 
-       if (!(src & data->chip_info->enabled_events))
+       if (!(src & (data->chip_info->enabled_events | MMA8452_INT_DRDY)))
                return IRQ_NONE;
 
        if (src & MMA8452_INT_DRDY) {
index f9c0624505a2993e3a48d9a581faa8a26e9287de..42618fe4f83ed82d0f50b92e884dd59a11a1df09 100644 (file)
@@ -959,6 +959,8 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
        }
 
        irq_type = irqd_get_trigger_type(desc);
+       if (!irq_type)
+               irq_type = IRQF_TRIGGER_RISING;
        if (irq_type == IRQF_TRIGGER_RISING)
                st->irq_mask = INV_MPU6050_ACTIVE_HIGH;
        else if (irq_type == IRQF_TRIGGER_FALLING)
index 34d42a2504c92bf43ee1216f1855e9febfd03161..df5b2a0da96c4a9c311ddd6da57f990f6d1f821f 100644 (file)
@@ -582,6 +582,8 @@ static int tsl2772_als_calibrate(struct iio_dev *indio_dev)
                        "%s: failed to get lux\n", __func__);
                return lux_val;
        }
+       if (lux_val == 0)
+               return -ERANGE;
 
        ret = (chip->settings.als_cal_target * chip->settings.als_gain_trim) /
                        lux_val;
index 5ec3e41b65f2b8f991626a4522d6263d66ca2a27..fe87d27779d96b99ce4f847c9a9e02a4a1a87aa7 100644 (file)
@@ -415,10 +415,9 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
        }
        comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
 
-       *val = comp_humidity;
-       *val2 = 1024;
+       *val = comp_humidity * 1000 / 1024;
 
-       return IIO_VAL_FRACTIONAL;
+       return IIO_VAL_INT;
 }
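
A worked example of the rescaling, assuming the compensation routine's usual 1/1024 %RH fixed-point output:

	/*
	 * comp_humidity is in units of 1/1024 %RH, while the IIO ABI
	 * expects an integer in milli-%RH. For a 50 %RH reading:
	 *   comp_humidity = 51200
	 *   51200 * 1000 / 1024 = 50000   -> reported as 50.000 %RH
	 */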
 
 static int bmp280_read_raw(struct iio_dev *indio_dev,
index 3ae2339dd27a9f5b6c4d104674d096f9f22b5b67..2094d136513d6c5f144663ad9e74192fd85a191a 100644 (file)
@@ -736,10 +736,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
        if (ret)
                return ret;
 
-       if (!file->ucontext &&
-           (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended))
-               return -EINVAL;
-
        if (extended) {
                if (count < (sizeof(hdr) + sizeof(ex_hdr)))
                        return -EINVAL;
@@ -759,6 +755,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                goto out;
        }
 
+       /*
+        * Must be after the ib_dev check, as once the RCU clears ib_dev ==
+        * NULL means ucontext == NULL
+        */
+       if (!file->ucontext &&
+           (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        if (!verify_command_mask(ib_dev, command, extended)) {
                ret = -EOPNOTSUPP;
                goto out;
index 0b56828c1319b1b350385dfd5be2c981d26d6ffa..9d6beb948535bec89545e9f9b25f7b83976a654e 100644 (file)
@@ -1562,11 +1562,12 @@ EXPORT_SYMBOL(ib_destroy_qp);
 
 /* Completion queues */
 
-struct ib_cq *ib_create_cq(struct ib_device *device,
-                          ib_comp_handler comp_handler,
-                          void (*event_handler)(struct ib_event *, void *),
-                          void *cq_context,
-                          const struct ib_cq_init_attr *cq_attr)
+struct ib_cq *__ib_create_cq(struct ib_device *device,
+                            ib_comp_handler comp_handler,
+                            void (*event_handler)(struct ib_event *, void *),
+                            void *cq_context,
+                            const struct ib_cq_init_attr *cq_attr,
+                            const char *caller)
 {
        struct ib_cq *cq;
 
@@ -1580,12 +1581,13 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
                cq->cq_context    = cq_context;
                atomic_set(&cq->usecnt, 0);
                cq->res.type = RDMA_RESTRACK_CQ;
+               cq->res.kern_name = caller;
                rdma_restrack_add(&cq->res);
        }
 
        return cq;
 }
-EXPORT_SYMBOL(ib_create_cq);
+EXPORT_SYMBOL(__ib_create_cq);
 
 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 {
index 5d65582fe4d92f4840cdb84db8ae5d6f3dcf8652..616fc9b6fad8f41e28f5114f4d6afd4bd8e8788e 100644 (file)
@@ -423,7 +423,7 @@ static netdev_tx_t hfi1_netdev_start_xmit(struct sk_buff *skb,
 
 static u16 hfi1_vnic_select_queue(struct net_device *netdev,
                                  struct sk_buff *skb,
-                                 void *accel_priv,
+                                 struct net_device *sb_dev,
                                  select_queue_fallback_t fallback)
 {
        struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
index ed1f253faf977c5bf3b4f15f4a1ea8992e817d88..c7c85c22e4e3291a343319ffcdb2e00034d7cc5f 100644 (file)
@@ -486,8 +486,11 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
        }
 
        if (flags & IB_MR_REREG_ACCESS) {
-               if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
-                       return -EPERM;
+               if (ib_access_writable(mr_access_flags) &&
+                   !mmr->umem->writable) {
+                       err = -EPERM;
+                       goto release_mpt_entry;
+               }
 
                err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
                                               convert_access(mr_access_flags));
index e52dd21519b45ff00268ae33c21816a8b5a96b53..b3ba9a222550750f9c92a1ea8d1cf23b93e05d12 100644 (file)
@@ -3199,8 +3199,8 @@ static int flow_counters_set_data(struct ib_counters *ibcounters,
        if (!mcounters->hw_cntrs_hndl) {
                mcounters->hw_cntrs_hndl = mlx5_fc_create(
                        to_mdev(ibcounters->device)->mdev, false);
-               if (!mcounters->hw_cntrs_hndl) {
-                       ret = -ENOMEM;
+               if (IS_ERR(mcounters->hw_cntrs_hndl)) {
+                       ret = PTR_ERR(mcounters->hw_cntrs_hndl);
                        goto free;
                }
                hw_hndl = true;
@@ -3546,29 +3546,35 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
                        return ERR_PTR(-ENOMEM);
 
                err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
-               if (err) {
-                       kfree(ucmd);
-                       return ERR_PTR(err);
-               }
+               if (err)
+                       goto free_ucmd;
        }
 
-       if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
-               return ERR_PTR(-ENOMEM);
+       if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
+               err = -ENOMEM;
+               goto free_ucmd;
+       }
 
        if (domain != IB_FLOW_DOMAIN_USER ||
            flow_attr->port > dev->num_ports ||
            (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
-                                 IB_FLOW_ATTR_FLAGS_EGRESS)))
-               return ERR_PTR(-EINVAL);
+                                 IB_FLOW_ATTR_FLAGS_EGRESS))) {
+               err = -EINVAL;
+               goto free_ucmd;
+       }
 
        if (is_egress &&
            (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
-            flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT))
-               return ERR_PTR(-EINVAL);
+            flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
+               err = -EINVAL;
+               goto free_ucmd;
+       }
 
        dst = kzalloc(sizeof(*dst), GFP_KERNEL);
-       if (!dst)
-               return ERR_PTR(-ENOMEM);
+       if (!dst) {
+               err = -ENOMEM;
+               goto free_ucmd;
+       }
 
        mutex_lock(&dev->flow_db->lock);
 
@@ -3637,8 +3643,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 unlock:
        mutex_unlock(&dev->flow_db->lock);
        kfree(dst);
+free_ucmd:
        kfree(ucmd);
-       kfree(handler);
        return ERR_PTR(err);
 }
 
@@ -6107,7 +6113,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
                             MLX5_CAP_GEN(mdev, num_vhca_ports));
 
-       if (MLX5_VPORT_MANAGER(mdev) &&
+       if (MLX5_ESWITCH_MANAGER(mdev) &&
            mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
                dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
 
index f7ac8fc9b531d7550fb0b41233b55e0bec51b4ff..f07b8df96f43954e67d4dfc32148e96a751e6974 100644 (file)
@@ -1957,6 +1957,9 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        }
 
        if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
+               if (rdma_protocol_iwarp(&dev->ibdev, 1))
+                       return -EINVAL;
+
                if (attr_mask & IB_QP_PATH_MTU) {
                        if (attr->path_mtu < IB_MTU_256 ||
                            attr->path_mtu > IB_MTU_4096) {
index f30eeba3f772c5a8e0433cdc6b6fcaa47076583c..8be27238a86e4ee1f160b4058e9517ef58708d26 100644 (file)
@@ -645,6 +645,9 @@ int rxe_requester(void *arg)
                } else {
                        goto exit;
                }
+               if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+                   qp->sq_sig_type == IB_SIGNAL_ALL_WR)
+                       rxe_run_task(&qp->comp.task, 1);
                qp->req.wqe_index = next_index(qp->sq.queue,
                                                qp->req.wqe_index);
                goto next_wqe;
@@ -709,6 +712,7 @@ int rxe_requester(void *arg)
 
        if (fill_packet(qp, wqe, &pkt, skb, payload)) {
                pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
+               kfree_skb(skb);
                goto err;
        }
 
@@ -740,7 +744,6 @@ int rxe_requester(void *arg)
        goto next_wqe;
 
 err:
-       kfree_skb(skb);
        wqe->status = IB_WC_LOC_PROT_ERR;
        wqe->state = wqe_state_error;
        __rxe_do_task(&qp->comp.task);
index 0c8aec62a42539fc90d67000361653fbf846ae3b..61558788b3fadb7546660f3b907820b264e3b1aa 100644 (file)
@@ -95,7 +95,7 @@ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
 }
 
 static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
-                                void *accel_priv,
+                                struct net_device *sb_dev,
                                 select_queue_fallback_t fallback)
 {
        struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
@@ -107,7 +107,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
        mdata->entropy = opa_vnic_calc_entropy(skb);
        mdata->vl = opa_vnic_get_vl(adapter, skb);
        rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
-                                              accel_priv, fallback);
+                                              sb_dev, fallback);
        skb_pull(skb, sizeof(*mdata));
        return rc;
 }
index cf30523c6ef64c956e5ebf77c730c6bb146c4a1f..6c7326c93721c495c4e61a73cac2dfaf9a5bc8fc 100644 (file)
@@ -131,8 +131,10 @@ EXPORT_SYMBOL(input_mt_destroy_slots);
  * inactive, or if the tool type is changed, a new tracking id is
  * assigned to the slot. The tool type is only reported if the
  * corresponding absbit field is set.
+ *
+ * Returns true if the contact is active.
  */
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
                                unsigned int tool_type, bool active)
 {
        struct input_mt *mt = dev->mt;
@@ -140,22 +142,24 @@ void input_mt_report_slot_state(struct input_dev *dev,
        int id;
 
        if (!mt)
-               return;
+               return false;
 
        slot = &mt->slots[mt->slot];
        slot->frame = mt->frame;
 
        if (!active) {
                input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
-               return;
+               return false;
        }
 
        id = input_mt_get_value(slot, ABS_MT_TRACKING_ID);
-       if (id < 0 || input_mt_get_value(slot, ABS_MT_TOOL_TYPE) != tool_type)
+       if (id < 0)
                id = input_mt_new_trkid(mt);
 
        input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, id);
        input_event(dev, EV_ABS, ABS_MT_TOOL_TYPE, tool_type);
+
+       return true;
 }
 EXPORT_SYMBOL(input_mt_report_slot_state);
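
Returning bool from input_mt_report_slot_state() lets a driver gate its per-contact reporting on whether the slot is actually active instead of duplicating the helper's bookkeeping; note the hunk also stops forcing a new tracking ID on a mere tool-type change. A minimal sketch of a caller using the return value (driver data hypothetical):

#include <linux/input/mt.h>

static void demo_report_frame(struct input_dev *dev, const bool *down,
			      const int *x, const int *y, int nslots)
{
	int i;

	for (i = 0; i < nslots; i++) {
		input_mt_slot(dev, i);
		/* Only send coordinates for contacts the core kept alive. */
		if (input_mt_report_slot_state(dev, MT_TOOL_FINGER, down[i])) {
			input_report_abs(dev, ABS_MT_POSITION_X, x[i]);
			input_report_abs(dev, ABS_MT_POSITION_Y, y[i]);
		}
	}
	input_mt_sync_frame(dev);
	input_sync(dev);
}
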
 
index 48e36acbeb496db7f5033029158a645f8d3cdb27..cd620e009bada3a8f8c1e70b99be25100bea9c44 100644 (file)
@@ -125,7 +125,7 @@ static const struct xpad_device {
        u8 mapping;
        u8 xtype;
 } xpad_device[] = {
-       { 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
+       { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
        { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
        { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
        { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
index f6e643b589b616c61d1751005fedb3288f0ad82c..e8dae6195b30500934f23738f995201521e05160 100644 (file)
@@ -45,7 +45,7 @@ struct event_dev {
 static irqreturn_t events_interrupt(int irq, void *dev_id)
 {
        struct event_dev *edev = dev_id;
-       unsigned type, code, value;
+       unsigned int type, code, value;
 
        type = __raw_readl(edev->addr + REG_READ);
        code = __raw_readl(edev->addr + REG_READ);
@@ -57,7 +57,7 @@ static irqreturn_t events_interrupt(int irq, void *dev_id)
 }
 
 static void events_import_bits(struct event_dev *edev,
-                       unsigned long bits[], unsigned type, size_t count)
+                       unsigned long bits[], unsigned int type, size_t count)
 {
        void __iomem *addr = edev->addr;
        int i, j;
@@ -99,6 +99,7 @@ static void events_import_abs_params(struct event_dev *edev)
 
                for (j = 0; j < ARRAY_SIZE(val); j++) {
                        int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32);
+
                        val[j] = __raw_readl(edev->addr + REG_DATA + offset);
                }
 
@@ -112,7 +113,7 @@ static int events_probe(struct platform_device *pdev)
        struct input_dev *input_dev;
        struct event_dev *edev;
        struct resource *res;
-       unsigned keymapnamelen;
+       unsigned int keymapnamelen;
        void __iomem *addr;
        int irq;
        int i;
@@ -150,7 +151,7 @@ static int events_probe(struct platform_device *pdev)
        for (i = 0; i < keymapnamelen; i++)
                edev->name[i] = __raw_readb(edev->addr + REG_DATA + i);
 
-       pr_debug("events_probe() keymap=%s\n", edev->name);
+       pr_debug("%s: keymap=%s\n", __func__, edev->name);
 
        input_dev->name = edev->name;
        input_dev->id.bustype = BUS_HOST;
index c25606e006938743d64498429cf3d0b69768d7fb..ca59a2be9bc5344f65389ea7372a7740b74b5343 100644 (file)
@@ -841,4 +841,14 @@ config INPUT_RAVE_SP_PWRBUTTON
          To compile this driver as a module, choose M here: the
          module will be called rave-sp-pwrbutton.
 
+config INPUT_SC27XX_VIBRA
+       tristate "Spreadtrum sc27xx vibrator support"
+       depends on MFD_SC27XX_PMIC || COMPILE_TEST
+       select INPUT_FF_MEMLESS
+       help
+         This option enables support for Spreadtrum sc27xx vibrator driver.
+
+         To compile this driver as a module, choose M here. The module will
+         be called sc27xx_vibra.
+
 endif
index 72cde28649e2c0bc4fec14f6898445d2f79880dc..9d0f9d1ff68f41a5ec7f13101bb11176e8fd8729 100644 (file)
@@ -66,6 +66,7 @@ obj-$(CONFIG_INPUT_RETU_PWRBUTTON)    += retu-pwrbutton.o
 obj-$(CONFIG_INPUT_AXP20X_PEK)         += axp20x-pek.o
 obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER)        += rotary_encoder.o
 obj-$(CONFIG_INPUT_RK805_PWRKEY)       += rk805-pwrkey.o
+obj-$(CONFIG_INPUT_SC27XX_VIBRA)       += sc27xx-vibra.o
 obj-$(CONFIG_INPUT_SGI_BTNS)           += sgi_btns.o
 obj-$(CONFIG_INPUT_SIRFSOC_ONKEY)      += sirfsoc-onkey.o
 obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY)   += soc_button_array.o
diff --git a/drivers/input/misc/sc27xx-vibra.c b/drivers/input/misc/sc27xx-vibra.c
new file mode 100644 (file)
index 0000000..295251a
--- /dev/null
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Spreadtrum Communications Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+
+#define CUR_DRV_CAL_SEL                GENMASK(13, 12)
+#define SLP_LDOVIBR_PD_EN      BIT(9)
+#define LDO_VIBR_PD            BIT(8)
+
+struct vibra_info {
+       struct input_dev        *input_dev;
+       struct work_struct      play_work;
+       struct regmap           *regmap;
+       u32                     base;
+       u32                     strength;
+       bool                    enabled;
+};
+
+static void sc27xx_vibra_set(struct vibra_info *info, bool on)
+{
+       if (on) {
+               regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD, 0);
+               regmap_update_bits(info->regmap, info->base,
+                                  SLP_LDOVIBR_PD_EN, 0);
+               info->enabled = true;
+       } else {
+               regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD,
+                                  LDO_VIBR_PD);
+               regmap_update_bits(info->regmap, info->base,
+                                  SLP_LDOVIBR_PD_EN, SLP_LDOVIBR_PD_EN);
+               info->enabled = false;
+       }
+}
+
+static int sc27xx_vibra_hw_init(struct vibra_info *info)
+{
+       return regmap_update_bits(info->regmap, info->base, CUR_DRV_CAL_SEL, 0);
+}
+
+static void sc27xx_vibra_play_work(struct work_struct *work)
+{
+       struct vibra_info *info = container_of(work, struct vibra_info,
+                                              play_work);
+
+       if (info->strength && !info->enabled)
+               sc27xx_vibra_set(info, true);
+       else if (info->strength == 0 && info->enabled)
+               sc27xx_vibra_set(info, false);
+}
+
+static int sc27xx_vibra_play(struct input_dev *input, void *data,
+                            struct ff_effect *effect)
+{
+       struct vibra_info *info = input_get_drvdata(input);
+
+       info->strength = effect->u.rumble.weak_magnitude;
+       schedule_work(&info->play_work);
+
+       return 0;
+}
+
+static void sc27xx_vibra_close(struct input_dev *input)
+{
+       struct vibra_info *info = input_get_drvdata(input);
+
+       cancel_work_sync(&info->play_work);
+       if (info->enabled)
+               sc27xx_vibra_set(info, false);
+}
+
+static int sc27xx_vibra_probe(struct platform_device *pdev)
+{
+       struct vibra_info *info;
+       int error;
+
+       info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       info->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+       if (!info->regmap) {
+               dev_err(&pdev->dev, "failed to get vibrator regmap.\n");
+               return -ENODEV;
+       }
+
+       error = device_property_read_u32(&pdev->dev, "reg", &info->base);
+       if (error) {
+               dev_err(&pdev->dev, "failed to get vibrator base address.\n");
+               return error;
+       }
+
+       info->input_dev = devm_input_allocate_device(&pdev->dev);
+       if (!info->input_dev) {
+               dev_err(&pdev->dev, "failed to allocate input device.\n");
+               return -ENOMEM;
+       }
+
+       info->input_dev->name = "sc27xx:vibrator";
+       info->input_dev->id.version = 0;
+       info->input_dev->close = sc27xx_vibra_close;
+
+       input_set_drvdata(info->input_dev, info);
+       input_set_capability(info->input_dev, EV_FF, FF_RUMBLE);
+       INIT_WORK(&info->play_work, sc27xx_vibra_play_work);
+       info->enabled = false;
+
+       error = sc27xx_vibra_hw_init(info);
+       if (error) {
+               dev_err(&pdev->dev, "failed to initialize the vibrator.\n");
+               return error;
+       }
+
+       error = input_ff_create_memless(info->input_dev, NULL,
+                                       sc27xx_vibra_play);
+       if (error) {
+               dev_err(&pdev->dev, "failed to register vibrator to FF.\n");
+               return error;
+       }
+
+       error = input_register_device(info->input_dev);
+       if (error) {
+               dev_err(&pdev->dev, "failed to register input device.\n");
+               return error;
+       }
+
+       return 0;
+}
+
+static const struct of_device_id sc27xx_vibra_of_match[] = {
+       { .compatible = "sprd,sc2731-vibrator", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, sc27xx_vibra_of_match);
+
+static struct platform_driver sc27xx_vibra_driver = {
+       .driver = {
+               .name = "sc27xx-vibrator",
+               .of_match_table = sc27xx_vibra_of_match,
+       },
+       .probe = sc27xx_vibra_probe,
+};
+
+module_platform_driver(sc27xx_vibra_driver);
+
+MODULE_DESCRIPTION("Spreadtrum SC27xx Vibrator Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xiaotong Lu <xiaotong.lu@spreadtrum.com>");
index 599544c1a91cd365261b6ca2ec4e4f3149b0a63d..243e0fa6e3e3cb44ce22adc6e76421fda79f4ff2 100644 (file)
@@ -27,6 +27,8 @@
 #define ETP_DISABLE_POWER      0x0001
 #define ETP_PRESSURE_OFFSET    25
 
+#define ETP_CALIBRATE_MAX_LEN  3
+
 /* IAP Firmware handling */
 #define ETP_PRODUCT_ID_FORMAT_STRING   "%d.0"
 #define ETP_FW_NAME            "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"
index 8ff75114e7626dc3d2fa23a1d3457f1802b2a628..1f9cd7d8b7ad35e982712e00e8e95369e55cd861 100644 (file)
@@ -613,7 +613,7 @@ static ssize_t calibrate_store(struct device *dev,
        int tries = 20;
        int retval;
        int error;
-       u8 val[3];
+       u8 val[ETP_CALIBRATE_MAX_LEN];
 
        retval = mutex_lock_interruptible(&data->sysfs_mutex);
        if (retval)
@@ -1345,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN060C", 0 },
        { "ELAN0611", 0 },
        { "ELAN0612", 0 },
+       { "ELAN0618", 0 },
        { "ELAN1000", 0 },
        { }
 };
index cfcb32559925baf1acf070f908f3b91b1fc1b905..c060d270bc4d862ad7366bd87529dbdc032672b6 100644 (file)
@@ -56,7 +56,7 @@
 static int elan_smbus_initialize(struct i2c_client *client)
 {
        u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
-       u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
+       u8 values[I2C_SMBUS_BLOCK_MAX] = {0};
        int len, error;
 
        /* Get hello packet */
@@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client)
 static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
 {
        int error;
+       u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
+
+       BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf));
 
        error = i2c_smbus_read_block_data(client,
-                                         ETP_SMBUS_CALIBRATE_QUERY, val);
+                                         ETP_SMBUS_CALIBRATE_QUERY, buf);
        if (error < 0)
                return error;
 
+       memcpy(val, buf, ETP_CALIBRATE_MAX_LEN);
        return 0;
 }
 
@@ -472,6 +476,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
 {
        int len;
 
+       BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN);
+
        len = i2c_smbus_read_block_data(client,
                                        ETP_SMBUS_PACKET_QUERY,
                                        &report[ETP_SMBUS_REPORT_OFFSET]);
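
The elan_i2c hunks fix a stack overflow: i2c_smbus_read_block_data() may store up to I2C_SMBUS_BLOCK_MAX (32) bytes regardless of how many the caller wanted, so reading into a 3-byte val[] could be overrun by a misbehaving device. The pattern adopted above, reduced to a sketch (names hypothetical):

#include <linux/i2c.h>
#include <linux/string.h>
#include <linux/build_bug.h>

#define DEMO_NEEDED_LEN	3	/* hypothetical: bytes the caller uses */

static int demo_read_small(struct i2c_client *client, u8 cmd, u8 *out)
{
	u8 buf[I2C_SMBUS_BLOCK_MAX] = { 0 };	/* device may fill all 32 */
	int len;

	BUILD_BUG_ON(DEMO_NEEDED_LEN > sizeof(buf));

	len = i2c_smbus_read_block_data(client, cmd, buf);
	if (len < 0)
		return len;

	memcpy(out, buf, DEMO_NEEDED_LEN);	/* expose only what's needed */
	return 0;
}
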
index fb4d902c440345d3cbc02329ed742d48b931dc85..dd85b16dc6f889bb366a10cbd4278234d3f9763c 100644 (file)
@@ -799,7 +799,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
        else if (ic_version == 7 && etd->info.samples[1] == 0x2A)
                sanity_check = ((packet[3] & 0x1c) == 0x10);
        else
-               sanity_check = ((packet[0] & 0x0c) == 0x04 &&
+               sanity_check = ((packet[0] & 0x08) == 0x00 &&
                                (packet[3] & 0x1c) == 0x10);
 
        if (!sanity_check)
@@ -1175,6 +1175,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
        { }
 };
 
+static const char * const middle_button_pnp_ids[] = {
+       "LEN2131", /* ThinkPad P52 w/ NFC */
+       "LEN2132", /* ThinkPad P52 */
+       NULL
+};
+
 /*
  * Set the appropriate event bits for the input subsystem
  */
@@ -1194,7 +1200,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
        __clear_bit(EV_REL, dev->evbit);
 
        __set_bit(BTN_LEFT, dev->keybit);
-       if (dmi_check_system(elantech_dmi_has_middle_button))
+       if (dmi_check_system(elantech_dmi_has_middle_button) ||
+                       psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids))
                __set_bit(BTN_MIDDLE, dev->keybit);
        __set_bit(BTN_RIGHT, dev->keybit);
 
index 5ff5b1952be0c7afe810cef7f6f086f71928e150..d3ff1fc09af712700507d05ac3548703e49173a1 100644 (file)
@@ -192,8 +192,8 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
                        else
                                input_report_rel(dev, REL_WHEEL, -wheel);
 
-                       input_report_key(dev, BTN_SIDE,  BIT(4));
-                       input_report_key(dev, BTN_EXTRA, BIT(5));
+                       input_report_key(dev, BTN_SIDE,  packet[3] & BIT(4));
+                       input_report_key(dev, BTN_EXTRA, packet[3] & BIT(5));
                        break;
                }
                break;
@@ -203,13 +203,13 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
                input_report_rel(dev, REL_WHEEL, -(s8) packet[3]);
 
                /* Extra buttons on Genius NewNet 3D */
-               input_report_key(dev, BTN_SIDE,  BIT(6));
-               input_report_key(dev, BTN_EXTRA, BIT(7));
+               input_report_key(dev, BTN_SIDE,  packet[0] & BIT(6));
+               input_report_key(dev, BTN_EXTRA, packet[0] & BIT(7));
                break;
 
        case PSMOUSE_THINKPS:
                /* Extra button on ThinkingMouse */
-               input_report_key(dev, BTN_EXTRA, BIT(3));
+               input_report_key(dev, BTN_EXTRA, packet[0] & BIT(3));
 
                /*
                 * Without this bit of weirdness moving up gives wildly
@@ -223,7 +223,7 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
                 * Cortron PS2 Trackball reports SIDE button in the
                 * 4th bit of the first byte.
                 */
-               input_report_key(dev, BTN_SIDE, BIT(3));
+               input_report_key(dev, BTN_SIDE, packet[0] & BIT(3));
                packet[0] |= BIT(3);
                break;
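
Each line fixed above passed the constant BIT(n) as the key state, so those extra buttons were reported as permanently pressed; the value argument must be the masked packet bit. Reduced to one line (byte and bit position hypothetical):

#include <linux/input.h>
#include <linux/bitops.h>

/* Sketch: input_report_key() treats its third argument as a boolean
 * press state, so pass packet & BIT(n), never BIT(n) itself.
 */
static void demo_report_side(struct input_dev *dev, u8 byte3)
{
	input_report_key(dev, BTN_SIDE, byte3 & BIT(4));
}
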
 
index 7172b88cd0649c8de16ac7373ec2a16c42c338d2..fad2eae4a118e793e617a86a52b28351ef4fafed 100644 (file)
@@ -3,6 +3,7 @@
 #
 config RMI4_CORE
        tristate "Synaptics RMI4 bus support"
+       select IRQ_DOMAIN
        help
          Say Y here if you want to support the Synaptics RMI4 bus.  This is
          required for all RMI4 device support.
index 8bb866c7b9855c5025d31b7be3f722d469f73da9..8eeffa066022dadb9f718f77aab1609700f05543 100644 (file)
@@ -32,15 +32,15 @@ void rmi_2d_sensor_abs_process(struct rmi_2d_sensor *sensor,
        if (obj->type == RMI_2D_OBJECT_NONE)
                return;
 
-       if (axis_align->swap_axes)
-               swap(obj->x, obj->y);
-
        if (axis_align->flip_x)
                obj->x = sensor->max_x - obj->x;
 
        if (axis_align->flip_y)
                obj->y = sensor->max_y - obj->y;
 
+       if (axis_align->swap_axes)
+               swap(obj->x, obj->y);
+
        /*
         * Here checking if X offset or y offset are specified is
         * redundant. We just add the offsets or clip the values.
@@ -120,15 +120,15 @@ void rmi_2d_sensor_rel_report(struct rmi_2d_sensor *sensor, int x, int y)
        x = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)x));
        y = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)y));
 
-       if (axis_align->swap_axes)
-               swap(x, y);
-
        if (axis_align->flip_x)
                x = min(RMI_2D_REL_POS_MAX, -x);
 
        if (axis_align->flip_y)
                y = min(RMI_2D_REL_POS_MAX, -y);
 
+       if (axis_align->swap_axes)
+               swap(x, y);
+
        if (x || y) {
                input_report_rel(sensor->input, REL_X, x);
                input_report_rel(sensor->input, REL_Y, y);
@@ -141,17 +141,10 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
        struct input_dev *input = sensor->input;
        int res_x;
        int res_y;
+       int max_x, max_y;
        int input_flags = 0;
 
        if (sensor->report_abs) {
-               if (sensor->axis_align.swap_axes) {
-                       swap(sensor->max_x, sensor->max_y);
-                       swap(sensor->axis_align.clip_x_low,
-                            sensor->axis_align.clip_y_low);
-                       swap(sensor->axis_align.clip_x_high,
-                            sensor->axis_align.clip_y_high);
-               }
-
                sensor->min_x = sensor->axis_align.clip_x_low;
                if (sensor->axis_align.clip_x_high)
                        sensor->max_x = min(sensor->max_x,
@@ -163,14 +156,19 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
                                sensor->axis_align.clip_y_high);
 
                set_bit(EV_ABS, input->evbit);
-               input_set_abs_params(input, ABS_MT_POSITION_X, 0, sensor->max_x,
-                                       0, 0);
-               input_set_abs_params(input, ABS_MT_POSITION_Y, 0, sensor->max_y,
-                                       0, 0);
+
+               max_x = sensor->max_x;
+               max_y = sensor->max_y;
+               if (sensor->axis_align.swap_axes)
+                       swap(max_x, max_y);
+               input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
+               input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
 
                if (sensor->x_mm && sensor->y_mm) {
                        res_x = (sensor->max_x - sensor->min_x) / sensor->x_mm;
                        res_y = (sensor->max_y - sensor->min_y) / sensor->y_mm;
+                       if (sensor->axis_align.swap_axes)
+                               swap(res_x, res_y);
 
                        input_abs_set_res(input, ABS_X, res_x);
                        input_abs_set_res(input, ABS_Y, res_y);
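
The reordering above matters because flipping is done against a per-axis maximum: with swap-then-flip, X was mirrored against the wrong axis bound once axes were exchanged, and the setup path additionally mutated sensor->max_x/max_y in place. The corrected order, flip first in native coordinates and swap last, reduced to a sketch (names hypothetical):

#include <linux/kernel.h>

static void demo_transform(int *x, int *y, int max_x, int max_y,
			   bool flip_x, bool flip_y, bool swap_axes)
{
	/* Mirror in native coordinates first... */
	if (flip_x)
		*x = max_x - *x;
	if (flip_y)
		*y = max_y - *y;
	/* ...then exchange axes; swap() is the kernel helper. */
	if (swap_axes)
		swap(*x, *y);
}
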
index c5fa53adba8d01318cfeacea440360c51c044a7d..bd0d5ff01b08f9c88920b03f56dbb4a3eed21af3 100644 (file)
@@ -9,6 +9,8 @@
 
 #include <linux/kernel.h>
 #include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/list.h>
 #include <linux/pm.h>
 #include <linux/rmi.h>
@@ -167,6 +169,39 @@ static inline void rmi_function_of_probe(struct rmi_function *fn)
 {}
 #endif
 
+static struct irq_chip rmi_irq_chip = {
+       .name = "rmi4",
+};
+
+static int rmi_create_function_irq(struct rmi_function *fn,
+                                  struct rmi_function_handler *handler)
+{
+       struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
+       int i, error;
+
+       for (i = 0; i < fn->num_of_irqs; i++) {
+               set_bit(fn->irq_pos + i, fn->irq_mask);
+
+               fn->irq[i] = irq_create_mapping(drvdata->irqdomain,
+                                               fn->irq_pos + i);
+
+               irq_set_chip_data(fn->irq[i], fn);
+               irq_set_chip_and_handler(fn->irq[i], &rmi_irq_chip,
+                                        handle_simple_irq);
+               irq_set_nested_thread(fn->irq[i], 1);
+
+               error = devm_request_threaded_irq(&fn->dev, fn->irq[i], NULL,
+                                       handler->attention, IRQF_ONESHOT,
+                                       dev_name(&fn->dev), fn);
+               if (error) {
+                       dev_err(&fn->dev, "Error %d registering IRQ\n", error);
+                       return error;
+               }
+       }
+
+       return 0;
+}
+
 static int rmi_function_probe(struct device *dev)
 {
        struct rmi_function *fn = to_rmi_function(dev);
@@ -178,7 +213,14 @@ static int rmi_function_probe(struct device *dev)
 
        if (handler->probe) {
                error = handler->probe(fn);
-               return error;
+               if (error)
+                       return error;
+       }
+
+       if (fn->num_of_irqs && handler->attention) {
+               error = rmi_create_function_irq(fn, handler);
+               if (error)
+                       return error;
        }
 
        return 0;
@@ -230,12 +272,18 @@ int rmi_register_function(struct rmi_function *fn)
 
 void rmi_unregister_function(struct rmi_function *fn)
 {
+       int i;
+
        rmi_dbg(RMI_DEBUG_CORE, &fn->dev, "Unregistering F%02X.\n",
                        fn->fd.function_number);
 
        device_del(&fn->dev);
        of_node_put(fn->dev.of_node);
        put_device(&fn->dev);
+
+       for (i = 0; i < fn->num_of_irqs; i++)
+               irq_dispose_mapping(fn->irq[i]);
+
 }
 
 /**
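
rmi_create_function_irq() above gives every RMI function interrupt source a real Linux virq: a mapping in the driver's irq_domain, a dummy irq_chip with handle_simple_irq, nested-thread marking, and a threaded request of the handler's attention() callback. The same wiring for a single source, as a sketch under those assumptions (not the driver code):

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip demo_chip = { .name = "demo" };

static int demo_map_one(struct irq_domain *domain, unsigned int hwirq,
			irq_handler_t thread_fn, void *ctx,
			struct device *dev)
{
	unsigned int virq = irq_create_mapping(domain, hwirq);

	if (!virq)
		return -EINVAL;

	irq_set_chip_and_handler(virq, &demo_chip, handle_simple_irq);
	irq_set_nested_thread(virq, 1);	/* fired via handle_nested_irq() */

	return devm_request_threaded_irq(dev, virq, NULL, thread_fn,
					 IRQF_ONESHOT, dev_name(dev), ctx);
}
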
index b7625a9ac66ab5384727cc83496223be3aedbe92..96383eab41ba1d850468a64e7ced8a3f1bf72ff6 100644 (file)
 
 struct rmi_device;
 
+/*
+ * The interrupt source count in the function descriptor can represent up to
+ * 6 interrupt sources in the normal manner.
+ */
+#define RMI_FN_MAX_IRQS        6
+
 /**
  * struct rmi_function - represents the implementation of an RMI4
  * function for a particular device (basically, a driver for that RMI4 function)
@@ -26,6 +32,7 @@ struct rmi_device;
  * @irq_pos: The position in the irq bitfield this function holds
  * @irq_mask: For convenience, can be used to mask IRQ bits off during ATTN
  * interrupt handling.
+ * @irqs: assigned virq numbers (up to num_of_irqs)
  *
  * @node: entry in device's list of functions
  */
@@ -36,6 +43,7 @@ struct rmi_function {
        struct list_head node;
 
        unsigned int num_of_irqs;
+       int irq[RMI_FN_MAX_IRQS];
        unsigned int irq_pos;
        unsigned long irq_mask[];
 };
@@ -76,7 +84,7 @@ struct rmi_function_handler {
        void (*remove)(struct rmi_function *fn);
        int (*config)(struct rmi_function *fn);
        int (*reset)(struct rmi_function *fn);
-       int (*attention)(struct rmi_function *fn, unsigned long *irq_bits);
+       irqreturn_t (*attention)(int irq, void *ctx);
        int (*suspend)(struct rmi_function *fn);
        int (*resume)(struct rmi_function *fn);
 };
index 7d29053dfb0f06878ff7897b59f52039a299a089..fc3ab93b7aea454475ee324eecee91470c4a9dc3 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/pm.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/irqdomain.h>
 #include <uapi/linux/input.h>
 #include <linux/rmi.h>
 #include "rmi_bus.h"
@@ -127,28 +128,11 @@ static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
        return 0;
 }
 
-static void process_one_interrupt(struct rmi_driver_data *data,
-                                 struct rmi_function *fn)
-{
-       struct rmi_function_handler *fh;
-
-       if (!fn || !fn->dev.driver)
-               return;
-
-       fh = to_rmi_function_handler(fn->dev.driver);
-       if (fh->attention) {
-               bitmap_and(data->fn_irq_bits, data->irq_status, fn->irq_mask,
-                               data->irq_count);
-               if (!bitmap_empty(data->fn_irq_bits, data->irq_count))
-                       fh->attention(fn, data->fn_irq_bits);
-       }
-}
-
 static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
 {
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct device *dev = &rmi_dev->dev;
-       struct rmi_function *entry;
+       int i;
        int error;
 
        if (!data)
@@ -173,16 +157,8 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
         */
        mutex_unlock(&data->irq_mutex);
 
-       /*
-        * It would be nice to be able to use irq_chip to handle these
-        * nested IRQs.  Unfortunately, most of the current customers for
-        * this driver are using older kernels (3.0.x) that don't support
-        * the features required for that.  Once they've shifted to more
-        * recent kernels (say, 3.3 and higher), this should be switched to
-        * use irq_chip.
-        */
-       list_for_each_entry(entry, &data->function_list, node)
-               process_one_interrupt(data, entry);
+       for_each_set_bit(i, data->irq_status, data->irq_count)
+               handle_nested_irq(irq_find_mapping(data->irqdomain, i));
 
        if (data->input)
                input_sync(data->input);
@@ -1001,9 +977,13 @@ EXPORT_SYMBOL_GPL(rmi_driver_resume);
 static int rmi_driver_remove(struct device *dev)
 {
        struct rmi_device *rmi_dev = to_rmi_device(dev);
+       struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
 
        rmi_disable_irq(rmi_dev, false);
 
+       irq_domain_remove(data->irqdomain);
+       data->irqdomain = NULL;
+
        rmi_f34_remove_sysfs(rmi_dev);
        rmi_free_function_list(rmi_dev);
 
@@ -1035,7 +1015,8 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
 {
        struct rmi_device *rmi_dev = data->rmi_dev;
        struct device *dev = &rmi_dev->dev;
-       int irq_count;
+       struct fwnode_handle *fwnode = rmi_dev->xport->dev->fwnode;
+       int irq_count = 0;
        size_t size;
        int retval;
 
@@ -1046,7 +1027,6 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
         * being accessed.
         */
        rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
-       irq_count = 0;
        data->bootloader_mode = false;
 
        retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
@@ -1058,6 +1038,15 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
        if (data->bootloader_mode)
                dev_warn(dev, "Device in bootloader mode.\n");
 
+       /* Allocate and register a linear revmap irq_domain */
+       data->irqdomain = irq_domain_create_linear(fwnode, irq_count,
+                                                  &irq_domain_simple_ops,
+                                                  data);
+       if (!data->irqdomain) {
+               dev_err(&rmi_dev->dev, "Failed to create IRQ domain\n");
+               return -ENOMEM;
+       }
+
        data->irq_count = irq_count;
        data->num_of_irq_regs = (data->irq_count + 7) / 8;
 
@@ -1080,10 +1069,9 @@ int rmi_init_functions(struct rmi_driver_data *data)
 {
        struct rmi_device *rmi_dev = data->rmi_dev;
        struct device *dev = &rmi_dev->dev;
-       int irq_count;
+       int irq_count = 0;
        int retval;
 
-       irq_count = 0;
        rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
        retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
        if (retval < 0) {
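
On the dispatch side, the driver thread now just walks the latched interrupt status and fires the mapped nested IRQs (see rmi_process_interrupt_requests() above), which is what retires the old hand-rolled per-function attention() walk. Reduced sketch:

#include <linux/bitops.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>

/* Sketch: fan a latched status bitmap out to nested virqs; runs in
 * the parent interrupt's thread context.
 */
static void demo_dispatch(struct irq_domain *domain,
			  unsigned long *status, unsigned int count)
{
	unsigned int i;

	for_each_set_bit(i, status, count)
		handle_nested_irq(irq_find_mapping(domain, i));
}
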
index 8a07ae147df690ee7796c3f9f897904fce6ac6dd..4edaa14fe878650c81e6267550869f8acc714b40 100644 (file)
@@ -681,9 +681,9 @@ static int rmi_f01_resume(struct rmi_function *fn)
        return 0;
 }
 
-static int rmi_f01_attention(struct rmi_function *fn,
-                            unsigned long *irq_bits)
+static irqreturn_t rmi_f01_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct rmi_device *rmi_dev = fn->rmi_dev;
        int error;
        u8 device_status;
@@ -692,7 +692,7 @@ static int rmi_f01_attention(struct rmi_function *fn,
        if (error) {
                dev_err(&fn->dev,
                        "Failed to read device status: %d.\n", error);
-               return error;
+               return IRQ_RETVAL(error);
        }
 
        if (RMI_F01_STATUS_BOOTLOADER(device_status))
@@ -704,11 +704,11 @@ static int rmi_f01_attention(struct rmi_function *fn,
                error = rmi_dev->driver->reset_handler(rmi_dev);
                if (error) {
                        dev_err(&fn->dev, "Device reset failed: %d\n", error);
-                       return error;
+                       return IRQ_RETVAL(error);
                }
        }
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 struct rmi_function_handler rmi_f01_handler = {
index 88822196d6b723fcf69efd9c3b685fb92dedcf7b..aaa1edc9552254609c1e2ba00008b48bf80f3a85 100644 (file)
@@ -244,8 +244,9 @@ static int rmi_f03_config(struct rmi_function *fn)
        return 0;
 }
 
-static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f03_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct rmi_device *rmi_dev = fn->rmi_dev;
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        struct f03_data *f03 = dev_get_drvdata(&fn->dev);
@@ -262,7 +263,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
                /* First grab the data passed by the transport device */
                if (drvdata->attn_data.size < ob_len) {
                        dev_warn(&fn->dev, "F03 interrupted, but data is missing!\n");
-                       return 0;
+                       return IRQ_HANDLED;
                }
 
                memcpy(obs, drvdata->attn_data.data, ob_len);
@@ -277,7 +278,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
                                "%s: Failed to read F03 output buffers: %d\n",
                                __func__, error);
                        serio_interrupt(f03->serio, 0, SERIO_TIMEOUT);
-                       return error;
+                       return IRQ_RETVAL(error);
                }
        }
 
@@ -303,7 +304,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
                serio_interrupt(f03->serio, ob_data, serio_flags);
        }
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static void rmi_f03_remove(struct rmi_function *fn)
index 12a233251793c24c754224ae1b379de52db34e7d..df64d6aed4f7e10b8eb78eb78619a15d7bcaaf56 100644 (file)
@@ -570,9 +570,7 @@ static inline u8 rmi_f11_parse_finger_state(const u8 *f_state, u8 n_finger)
 }
 
 static void rmi_f11_finger_handler(struct f11_data *f11,
-                                  struct rmi_2d_sensor *sensor,
-                                  unsigned long *irq_bits, int num_irq_regs,
-                                  int size)
+                                  struct rmi_2d_sensor *sensor, int size)
 {
        const u8 *f_state = f11->data.f_state;
        u8 finger_state;
@@ -581,12 +579,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
        int rel_fingers;
        int abs_size = sensor->nbr_fingers * RMI_F11_ABS_BYTES;
 
-       int abs_bits = bitmap_and(f11->result_bits, irq_bits, f11->abs_mask,
-                                 num_irq_regs * 8);
-       int rel_bits = bitmap_and(f11->result_bits, irq_bits, f11->rel_mask,
-                                 num_irq_regs * 8);
-
-       if (abs_bits) {
+       if (sensor->report_abs) {
                if (abs_size > size)
                        abs_fingers = size / RMI_F11_ABS_BYTES;
                else
@@ -604,19 +597,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
                        rmi_f11_abs_pos_process(f11, sensor, &sensor->objs[i],
                                                        finger_state, i);
                }
-       }
 
-       if (rel_bits) {
-               if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
-                       rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
-               else
-                       rel_fingers = sensor->nbr_fingers;
-
-               for (i = 0; i < rel_fingers; i++)
-                       rmi_f11_rel_pos_report(f11, i);
-       }
-
-       if (abs_bits) {
                /*
                 * the absolute part is made in 2 parts to allow the kernel
                 * tracking to take place.
@@ -638,7 +619,16 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
                }
 
                input_mt_sync_frame(sensor->input);
+       } else if (sensor->report_rel) {
+               if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
+                       rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
+               else
+                       rel_fingers = sensor->nbr_fingers;
+
+               for (i = 0; i < rel_fingers; i++)
+                       rmi_f11_rel_pos_report(f11, i);
        }
+
 }
 
 static int f11_2d_construct_data(struct f11_data *f11)
@@ -1276,8 +1266,9 @@ static int rmi_f11_config(struct rmi_function *fn)
        return 0;
 }
 
-static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f11_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct rmi_device *rmi_dev = fn->rmi_dev;
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        struct f11_data *f11 = dev_get_drvdata(&fn->dev);
@@ -1303,13 +1294,12 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
                                data_base_addr, f11->sensor.data_pkt,
                                f11->sensor.pkt_size);
                if (error < 0)
-                       return error;
+                       return IRQ_RETVAL(error);
        }
 
-       rmi_f11_finger_handler(f11, &f11->sensor, irq_bits,
-                               drvdata->num_of_irq_regs, valid_bytes);
+       rmi_f11_finger_handler(f11, &f11->sensor, valid_bytes);
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static int rmi_f11_resume(struct rmi_function *fn)
index a3d1aa88f2a9ce27fcd1f89d2f87d58b21686fce..5c7f489157792bf32da34e982b715824ec17eaff 100644 (file)
@@ -197,10 +197,10 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1, int size)
                rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i);
 }
 
-static int rmi_f12_attention(struct rmi_function *fn,
-                            unsigned long *irq_nr_regs)
+static irqreturn_t rmi_f12_attention(int irq, void *ctx)
 {
        int retval;
+       struct rmi_function *fn = ctx;
        struct rmi_device *rmi_dev = fn->rmi_dev;
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        struct f12_data *f12 = dev_get_drvdata(&fn->dev);
@@ -222,7 +222,7 @@ static int rmi_f12_attention(struct rmi_function *fn,
                if (retval < 0) {
                        dev_err(&fn->dev, "Failed to read object data. Code: %d.\n",
                                retval);
-                       return retval;
+                       return IRQ_RETVAL(retval);
                }
        }
 
@@ -232,7 +232,7 @@ static int rmi_f12_attention(struct rmi_function *fn,
 
        input_mt_sync_frame(sensor->input);
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static int rmi_f12_write_control_regs(struct rmi_function *fn)
index 82e0f0d43d55271c92c774ba325b1bc40099f83e..5e3ed5ac0c3e40b3919b59493293720877907f1a 100644 (file)
@@ -122,8 +122,9 @@ static void rmi_f30_report_button(struct rmi_function *fn,
        }
 }
 
-static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f30_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct f30_data *f30 = dev_get_drvdata(&fn->dev);
        struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
        int error;
@@ -134,7 +135,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
                if (drvdata->attn_data.size < f30->register_count) {
                        dev_warn(&fn->dev,
                                 "F30 interrupted, but data is missing\n");
-                       return 0;
+                       return IRQ_HANDLED;
                }
                memcpy(f30->data_regs, drvdata->attn_data.data,
                        f30->register_count);
@@ -147,7 +148,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
                        dev_err(&fn->dev,
                                "%s: Failed to read F30 data registers: %d\n",
                                __func__, error);
-                       return error;
+                       return IRQ_RETVAL(error);
                }
        }
 
@@ -159,7 +160,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
                        rmi_f03_commit_buttons(f30->f03);
        }
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static int rmi_f30_config(struct rmi_function *fn)
index f1f5ac539d5d56b2d554e2aa7bdb50fd0af0d5e3..87a7d4ba382d7210b294f8168adb5083c15b80ee 100644 (file)
@@ -100,8 +100,9 @@ static int rmi_f34_command(struct f34_data *f34, u8 command,
        return 0;
 }
 
-static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f34_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct f34_data *f34 = dev_get_drvdata(&fn->dev);
        int ret;
        u8 status;
@@ -126,7 +127,7 @@ static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
                        complete(&f34->v7.cmd_done);
        }
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static int rmi_f34_write_blocks(struct f34_data *f34, const void *data,
index e8a59d1640192b75e6f83db0e2ad355064c19ff9..a6f515bcab2228a8783f10dbf10fae30462fd852 100644 (file)
@@ -610,11 +610,6 @@ static void rmi_f54_work(struct work_struct *work)
        mutex_unlock(&f54->data_mutex);
 }
 
-static int rmi_f54_attention(struct rmi_function *fn, unsigned long *irqbits)
-{
-       return 0;
-}
-
 static int rmi_f54_config(struct rmi_function *fn)
 {
        struct rmi_driver *drv = fn->rmi_dev->driver;
@@ -756,6 +751,5 @@ struct rmi_function_handler rmi_f54_handler = {
        .func = 0x54,
        .probe = rmi_f54_probe,
        .config = rmi_f54_config,
-       .attention = rmi_f54_attention,
        .remove = rmi_f54_remove,
 };
index ff7043f74a3d32286a6b8cdbed91f1bc3f0be12f..d196ac3d8b8cda8e1cf405101ed5603473db821d 100644 (file)
@@ -603,6 +603,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = {
        { "GSL3692", 0 },
        { "MSSL1680", 0 },
        { "MSSL0001", 0 },
+       { "MSSL0002", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match);
index 0f52d44b3f6997c8c9e4e6f6f1a7da7b43d3e7c5..f5fe0100f9ffd043d251d96ce473775bfdafd3b4 100644 (file)
@@ -199,7 +199,7 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 
 fail:
        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
-       gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs));
+       gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
        return err;
 }
 
index 5377d7e2afba62b518671267b5d29c4963c2e5e6..d7842d312d3eacd7d07853caa6a529a1c8080c99 100644 (file)
@@ -182,6 +182,22 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
        return its->collections + its_dev->event_map.col_map[event];
 }
 
+static struct its_collection *valid_col(struct its_collection *col)
+{
+       if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15)))
+               return NULL;
+
+       return col;
+}
+
+static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
+{
+       if (valid_col(its->collections + vpe->col_idx))
+               return vpe;
+
+       return NULL;
+}
+
 /*
  * ITS command descriptors - parameters to be encoded in a command
  * block.
@@ -439,7 +455,7 @@ static struct its_collection *its_build_mapti_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_movi_cmd(struct its_node *its,
@@ -458,7 +474,7 @@ static struct its_collection *its_build_movi_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_discard_cmd(struct its_node *its,
@@ -476,7 +492,7 @@ static struct its_collection *its_build_discard_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_inv_cmd(struct its_node *its,
@@ -494,7 +510,7 @@ static struct its_collection *its_build_inv_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_int_cmd(struct its_node *its,
@@ -512,7 +528,7 @@ static struct its_collection *its_build_int_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_clear_cmd(struct its_node *its,
@@ -530,7 +546,7 @@ static struct its_collection *its_build_clear_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_invall_cmd(struct its_node *its,
@@ -554,7 +570,7 @@ static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vinvall_cmd.vpe;
+       return valid_vpe(its, desc->its_vinvall_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
@@ -576,7 +592,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmapp_cmd.vpe;
+       return valid_vpe(its, desc->its_vmapp_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
@@ -599,7 +615,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmapti_cmd.vpe;
+       return valid_vpe(its, desc->its_vmapti_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
@@ -622,7 +638,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmovi_cmd.vpe;
+       return valid_vpe(its, desc->its_vmovi_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
@@ -640,7 +656,7 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmovp_cmd.vpe;
+       return valid_vpe(its, desc->its_vmovp_cmd.vpe);
 }
 
 static u64 its_cmd_ptr_to_offset(struct its_node *its,
@@ -1824,11 +1840,16 @@ static int its_alloc_tables(struct its_node *its)
 
 static int its_alloc_collections(struct its_node *its)
 {
+       int i;
+
        its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
                                   GFP_KERNEL);
        if (!its->collections)
                return -ENOMEM;
 
+       for (i = 0; i < nr_cpu_ids; i++)
+               its->collections[i].target_address = ~0ULL;
+
        return 0;
 }
 
@@ -2310,7 +2331,14 @@ static int its_irq_domain_activate(struct irq_domain *domain,
                cpu_mask = cpumask_of_node(its_dev->its->numa_node);
 
        /* Bind the LPI to the first possible CPU */
-       cpu = cpumask_first(cpu_mask);
+       cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
+       if (cpu >= nr_cpu_ids) {
+               if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
+                       return -EINVAL;
+
+               cpu = cpumask_first(cpu_online_mask);
+       }
+
        its_dev->event_map.col_map[event] = cpu;
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
@@ -3399,6 +3427,16 @@ static int redist_disable_lpis(void)
        u64 timeout = USEC_PER_SEC;
        u64 val;
 
+       /*
+        * If coming via a CPU hotplug event, we don't need to disable
+        * LPIs before trying to re-enable them. They are already
+        * configured and all is well in the world. Detect this case
+        * by checking the allocation of the pending table for the
+        * current CPU.
+        */
+       if (gic_data_rdist()->pend_page)
+               return 0;
+
        if (!gic_rdists_supports_plpis()) {
                pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
                return -ENXIO;
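
The ITS changes make unmapped collections detectable: every collection's target_address starts as ~0ULL, and the command builders return NULL through valid_col()/valid_vpe() (with a one-shot WARN) instead of emitting a command aimed at a bogus redistributor, since a valid target always has its low 16 bits clear. (GENMASK_ULL conventionally takes (high, low); the hunk spells the mask (0, 15).) A reduced sketch of the sentinel-plus-validate pattern, written with the conventional argument order:

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/types.h>

static u64 demo_target(const u64 *targets, int idx)
{
	/* ~0ULL marks "never mapped", so it always trips this check. */
	if (WARN_ON_ONCE(targets[idx] & GENMASK_ULL(15, 0)))
		return 0;	/* refuse to build the command */

	return targets[idx];
}
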
index 1ec3bfe56693ab39831e464048b1959a04250e6d..c671b3212010e6de583e5a5211fc2a20064200f2 100644 (file)
@@ -93,8 +93,12 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
        msg->address_lo = lower_32_bits(msi_data->msiir_addr);
        msg->data = data->hwirq;
 
-       if (msi_affinity_flag)
-               msg->data |= cpumask_first(data->common->affinity);
+       if (msi_affinity_flag) {
+               const struct cpumask *mask;
+
+               mask = irq_data_get_effective_affinity_mask(data);
+               msg->data |= cpumask_first(mask);
+       }
 
        iommu_dma_map_msi_msg(data->irq, msg);
 }
@@ -121,7 +125,7 @@ static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
                return -EINVAL;
        }
 
-       cpumask_copy(irq_data->common->affinity, mask);
+       irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
 
        return IRQ_SET_MASK_OK;
 }
index 6e0c2814d0329ace7b109915d58c9805ce2b1e6a..ef5560b848ab3a66adc29d119d5608bf726642e0 100644 (file)
@@ -9,6 +9,7 @@
  *
  */
 
+#include <linux/compiler.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
@@ -1321,7 +1322,7 @@ static inline void capinc_tty_exit(void) { }
  * /proc/capi/capi20:
  *  minor applid nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
  */
-static int capi20_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused capi20_proc_show(struct seq_file *m, void *v)
 {
        struct capidev *cdev;
        struct list_head *l;
@@ -1344,7 +1345,7 @@ static int capi20_proc_show(struct seq_file *m, void *v)
  * /proc/capi/capi20ncci:
  *  applid ncci
  */
-static int capi20ncci_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused capi20ncci_proc_show(struct seq_file *m, void *v)
 {
        struct capidev *cdev;
        struct capincci *np;
index ee510f901720d1686524f37bc598bd6e3ee254a0..e8949f3dcae17712cae36973804f90d8602ca3b4 100644 (file)
@@ -9,6 +9,7 @@
  *
  */
 
+#include <linux/compiler.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
@@ -2451,7 +2452,7 @@ lower_callback(struct notifier_block *nb, unsigned long val, void *v)
  * /proc/capi/capidrv:
  * nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
  */
-static int capidrv_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused capidrv_proc_show(struct seq_file *m, void *v)
 {
        seq_printf(m, "%lu %lu %lu %lu\n",
                   global.ap.nrecvctlpkt,
index 20d0a080a2b0d521ace124c8b1206b0298b7d55e..ecdeb89645d00454381ef4020986fdf5e38f2eec 100644 (file)
@@ -739,6 +739,7 @@ static void read_int_callback(struct urb *urb)
 
        case HD_OPEN_B2CHANNEL_ACK:
                ++channel;
+               /* fall through */
        case HD_OPEN_B1CHANNEL_ACK:
                bcs = cs->bcs + channel;
                update_basstate(ucs, BS_B1OPEN << channel, 0);
@@ -752,6 +753,7 @@ static void read_int_callback(struct urb *urb)
 
        case HD_CLOSE_B2CHANNEL_ACK:
                ++channel;
+               /* fall through */
        case HD_CLOSE_B1CHANNEL_ACK:
                bcs = cs->bcs + channel;
                update_basstate(ucs, 0, BS_B1OPEN << channel);
@@ -765,6 +767,7 @@ static void read_int_callback(struct urb *urb)
 
        case HD_B2_FLOW_CONTROL:
                ++channel;
+               /* fall through */
        case HD_B1_FLOW_CONTROL:
                bcs = cs->bcs + channel;
                atomic_add((l - BAS_NORMFRAME) * BAS_CORRFRAMES,
@@ -972,16 +975,14 @@ static int starturbs(struct bc_state *bcs)
                        rc = -EFAULT;
                        goto error;
                }
+               usb_fill_int_urb(urb, bcs->cs->hw.bas->udev,
+                                usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel),
+                                ubc->isoinbuf + k * BAS_INBUFSIZE,
+                                BAS_INBUFSIZE, read_iso_callback, bcs,
+                                BAS_FRAMETIME);
 
-               urb->dev = bcs->cs->hw.bas->udev;
-               urb->pipe = usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel);
                urb->transfer_flags = URB_ISO_ASAP;
-               urb->transfer_buffer = ubc->isoinbuf + k * BAS_INBUFSIZE;
-               urb->transfer_buffer_length = BAS_INBUFSIZE;
                urb->number_of_packets = BAS_NUMFRAMES;
-               urb->interval = BAS_FRAMETIME;
-               urb->complete = read_iso_callback;
-               urb->context = bcs;
                for (j = 0; j < BAS_NUMFRAMES; j++) {
                        urb->iso_frame_desc[j].offset = j * BAS_MAXFRAME;
                        urb->iso_frame_desc[j].length = BAS_MAXFRAME;
@@ -1005,15 +1006,15 @@ static int starturbs(struct bc_state *bcs)
                        rc = -EFAULT;
                        goto error;
                }
-               urb->dev = bcs->cs->hw.bas->udev;
-               urb->pipe = usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel);
+               usb_fill_int_urb(urb, bcs->cs->hw.bas->udev,
+                                usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel),
+                                ubc->isooutbuf->data,
+                                sizeof(ubc->isooutbuf->data),
+                                write_iso_callback, &ubc->isoouturbs[k],
+                                BAS_FRAMETIME);
+
                urb->transfer_flags = URB_ISO_ASAP;
-               urb->transfer_buffer = ubc->isooutbuf->data;
-               urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data);
                urb->number_of_packets = BAS_NUMFRAMES;
-               urb->interval = BAS_FRAMETIME;
-               urb->complete = write_iso_callback;
-               urb->context = &ubc->isoouturbs[k];
                for (j = 0; j < BAS_NUMFRAMES; ++j) {
                        urb->iso_frame_desc[j].offset = BAS_OUTBUFSIZE;
                        urb->iso_frame_desc[j].length = BAS_NORMFRAME;
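
Both URB setup paths above are converted to usb_fill_int_urb(), which initializes dev, pipe, buffer, length, completion, context and interval in one call; only the isochronous extras (transfer_flags, packet count, frame descriptors) still need manual setup, since there is no usb_fill_iso_urb() helper. The shape of the result, as a sketch (names hypothetical):

#include <linux/usb.h>

static void demo_fill_iso(struct urb *urb, struct usb_device *udev,
			  unsigned int ep, void *buf, int len,
			  usb_complete_t done, void *ctx, int interval,
			  int nframes, int framelen)
{
	int i;

	usb_fill_int_urb(urb, udev, usb_rcvisocpipe(udev, ep),
			 buf, len, done, ctx, interval);

	/* ISO-only fields the helper does not cover: */
	urb->transfer_flags = URB_ISO_ASAP;
	urb->number_of_packets = nframes;
	for (i = 0; i < nframes; i++) {
		urb->iso_frame_desc[i].offset = i * framelen;
		urb->iso_frame_desc[i].length = framelen;
	}
}
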
index ae2b2669af1bc44667e49e60148c9591b7fa4e85..8eb28a83832eddc11f5fab235ad29f0252a89268 100644 (file)
@@ -361,6 +361,7 @@ modehdlc(struct bchannel *bch, int protocol)
        switch (protocol) {
        case -1: /* used for init */
                bch->state = -1;
+               /* fall through */
        case ISDN_P_NONE:
                if (bch->state == ISDN_P_NONE)
                        break;
index 34c93874af23bc43565119eab4b8b9b5d84a0815..72a271b988730d071f4a4c28bb0b0f2418391954 100644 (file)
@@ -1296,6 +1296,7 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
        case (-1): /* used for init */
                bch->state = -1;
                bch->nr = bc;
+               /* fall through */
        case (ISDN_P_NONE):
                if (bch->state == ISDN_P_NONE)
                        return 0;
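
The /* fall through */ comments being added throughout these ISDN drivers are not cosmetic: they are the marker GCC's -Wimplicit-fallthrough recognizes, so deliberate case fall-through stops looking like a missing break (later kernels use the fallthrough; pseudo-keyword instead). The idiom in isolation, modeled on the ++channel cases above:

static int demo_channel(int event_is_b2)
{
	int channel = 0;

	switch (event_is_b2) {
	case 1:
		++channel;
		/* fall through */
	case 0:
		return channel;
	default:
		return -1;
	}
}
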
index 17cc879ad2bbf784be196528d14a533bf0267b29..6d05946b445eb039aeb6c9c755e94dbe8b8f1dac 100644 (file)
@@ -819,6 +819,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
        int             fifon = fifo->fifonum;
        int             i;
        int             hdlc = 0;
+       unsigned long   flags;
 
        if (debug & DBG_HFC_CALL_TRACE)
                printk(KERN_DEBUG "%s: %s: fifo(%i) len(%i) "
@@ -835,7 +836,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
                return;
        }
 
-       spin_lock(&hw->lock);
+       spin_lock_irqsave(&hw->lock, flags);
        if (fifo->dch) {
                rx_skb = fifo->dch->rx_skb;
                maxlen = fifo->dch->maxlen;
@@ -844,7 +845,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
        if (fifo->bch) {
                if (test_bit(FLG_RX_OFF, &fifo->bch->Flags)) {
                        fifo->bch->dropcnt += len;
-                       spin_unlock(&hw->lock);
+                       spin_unlock_irqrestore(&hw->lock, flags);
                        return;
                }
                maxlen = bchannel_get_rxbuf(fifo->bch, len);
@@ -854,7 +855,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
                                skb_trim(rx_skb, 0);
                        pr_warning("%s.B%d: No bufferspace for %d bytes\n",
                                   hw->name, fifo->bch->nr, len);
-                       spin_unlock(&hw->lock);
+                       spin_unlock_irqrestore(&hw->lock, flags);
                        return;
                }
                maxlen = fifo->bch->maxlen;
@@ -878,7 +879,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
                        } else {
                                printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n",
                                       hw->name, __func__);
-                               spin_unlock(&hw->lock);
+                               spin_unlock_irqrestore(&hw->lock, flags);
                                return;
                        }
                }
@@ -888,7 +889,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
                               "for fifo(%d) HFCUSB_D_RX\n",
                               hw->name, __func__, fifon);
                        skb_trim(rx_skb, 0);
-                       spin_unlock(&hw->lock);
+                       spin_unlock_irqrestore(&hw->lock, flags);
                        return;
                }
        }
@@ -942,7 +943,7 @@ hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
                /* deliver transparent data to layer2 */
                recv_Bchannel(fifo->bch, MISDN_ID_ANY, false);
        }
-       spin_unlock(&hw->lock);
+       spin_unlock_irqrestore(&hw->lock, flags);
 }
 
 static void
@@ -979,18 +980,19 @@ rx_iso_complete(struct urb *urb)
        __u8 *buf;
        static __u8 eof[8];
        __u8 s0_state;
+       unsigned long flags;
 
        fifon = fifo->fifonum;
        status = urb->status;
 
-       spin_lock(&hw->lock);
+       spin_lock_irqsave(&hw->lock, flags);
        if (fifo->stop_gracefull) {
                fifo->stop_gracefull = 0;
                fifo->active = 0;
-               spin_unlock(&hw->lock);
+               spin_unlock_irqrestore(&hw->lock, flags);
                return;
        }
-       spin_unlock(&hw->lock);
+       spin_unlock_irqrestore(&hw->lock, flags);
 
        /*
         * ISO transfer only partially completed,
@@ -1096,15 +1098,16 @@ rx_int_complete(struct urb *urb)
        struct usb_fifo *fifo = (struct usb_fifo *) urb->context;
        struct hfcsusb *hw = fifo->hw;
        static __u8 eof[8];
+       unsigned long flags;
 
-       spin_lock(&hw->lock);
+       spin_lock_irqsave(&hw->lock, flags);
        if (fifo->stop_gracefull) {
                fifo->stop_gracefull = 0;
                fifo->active = 0;
-               spin_unlock(&hw->lock);
+               spin_unlock_irqrestore(&hw->lock, flags);
                return;
        }
-       spin_unlock(&hw->lock);
+       spin_unlock_irqrestore(&hw->lock, flags);
 
        fifon = fifo->fifonum;
        if ((!fifo->active) || (urb->status)) {
@@ -1172,12 +1175,13 @@ tx_iso_complete(struct urb *urb)
        int *tx_idx;
        int frame_complete, fifon, status, fillempty = 0;
        __u8 threshbit, *p;
+       unsigned long flags;
 
-       spin_lock(&hw->lock);
+       spin_lock_irqsave(&hw->lock, flags);
        if (fifo->stop_gracefull) {
                fifo->stop_gracefull = 0;
                fifo->active = 0;
-               spin_unlock(&hw->lock);
+               spin_unlock_irqrestore(&hw->lock, flags);
                return;
        }
 
@@ -1195,7 +1199,7 @@ tx_iso_complete(struct urb *urb)
        } else {
                printk(KERN_DEBUG "%s: %s: neither BCH nor DCH\n",
                       hw->name, __func__);
-               spin_unlock(&hw->lock);
+               spin_unlock_irqrestore(&hw->lock, flags);
                return;
        }
 
@@ -1375,7 +1379,7 @@ tx_iso_complete(struct urb *urb)
                               hw->name, __func__,
                               symbolic(urb_errlist, status), status, fifon);
        }
-       spin_unlock(&hw->lock);
+       spin_unlock_irqrestore(&hw->lock, flags);
 }
 
 /*
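
Every acquisition of hw->lock in these URB completion handlers becomes spin_lock_irqsave(): completion callbacks may run in hard-IRQ or, depending on the HCD, other contexts, and the same lock is taken from paths that run with interrupts disabled, so the plain spin_lock() variants risk deadlock. The pattern, minimal:

#include <linux/spinlock.h>

/* Sketch: touch state shared with IRQ context; irqsave records the
 * caller's interrupt state and irqrestore puts it back unchanged.
 */
static void demo_touch(spinlock_t *lock, int *counter)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	(*counter)++;			/* critical section */
	spin_unlock_irqrestore(lock, flags);
}
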
index 1fc290659e945a5ffdf2efcda8cf687c4bedd42d..3e01012be4abc3a8abe51d727ad1fe70b83badd7 100644 (file)
@@ -887,6 +887,7 @@ release_card(struct inf_hw *card) {
                                release_card(card->sc[i]);
                        card->sc[i] = NULL;
                }
+               /* fall through */
        default:
                pci_disable_device(card->pdev);
                pci_set_drvdata(card->pdev, NULL);
index b791688d0228ccb8921b57775bf6330adb8a96ea..386731ec248912f327afe1e327306274add1876e 100644 (file)
@@ -972,6 +972,7 @@ isar_pump_statev_fax(struct isar_ch *ch, u8 devt) {
                                break;
                        case PCTRL_CMD_FTM:
                                p1 = 2;
+                               /* fall through */
                        case PCTRL_CMD_FTH:
                                send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL,
                                          PCTRL_CMD_SILON, 1, &p1);
@@ -1177,6 +1178,7 @@ setup_pump(struct isar_ch *ch) {
                        send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG,
                                  PMOD_DTMF, 1, param);
                }
+               /* fall through */
        case ISDN_P_B_MODEM_ASYNC:
                ctrl = PMOD_DATAMODEM;
                if (test_bit(FLG_ORIGIN, &ch->bch.Flags)) {
@@ -1268,6 +1270,7 @@ setup_iom2(struct isar_ch *ch) {
        case ISDN_P_B_MODEM_ASYNC:
        case ISDN_P_B_T30_FAX:
                cmsb |= IOM_CTRL_RCV;
+               /* fall through */
        case ISDN_P_B_L2DTMF:
                if (test_bit(FLG_DTMFSEND, &ch->bch.Flags))
                        cmsb |= IOM_CTRL_RCV;
@@ -1560,6 +1563,7 @@ isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb)
                                ich->is->name, hh->id);
                        ret = -EINVAL;
                }
+               /* fall through */
        default:
                pr_info("%s: %s unknown prim(%x,%x)\n",
                        ich->is->name, __func__, hh->prim, hh->id);
index a18b605fb4f23e97d863b6bc64db924ad58053eb..b161456c942e2ea94796bfceb4df71f8400a1bf1 100644 (file)
@@ -207,6 +207,7 @@ modehdlc(struct BCState *bcs, int mode, int bc)
                bcs->mode = 1;
                bcs->channel = bc;
                bc = 0;
+               /* fall through */
        case (L1_MODE_NULL):
                if (bcs->mode == L1_MODE_NULL)
                        return;
index ddec47a911a0df6ea74c1a9f5e926fbbacc54a2a..5f43783039d4617dcf5e328435192a3beed235fa 100644 (file)
@@ -1369,6 +1369,7 @@ leased_l1l2(struct PStack *st, int pr, void *arg)
        case (PH_ACTIVATE | INDICATION):
        case (PH_ACTIVATE | CONFIRM):
                event = EV_LEASED;
+               /* fall through */
        case (PH_DEACTIVATE | INDICATION):
        case (PH_DEACTIVATE | CONFIRM):
                if (test_bit(FLG_TWO_DCHAN, &chanp->cs->HW_Flags))
index 7108bdb8742e7a41b5aac61ada26ad212211fe67..fcc9c46127b40da1bf55ff346a69190bbc4a9fea 100644 (file)
@@ -1843,6 +1843,7 @@ static void hisax_b_l2l1(struct PStack *st, int pr, void *arg)
        case PH_DEACTIVATE | REQUEST:
                test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
                skb_queue_purge(&bcs->squeue);
+               /* fall through */
        default:
                B_L2L1(b_if, pr, arg);
                break;
index 35c6df6534ecde8507c417a61a1c93f41fa4a293..a6d8af02354a4eb7240a0393f65226f424f0c223 100644 (file)
@@ -108,6 +108,7 @@ ReadISAC(struct IsdnCardState *cs, u_char offset)
        switch (cs->subtyp) {
        case R647:
                off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
+               /* fall through */
        case R685:
                return (readreg(cs->hw.gazel.isac, off2));
        case R753:
@@ -125,6 +126,7 @@ WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
        switch (cs->subtyp) {
        case R647:
                off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
+               /* fall through */
        case R685:
                writereg(cs->hw.gazel.isac, off2, value);
                break;
@@ -203,6 +205,7 @@ ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
        switch (cs->subtyp) {
        case R647:
                off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
+               /* fall through */
        case R685:
                return (readreg(cs->hw.gazel.hscx[hscx], off2));
        case R753:
@@ -220,6 +223,7 @@ WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
        switch (cs->subtyp) {
        case R647:
                off2 = ((off2 << 8 & 0xf000) | (off2 & 0xf));
+               /* fall through */
        case R685:
                writereg(cs->hw.gazel.hscx[hscx], off2, value);
                break;
index 97ecb3073045612f28bb80954c068726ee957504..1d4cd01d46851027776fac05f92c88f91dbb883d 100644 (file)
@@ -432,16 +432,12 @@ fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe,
 {
        int k;
 
-       urb->dev = dev;
-       urb->pipe = pipe;
-       urb->complete = complete;
+       usb_fill_int_urb(urb, dev, pipe, buf, packet_size * num_packets,
+                        complete, context, interval);
+
        urb->number_of_packets = num_packets;
-       urb->transfer_buffer_length = packet_size * num_packets;
-       urb->context = context;
-       urb->transfer_buffer = buf;
        urb->transfer_flags = URB_ISO_ASAP;
        urb->actual_length = 0;
-       urb->interval = interval;
        for (k = 0; k < num_packets; k++) {
                urb->iso_frame_desc[k].offset = packet_size * k;
                urb->iso_frame_desc[k].length = packet_size;
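
Both fill_isoc_urb() conversions in this series collapse seven open-coded URB field assignments into one usb_fill_int_urb() call; only the isochronous-specific fields still have to be filled by hand. A consolidated sketch of the resulting shape (hypothetical function name, mirroring the hunks):

	#include <linux/usb.h>

	static void my_fill_isoc_urb(struct urb *urb, struct usb_device *dev,
				     unsigned int pipe, void *buf, int num_packets,
				     int packet_size, usb_complete_t complete_fn,
				     void *context, int interval)
	{
		int k;

		/* Sets dev, pipe, buffer, length, completion, context, interval. */
		usb_fill_int_urb(urb, dev, pipe, buf, packet_size * num_packets,
				 complete_fn, context, interval);

		/* No helper covers the ISO descriptors; they stay manual. */
		urb->number_of_packets = num_packets;
		urb->transfer_flags = URB_ISO_ASAP;
		for (k = 0; k < num_packets; k++) {
			urb->iso_frame_desc[k].offset = packet_size * k;
			urb->iso_frame_desc[k].length = packet_size;
		}
	}

One behavioral detail worth noting: usb_fill_int_urb() also applies the high-speed interval encoding, which the open-coded assignments did not.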
index d01ff116797b937d02f04ce17abc8fe8802cd528..82c1879f56647e7bc39b789587c78dafa6367cfa 100644 (file)
@@ -1089,6 +1089,7 @@ isar_pump_statev_fax(struct BCState *bcs, u_char devt) {
                                break;
                        case PCTRL_CMD_FTM:
                                p1 = 2;
+                               /* fall through */
                        case PCTRL_CMD_FTH:
                                sendmsg(cs, dps | ISAR_HIS_PUMPCTRL,
                                        PCTRL_CMD_SILON, 1, &p1);
@@ -1097,6 +1098,7 @@ isar_pump_statev_fax(struct BCState *bcs, u_char devt) {
                        case PCTRL_CMD_FRM:
                                if (frm_extra_delay)
                                        mdelay(frm_extra_delay);
+                               /* fall through */
                        case PCTRL_CMD_FRH:
                                p1 = bcs->hw.isar.mod = bcs->hw.isar.newmod;
                                bcs->hw.isar.newmod = 0;
index da0a1c6aa32996e6cab700b19654b819cfa0afa4..98f60d1523f4ec06fb761014c3ebdcd592608cfe 100644 (file)
@@ -88,6 +88,7 @@ l3_1tr6_setup_req(struct l3_process *pc, u_char pr, void *arg)
                        break;
                case 'C':
                        channel = 0x08;
+                       /* fall through */
                case 'P':
                        channel |= 0x80;
                        teln++;
index 18a3484b1f7e96f33439875ede43704f91feaab8..368d152a8f1d7d00b6414f9963a7cad727beb77e 100644 (file)
@@ -1282,6 +1282,7 @@ l3dss1_setup_req(struct l3_process *pc, u_char pr,
                        switch (0x5f & *teln) {
                        case 'C':
                                channel = 0x08;
+                               /* fall through */
                        case 'P':
                                channel |= 0x80;
                                teln++;
index 1cb9930d5e24cc23df4ce777f3d5acdd21374985..f207fda691c71ae67b260d66485db48998346f80 100644 (file)
@@ -408,15 +408,10 @@ fill_isoc_urb(struct urb *urb, struct usb_device *dev,
 {
        int k;
 
-       urb->dev = dev;
-       urb->pipe = pipe;
-       urb->interval = 1;
-       urb->transfer_buffer = buf;
+       usb_fill_int_urb(urb, dev, pipe, buf, num_packets * packet_size,
+                        complete, context, 1);
+
        urb->number_of_packets = num_packets;
-       urb->transfer_buffer_length = num_packets * packet_size;
-       urb->actual_length = 0;
-       urb->complete = complete;
-       urb->context = context;
        urb->transfer_flags = URB_ISO_ASAP;
        for (k = 0; k < num_packets; k++) {
                urb->iso_frame_desc[k].offset = packet_size * k;
index 4a0425378f37864779135048c097ec1e73cc165a..ba177c3a621b1371e0c50ded7a3d0967c4309039 100644 (file)
@@ -99,6 +99,7 @@ pof_handle_data(hysdn_card *card, int datlen)
 
        case TAG_CBOOTDTA:
                DecryptBuf(boot, datlen);       /* we need to decrypt the buffer */
+               /* fall through */
        case TAG_BOOTDTA:
                if (card->debug_flags & LOG_POF_RECORD)
                        hysdn_addlog(card, "POF got %s len=%d offs=0x%lx",
@@ -137,6 +138,7 @@ pof_handle_data(hysdn_card *card, int datlen)
 
        case TAG_CABSDATA:
                DecryptBuf(boot, datlen);       /* we need to decrypt the buffer */
+               /* fall through */
        case TAG_ABSDATA:
                if (card->debug_flags & LOG_POF_RECORD)
                        hysdn_addlog(card, "POF got %s len=%d offs=0x%lx",
index 960f26348bb58e00f81166444a53d7544f8e7f68..b730037a0e2d383b2f6037561ccd396c558077a8 100644 (file)
@@ -787,7 +787,7 @@ isdn_tty_suspend(char *id, modem_info *info, atemu *m)
                cmd.parm.cmsg.para[3] = 4; /* 16 bit 0x0004 Suspend */
                cmd.parm.cmsg.para[4] = 0;
                cmd.parm.cmsg.para[5] = l;
-               strncpy(&cmd.parm.cmsg.para[6], id, l);
+               memcpy(&cmd.parm.cmsg.para[6], id, l);
                cmd.command = CAPI_PUT_MESSAGE;
                cmd.driver = info->isdn_driver;
                cmd.arg = info->isdn_channel;
@@ -877,7 +877,7 @@ isdn_tty_resume(char *id, modem_info *info, atemu *m)
                cmd.parm.cmsg.para[3] = 5; /* 16 bit 0x0005 Resume */
                cmd.parm.cmsg.para[4] = 0;
                cmd.parm.cmsg.para[5] = l;
-               strncpy(&cmd.parm.cmsg.para[6], id, l);
+               memcpy(&cmd.parm.cmsg.para[6], id, l);
                cmd.command = CAPI_PUT_MESSAGE;
                info->dialing = 1;
 //             strcpy(dev->num[i], n);
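
The strncpy() to memcpy() switch here is not cosmetic: l bytes are copied into a binary CAPI parameter block right after an explicit length byte, so no NUL terminator is wanted, and strncpy(dst, src, l) with l == strlen(src) never writes one anyway, while newer compilers warn about the truncation. memcpy() states the intent. A stand-alone illustration with a hypothetical buffer, not the real CAPI layout:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *id = "SUSPEND01";
		size_t l = strlen(id);
		unsigned char para[32] = { 0 };

		/* Binary field: one length byte followed by l raw bytes. */
		para[5] = (unsigned char)l;
		memcpy(&para[6], id, l);	/* exact-length copy, no terminator */

		printf("len=%d first=%c\n", para[5], para[6]);
		return 0;
	}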
index 8b74ce412524827c14333e4444e275ac4fdcc723..2a5f6668756cd6ea79cc8e98e18e7b606fecd2e7 100644 (file)
@@ -354,6 +354,7 @@ EncodeMatrix(unsigned char *buf, int len, unsigned char *m, int mlen)
                                printk(KERN_WARNING "isdn_v110 (EncodeMatrix): buffer full!\n");
                                return line;
                        }
+                       /* else: fall through */
                case 128:
                        m[line] = 128;  /* leftmost -> set byte to 10000000 */
                        mbit = 64;      /* current bit in the matrix line */
@@ -386,20 +387,28 @@ EncodeMatrix(unsigned char *buf, int len, unsigned char *m, int mlen)
                switch (++line % 10) {
                case 1:
                        m[line++] = 0xfe;
+                       /* fall through */
                case 2:
                        m[line++] = 0xfe;
+                       /* fall through */
                case 3:
                        m[line++] = 0xfe;
+                       /* fall through */
                case 4:
                        m[line++] = 0xfe;
+                       /* fall through */
                case 5:
                        m[line++] = 0xbf;
+                       /* fall through */
                case 6:
                        m[line++] = 0xfe;
+                       /* fall through */
                case 7:
                        m[line++] = 0xfe;
+                       /* fall through */
                case 8:
                        m[line++] = 0xfe;
+                       /* fall through */
                case 9:
                        m[line++] = 0xfe;
                }
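
All of the /* fall through */ insertions in this series serve one purpose: GCC 7's -Wimplicit-fallthrough warning treats a comment matching that pattern as an explicit statement that the missing break is intentional. A compilable reduction of the idiom:

	#include <stdio.h>

	static int classify(int mode)
	{
		int flags = 0;

		switch (mode) {
		case 2:
			flags |= 0x08;	/* extra setup only for mode 2 ... */
			/* fall through */
		case 1:
			flags |= 0x80;	/* ... then the path shared with mode 1 */
			break;
		default:
			break;
		}
		return flags;
	}

	int main(void)
	{
		printf("%#x %#x\n", classify(2), classify(1));	/* 0x88 0x80 */
		return 0;
	}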
index 98f90aadd141b03c42bedd070b66030be7983d86..18c0a1281914fa3218761bd20b2a2e0c85e8aae6 100644 (file)
@@ -588,7 +588,7 @@ static const struct proto_ops data_sock_ops = {
        .getname        = data_sock_getname,
        .sendmsg        = mISDN_sock_sendmsg,
        .recvmsg        = mISDN_sock_recvmsg,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = data_sock_setsockopt,
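
The .poll_mask to .poll change restores the classic socket poll callback after the kernel-wide revert of the short-lived ->poll_mask() infrastructure during the 4.18 cycle; datagram_poll() is the stock implementation for datagram-style sockets. A sketch of the relevant slot (all other mandatory proto_ops fields elided):

	#include <linux/net.h>
	#include <linux/skbuff.h>	/* declares datagram_poll() */

	static const struct proto_ops my_sock_ops = {
		.family	= PF_ISDN,
		.poll	= datagram_poll,	/* readable skbs, errors, HUP */
		/* ... .sendmsg, .recvmsg, etc. as in the hunk above ... */
	};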
index 422dced7c90ac26dcf0d366fedb32ab9edf44207..d97c6dd52223c9519e2af28e150cbc9f2a1faa0c 100644 (file)
@@ -539,6 +539,7 @@ create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
                rq.protocol = ISDN_P_NT_S0;
                if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
                        rq.protocol = ISDN_P_NT_E1;
+               /* fall through */
        case ISDN_P_LAPD_TE:
                ch->recv = mISDN_queue_message;
                ch->peer = &dev->D.st->own;
index 10c08982185a572ff05683461d514e86c8920f96..9c03f35d9df113c6eb6608f4b48b85447635aca9 100644 (file)
@@ -4,7 +4,7 @@
 
 menuconfig NVM
        bool "Open-Channel SSD target support"
-       depends on BLOCK && HAS_DMA && PCI
+       depends on BLOCK && PCI
        select BLK_DEV_NVME
        help
          Say Y here to enable Open-channel SSDs.
index ab13fcec3fca046c3da6fd621f0e0db9c47b1bf9..75df4c9d8b541de480dfea8d823e0eff389d9ccd 100644 (file)
@@ -588,7 +588,7 @@ static const char *raid10_md_layout_to_format(int layout)
 }
 
 /* Return md raid10 algorithm for @name */
-static const int raid10_name_to_format(const char *name)
+static int raid10_name_to_format(const char *name)
 {
        if (!strcasecmp(name, "near"))
                return ALGORITHM_RAID10_NEAR;
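
The hunk above drops a meaningless qualifier: const on a by-value return type has no effect, since the caller receives a copy, and GCC diagnoses it under -Wignored-qualifiers. For example:

	/* gcc -Wignored-qualifiers -c example.c */

	static const int bad_lookup(void)	/* warning: type qualifiers ignored */
	{
		return 42;
	}

	static int good_lookup(void)		/* identical semantics, no warning */
	{
		return 42;
	}

	int use_both(void)
	{
		return bad_lookup() + good_lookup();
	}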
index 938766794c2ef3b6caf538a0fa787447eadb160c..3d0e2c198f0614dbaf22db657a2bfc9336f89ebd 100644 (file)
@@ -885,9 +885,7 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
 static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
                               sector_t start, sector_t len, void *data)
 {
-       struct request_queue *q = bdev_get_queue(dev->bdev);
-
-       return q && blk_queue_dax(q);
+       return bdev_dax_supported(dev->bdev, PAGE_SIZE);
 }
 
 static bool dm_table_supports_dax(struct dm_table *t)
@@ -1907,6 +1905,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 
        if (dm_table_supports_dax(t))
                blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+       else
+               blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
+
        if (dm_table_supports_dax_write_cache(t))
                dax_write_cache(t->md->dax_dev, true);
 
index 36ef284ad086b881324771d4f882dc6fa96d6dde..72142021b5c9a0410cfb6ccb04a93d613376fb53 100644 (file)
@@ -776,7 +776,6 @@ static int __write_changed_details(struct dm_pool_metadata *pmd)
 static int __commit_transaction(struct dm_pool_metadata *pmd)
 {
        int r;
-       size_t metadata_len, data_len;
        struct thin_disk_superblock *disk_super;
        struct dm_block *sblock;
 
@@ -797,14 +796,6 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
        if (r < 0)
                return r;
 
-       r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
-       if (r < 0)
-               return r;
-
-       r = dm_sm_root_size(pmd->data_sm, &data_len);
-       if (r < 0)
-               return r;
-
        r = save_sm_roots(pmd);
        if (r < 0)
                return r;
index 7945238df1c0a67a8e525697f0e419c7594ed1ad..b900723bbd0fae4845a17ef67dadcf33dc5cc67b 100644 (file)
@@ -1386,6 +1386,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
 
 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
 
+static void requeue_bios(struct pool *pool);
+
 static void check_for_space(struct pool *pool)
 {
        int r;
@@ -1398,8 +1400,10 @@ static void check_for_space(struct pool *pool)
        if (r)
                return;
 
-       if (nr_free)
+       if (nr_free) {
                set_pool_mode(pool, PM_WRITE);
+               requeue_bios(pool);
+       }
 }
 
 /*
@@ -1476,7 +1480,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
 
        r = dm_pool_alloc_data_block(pool->pmd, result);
        if (r) {
-               metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
+               if (r == -ENOSPC)
+                       set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
+               else
+                       metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
                return r;
        }
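
The change narrows the failure handling in alloc_data_block(): -ENOSPC from the allocator is an expected condition that should only flip the pool into out-of-data-space mode, while any other error still counts as a metadata failure. The dispatch, consolidated from the hunk above as a sketch:

	/* Sketch of the error split; the names are those used in the hunk. */
	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		if (r == -ENOSPC)
			/* Expected: data space ran out; degrade gracefully. */
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
		else
			/* Unexpected: a genuine metadata failure. */
			metadata_operation_failed(pool,
					"dm_pool_alloc_data_block", r);
		return r;
	}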
 
index 5961c7794ef37008f7a10f521517aded086f20f3..07ea6a48aac69a1db35567222c4b8123f2ef9e59 100644 (file)
@@ -259,7 +259,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
        if (da != p) {
                long i;
                wc->memory_map = NULL;
-               pages = kvmalloc(p * sizeof(struct page *), GFP_KERNEL);
+               pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
                if (!pages) {
                        r = -ENOMEM;
                        goto err2;
@@ -859,7 +859,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
 
        if (wc->entries)
                return 0;
-       wc->entries = vmalloc(sizeof(struct wc_entry) * wc->n_blocks);
+       wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
        if (!wc->entries)
                return -ENOMEM;
        for (b = 0; b < wc->n_blocks; b++) {
@@ -1481,9 +1481,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
                wb->bio.bi_iter.bi_sector = read_original_sector(wc, e);
                wb->page_offset = PAGE_SIZE;
                if (max_pages <= WB_LIST_INLINE ||
-                   unlikely(!(wb->wc_list = kmalloc(max_pages * sizeof(struct wc_entry *),
-                                                    GFP_NOIO | __GFP_NORETRY |
-                                                    __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
+                   unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
+                                                          GFP_NOIO | __GFP_NORETRY |
+                                                          __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
                        wb->wc_list = wb->wc_list_inline;
                        max_pages = WB_LIST_INLINE;
                }
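
All three dm-writecache allocation fixes move to the overflow-checked helpers added for 4.18: kvmalloc_array() and kmalloc_array() refuse a count * size multiplication that would wrap, and array_size() saturates to SIZE_MAX so the allocation fails cleanly instead of being silently undersized. A sketch with a hypothetical element type:

	#include <linux/types.h>
	#include <linux/mm.h>		/* kvmalloc_array() */
	#include <linux/overflow.h>	/* array_size() */
	#include <linux/vmalloc.h>	/* vmalloc() */

	struct entry { u64 a, b; };	/* hypothetical element */

	static struct entry *alloc_entries(size_t n)
	{
		/* Unsafe: n * sizeof(struct entry) can wrap on 32-bit. */
		/* return vmalloc(n * sizeof(struct entry)); */

		/* array_size() saturates on overflow, so vmalloc() fails. */
		return vmalloc(array_size(n, sizeof(struct entry)));
	}

	static struct entry **alloc_ptrs(size_t n)
	{
		/* The checked multiply is built into the helper. */
		return kvmalloc_array(n, sizeof(struct entry *), GFP_KERNEL);
	}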
index 3c0e45f4dcf5cdf06d79b0c9d107d7455a0b6ad7..a44183ff4be0a3bd4219a7bf5854622aeca79db2 100644 (file)
@@ -787,7 +787,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
        /* Chunk BIO work */
        mutex_init(&dmz->chunk_lock);
-       INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
+       INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
        dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
                                        0, dev->name);
        if (!dmz->chunk_wq) {
index e65429a29c06e2554e8a0e23ba5a0b2a3b18a8c8..b0dd7027848b7de9f701469c6eb29b5d9c96e1df 100644 (file)
@@ -1056,8 +1056,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
        if (len < 1)
                goto out;
        nr_pages = min(len, nr_pages);
-       if (ti->type->direct_access)
-               ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
+       ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
 
  out:
        dm_put_live_table(md, srcu_idx);
@@ -1606,10 +1605,9 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
                                 * the usage of io->orig_bio in dm_remap_zone_report()
                                 * won't be affected by this reassignment.
                                 */
-                               struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
-                                                                &md->queue->bio_split);
+                               struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
+                                                         GFP_NOIO, &md->queue->bio_split);
                                ci.io->orig_bio = b;
-                               bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
                                bio_chain(b, bio);
                                ret = generic_make_request(bio);
                                break;
index 29b0cd9ec951ee4603279656e7148ba2a5b8d763..994aed2f9dfff4135170102265523045e893ac0a 100644 (file)
@@ -5547,7 +5547,8 @@ int md_run(struct mddev *mddev)
                else
                        pr_warn("md: personality for level %s is not loaded!\n",
                                mddev->clevel);
-               return -EINVAL;
+               err = -EINVAL;
+               goto abort;
        }
        spin_unlock(&pers_lock);
        if (mddev->level != pers->level) {
@@ -5560,7 +5561,8 @@ int md_run(struct mddev *mddev)
            pers->start_reshape == NULL) {
                /* This personality cannot handle reshaping... */
                module_put(pers->owner);
-               return -EINVAL;
+               err = -EINVAL;
+               goto abort;
        }
 
        if (pers->sync_request) {
@@ -5629,7 +5631,7 @@ int md_run(struct mddev *mddev)
                mddev->private = NULL;
                module_put(pers->owner);
                bitmap_destroy(mddev);
-               return err;
+               goto abort;
        }
        if (mddev->queue) {
                bool nonrot = true;
index 478cf446827f469c1d02d6f2918fcb8dd870f893..35bd3a62451b30fec0cca41fcdc687bb7920aa56 100644 (file)
@@ -3893,6 +3893,13 @@ static int raid10_run(struct mddev *mddev)
                            disk->rdev->saved_raid_disk < 0)
                                conf->fullsync = 1;
                }
+
+               if (disk->replacement &&
+                   !test_bit(In_sync, &disk->replacement->flags) &&
+                   disk->replacement->saved_raid_disk < 0) {
+                       conf->fullsync = 1;
+               }
+
                disk->recovery_disabled = mddev->recovery_disabled - 1;
        }
 
index 40826bba06b6d52c06bef7eb64bea6a719496dd0..fcfab6635f9c6649a64e0b144f55df672d621389 100644 (file)
@@ -207,29 +207,19 @@ void lirc_bpf_free(struct rc_dev *rcdev)
        bpf_prog_array_free(rcdev->raw->progs);
 }
 
-int lirc_prog_attach(const union bpf_attr *attr)
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 {
-       struct bpf_prog *prog;
        struct rc_dev *rcdev;
        int ret;
 
        if (attr->attach_flags)
                return -EINVAL;
 
-       prog = bpf_prog_get_type(attr->attach_bpf_fd,
-                                BPF_PROG_TYPE_LIRC_MODE2);
-       if (IS_ERR(prog))
-               return PTR_ERR(prog);
-
        rcdev = rc_dev_get_from_fd(attr->target_fd);
-       if (IS_ERR(rcdev)) {
-               bpf_prog_put(prog);
+       if (IS_ERR(rcdev))
                return PTR_ERR(rcdev);
-       }
 
        ret = lirc_bpf_attach(rcdev, prog);
-       if (ret)
-               bpf_prog_put(prog);
 
        put_device(&rcdev->dev);
 
index a0c655628d6d5283fd9b5cd8234b8b09857d9c75..1b64ac8c5bc86309061a5540d385eb6e2ca866cc 100644 (file)
@@ -2526,7 +2526,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 
 struct ppb_lock {
        struct flchip *chip;
-       loff_t offset;
+       unsigned long adr;
        int locked;
 };
 
@@ -2544,8 +2544,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
        unsigned long timeo;
        int ret;
 
+       adr += chip->start;
        mutex_lock(&chip->mutex);
-       ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+       ret = get_chip(map, chip, adr, FL_LOCKING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
@@ -2563,8 +2564,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
 
        if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
                chip->state = FL_LOCKING;
-               map_write(map, CMD(0xA0), chip->start + adr);
-               map_write(map, CMD(0x00), chip->start + adr);
+               map_write(map, CMD(0xA0), adr);
+               map_write(map, CMD(0x00), adr);
        } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
                /*
                 * Unlocking of one specific sector is not supported, so we
@@ -2602,7 +2603,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
        map_write(map, CMD(0x00), chip->start);
 
        chip->state = FL_READY;
-       put_chip(map, chip, adr + chip->start);
+       put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);
 
        return ret;
@@ -2659,9 +2660,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
                 * sectors shall be unlocked, so let's keep their locking
                 * status at "unlocked" (locked=0) for the final re-locking.
                 */
-               if ((adr < ofs) || (adr >= (ofs + len))) {
+               if ((offset < ofs) || (offset >= (ofs + len))) {
                        sect[sectors].chip = &cfi->chips[chipnum];
-                       sect[sectors].offset = offset;
+                       sect[sectors].adr = adr;
                        sect[sectors].locked = do_ppb_xxlock(
                                map, &cfi->chips[chipnum], adr, 0,
                                DO_XXLOCK_ONEBLOCK_GETLOCK);
@@ -2675,6 +2676,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
                        i++;
 
                if (adr >> cfi->chipshift) {
+                       if (offset >= (ofs + len))
+                               break;
                        adr = 0;
                        chipnum++;
 
@@ -2705,7 +2708,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
         */
        for (i = 0; i < sectors; i++) {
                if (sect[i].locked)
-                       do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
+                       do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
                                      DO_XXLOCK_ONEBLOCK_LOCK);
        }
 
index 3a6f450d1093c4c59d8b79bfafbc0d0c8c744722..53febe8a68c3cdfadfad784bca3335879a86d1f8 100644 (file)
@@ -733,8 +733,8 @@ static struct flash_info dataflash_data[] = {
        { "AT45DB642x",  0x1f2800, 8192, 1056, 11, SUP_POW2PS},
        { "at45db642d",  0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
 
-       { "AT45DB641E",  0x1f28000100, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
-       { "at45db641e",  0x1f28000100, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
+       { "AT45DB641E",  0x1f28000100ULL, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
+       { "at45db641e",  0x1f28000100ULL, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
 };
 
 static struct flash_info *jedec_lookup(struct spi_device *spi,
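
The dataflash fix adds ULL suffixes because 0x1f28000100 needs 40 bits: the value lands in a 64-bit JEDEC-id field, and on 32-bit builds older compilers warn that the constant is too large for 'long'. The suffix makes the unsigned long long type explicit. A stand-alone illustration with a hypothetical table:

	#include <stdio.h>

	struct flash_id {
		const char		*name;
		unsigned long long	jedec;	/* extended 40-bit JEDEC id */
	};

	static const struct flash_id table[] = {
		/* ULL keeps the 64-bit type explicit, also on 32-bit builds. */
		{ "AT45DB641E", 0x1f28000100ULL },
	};

	int main(void)
	{
		printf("%s -> %#llx\n", table[0].name, table[0].jedec);
		return 0;
	}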
index cfd33e6ca77f903a6afc636e73f31ffb40d0d0bd..5869e90cc14b3c1f367b17a4f58bdb31c1e188ea 100644 (file)
@@ -123,7 +123,11 @@ static int denali_dt_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       denali->clk_x_rate = clk_get_rate(dt->clk);
+       /*
+        * Hardcode the clock rate for backward compatibility.
+        * This works for both SOCFPGA and UniPhier.
+        */
+       denali->clk_x_rate = 200000000;
 
        ret = denali_init(denali);
        if (ret)
index 45786e707b7bd1ae5a4cba72825bcfb00fdda370..26cef218bb43ee1bd1fb2cf1dbadeb8ad38e2f8e 100644 (file)
@@ -48,7 +48,7 @@
 #define NFC_V1_V2_CONFIG               (host->regs + 0x0a)
 #define NFC_V1_V2_ECC_STATUS_RESULT    (host->regs + 0x0c)
 #define NFC_V1_V2_RSLTMAIN_AREA                (host->regs + 0x0e)
-#define NFC_V1_V2_RSLTSPARE_AREA       (host->regs + 0x10)
+#define NFC_V21_RSLTSPARE_AREA         (host->regs + 0x10)
 #define NFC_V1_V2_WRPROT               (host->regs + 0x12)
 #define NFC_V1_UNLOCKSTART_BLKADDR     (host->regs + 0x14)
 #define NFC_V1_UNLOCKEND_BLKADDR       (host->regs + 0x16)
@@ -1274,6 +1274,9 @@ static void preset_v2(struct mtd_info *mtd)
        writew(config1, NFC_V1_V2_CONFIG1);
        /* preset operation */
 
+       /* spare area size in 16-bit half-words */
+       writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
+
        /* Unlock the internal RAM Buffer */
        writew(0x2, NFC_V1_V2_CONFIG);
 
index 10c4f9919850c3e7b56ed6bdb083a0fc35a0b7f5..b01d15ec4c56bfbdded578526d76e2ed12b65093 100644 (file)
@@ -440,7 +440,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
 
        for (; page < page_end; page++) {
                res = chip->ecc.read_oob(mtd, chip, page);
-               if (res)
+               if (res < 0)
                        return res;
 
                bad = chip->oob_poi[chip->badblockpos];
index 7ed1f87e742a7accbbeb441b02e199878b3ef035..49c546c97c6f9a370ff64636deaf778bce2c3ce0 100644 (file)
 
 #include <linux/mtd/rawnand.h>
 
+/*
+ * Macronix AC series does not support using SET/GET_FEATURES to change
+ * the timings unlike what is declared in the parameter page. Unflag
+ * this feature to avoid unnecessary downturns.
+ */
+static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
+{
+       unsigned int i;
+       static const char * const broken_get_timings[] = {
+               "MX30LF1G18AC",
+               "MX30LF1G28AC",
+               "MX30LF2G18AC",
+               "MX30LF2G28AC",
+               "MX30LF4G18AC",
+               "MX30LF4G28AC",
+               "MX60LF8G18AC",
+       };
+
+       if (!chip->parameters.supports_set_get_features)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(broken_get_timings); i++) {
+               if (!strcmp(broken_get_timings[i], chip->parameters.model))
+                       break;
+       }
+
+       if (i == ARRAY_SIZE(broken_get_timings))
+               return;
+
+       bitmap_clear(chip->parameters.get_feature_list,
+                    ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+       bitmap_clear(chip->parameters.set_feature_list,
+                    ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+}
+
 static int macronix_nand_init(struct nand_chip *chip)
 {
        if (nand_is_slc(chip))
                chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
 
-       /*
-        * MX30LF2G18AC chip does not support using SET/GET_FEATURES to change
-        * the timings unlike what is declared in the parameter page. Unflag
-        * this feature to avoid unnecessary downturns.
-        */
-       if (chip->parameters.supports_set_get_features &&
-           !strcmp("MX30LF2G18AC", chip->parameters.model)) {
-               bitmap_clear(chip->parameters.get_feature_list,
-                            ONFI_FEATURE_ADDR_TIMING_MODE, 1);
-               bitmap_clear(chip->parameters.set_feature_list,
-                            ONFI_FEATURE_ADDR_TIMING_MODE, 1);
-       }
+       macronix_nand_fix_broken_get_timings(chip);
 
        return 0;
 }
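
The Macronix rework turns a single hard-coded strcmp() into a table walk, so each newly affected model is one array line. The lookup pattern, as a compilable stand-alone reduction:

	#include <stdio.h>
	#include <string.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	static int has_broken_get_timings(const char *model)
	{
		static const char * const broken[] = {
			"MX30LF1G18AC", "MX30LF2G18AC", "MX60LF8G18AC",
		};
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(broken); i++)
			if (!strcmp(broken[i], model))
				return 1;
		return 0;
	}

	int main(void)
	{
		/* prints "1 0" */
		printf("%d %d\n", has_broken_get_timings("MX30LF2G18AC"),
		       has_broken_get_timings("MX30LF1G08AA"));
		return 0;
	}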
index 0af45b134c0cf859902f3d138b305bf5836d526d..5ec4c90a637d549a644441461ffc7717c623a4bc 100644 (file)
@@ -66,7 +66,9 @@ static int micron_nand_onfi_init(struct nand_chip *chip)
 
        if (p->supports_set_get_features) {
                set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list);
+               set_bit(ONFI_FEATURE_ON_DIE_ECC, p->set_feature_list);
                set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list);
+               set_bit(ONFI_FEATURE_ON_DIE_ECC, p->get_feature_list);
        }
 
        return 0;
index 63e3844c5becf5e973e10fa2aa533f668ac8e30b..9a2ea3c1f9495312d2ac0ee47917c5797fe7b9ed 100644 (file)
@@ -4094,7 +4094,8 @@ static inline int bond_slave_override(struct bonding *bond,
 
 
 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            void *accel_priv, select_queue_fallback_t fallback)
+                            struct net_device *sb_dev,
+                            select_queue_fallback_t fallback)
 {
        /* This helper function exists to help dev_pick_tx get the correct
         * destination queue.  Using a helper function skips a call to
index 2b81b97e994f1743c67ad1aff565542a1321a07c..0b76a3a6977ed78b56466dd731600c0a22c8652d 100644 (file)
@@ -52,6 +52,17 @@ config NET_DSA_QCA8K
          This enables support for the Qualcomm Atheros QCA8K Ethernet
          switch chips.
 
+config NET_DSA_REALTEK_SMI
+       tristate "Realtek SMI Ethernet switch family support"
+       depends on NET_DSA
+       select FIXED_PHY
+       select IRQ_DOMAIN
+       select REALTEK_PHY
+       select REGMAP
+       ---help---
+         This enables support for the Realtek SMI-based switch
+         chips, currently only RTL8366RB.
+
 config NET_DSA_SMSC_LAN9303
        tristate
        select NET_DSA_TAG_LAN9303
@@ -76,4 +87,15 @@ config NET_DSA_SMSC_LAN9303_MDIO
          Enable access functions if the SMSC/Microchip LAN9303 is configured
          for MDIO managed mode.
 
+config NET_DSA_VITESSE_VSC73XX
+       tristate "Vitesse VSC7385/7388/7395/7398 support"
+       depends on OF && SPI
+       depends on NET_DSA
+       select FIXED_PHY
+       select VITESSE_PHY
+       select GPIOLIB
+       ---help---
+         This enables support for the Vitesse VSC7385, VSC7388,
+         VSC7395 and VSC7398 SparX integrated Ethernet switches.
+
 endmenu
index 15c2a831edf192b2678901c9a4c6fce7e9df62cd..46c1cba91ffebfdd35a475fcb7cc66d76f249909 100644 (file)
@@ -8,9 +8,12 @@ endif
 obj-$(CONFIG_NET_DSA_MT7530)   += mt7530.o
 obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
 obj-$(CONFIG_NET_DSA_QCA8K)    += qca8k.o
+obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek.o
+realtek-objs                   := realtek-smi.o rtl8366.o rtl8366rb.o
 obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
 obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
 obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
+obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx.o
 obj-y                          += b53/
 obj-y                          += microchip/
 obj-y                          += mv88e6xxx/
index 02e8982519cebcfad46f21dbc6c53d605e6b6f8a..ac96ff40d37e1b0f1d46902a929542f1f7fdcea9 100644 (file)
@@ -220,7 +220,7 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
                                 struct phy_device *phy)
 {
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
-       u32 off, reg;
+       u32 reg;
 
        if (priv->wol_ports_mask & (1 << port))
                return;
@@ -231,11 +231,6 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
        if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
                bcm_sf2_gphy_enable_set(ds, false);
 
-       if (dsa_is_cpu_port(ds, port))
-               off = CORE_IMP_CTL;
-       else
-               off = CORE_G_PCTL_PORT(port);
-
        b53_disable_port(ds, port, phy);
 
        /* Power down the port memory */
diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c
new file mode 100644 (file)
index 0000000..f941f45
--- /dev/null
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Realtek Simple Management Interface (SMI) driver
+ * It can be discussed how "simple" this interface is.
+ *
+ * The SMI protocol piggy-backs on the MDIO MDC and MDIO signal levels,
+ * but the protocol is not MDIO at all. Instead it is a Realtek
+ * peculiarity that needs to bit-bang the lines in a special way to
+ * communicate with the switch.
+ *
+ * ASICs we intend to support with this driver:
+ *
+ * RTL8366   - The original version, apparently
+ * RTL8369   - Similar enough to have the same datasheet as RTL8366
+ * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
+ *             different register layout from the other two
+ * RTL8366S  - Is this "RTL8366 super"?
+ * RTL8367   - Has an OpenWRT driver as well
+ * RTL8368S  - Seems to be an alternative name for RTL8366RB
+ * RTL8370   - Also uses SMI
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+
+#include "realtek-smi.h"
+
+#define REALTEK_SMI_ACK_RETRY_COUNT            5
+#define REALTEK_SMI_HW_STOP_DELAY              25      /* msecs */
+#define REALTEK_SMI_HW_START_DELAY             100     /* msecs */
+
+static inline void realtek_smi_clk_delay(struct realtek_smi *smi)
+{
+       ndelay(smi->clk_delay);
+}
+
+static void realtek_smi_start(struct realtek_smi *smi)
+{
+       /* Set GPIO pins to output mode, with initial state:
+        * SCK = 0, SDA = 1
+        */
+       gpiod_direction_output(smi->mdc, 0);
+       gpiod_direction_output(smi->mdio, 1);
+       realtek_smi_clk_delay(smi);
+
+       /* CLK 1: 0 -> 1, 1 -> 0 */
+       gpiod_set_value(smi->mdc, 1);
+       realtek_smi_clk_delay(smi);
+       gpiod_set_value(smi->mdc, 0);
+       realtek_smi_clk_delay(smi);
+
+       /* CLK 2: */
+       gpiod_set_value(smi->mdc, 1);
+       realtek_smi_clk_delay(smi);
+       gpiod_set_value(smi->mdio, 0);
+       realtek_smi_clk_delay(smi);
+       gpiod_set_value(smi->mdc, 0);
+       realtek_smi_clk_delay(smi);
+       gpiod_set_value(smi->mdio, 1);
+}
+
+static void realtek_smi_stop(struct realtek_smi *smi)
+{
+       realtek_smi_clk_delay(smi);
+       gpiod_set_value(smi->mdio, 0);
+       gpiod_set_value(smi->mdc, 1);
+       realtek_smi_clk_delay(smi);
+       gpiod_set_value(smi->mdio, 1);
+       realtek_smi_clk_delay(smi);
+       gpiod_set_value(smi->mdc, 1);
+       realtek_smi_clk_delay(smi);
+       gpiod_set_value(smi->mdc, 0);
+       realtek_smi_clk_delay(smi);
+       gpiod_set_value(smi->mdc, 1);
+
+       /* Add a click */
+       realtek_smi_clk_delay(smi);
+       gpiod_set_value(smi->mdc, 0);
+       realtek_smi_clk_delay(smi);
+       gpiod_set_value(smi->mdc, 1);
+
+       /* Set GPIO pins to input mode */
+       gpiod_direction_input(smi->mdio);
+       gpiod_direction_input(smi->mdc);
+}
+
+static void realtek_smi_write_bits(struct realtek_smi *smi, u32 data, u32 len)
+{
+       for (; len > 0; len--) {
+               realtek_smi_clk_delay(smi);
+
+               /* Prepare data */
+               gpiod_set_value(smi->mdio, !!(data & (1 << (len - 1))));
+               realtek_smi_clk_delay(smi);
+
+               /* Clocking */
+               gpiod_set_value(smi->mdc, 1);
+               realtek_smi_clk_delay(smi);
+               gpiod_set_value(smi->mdc, 0);
+       }
+}
+
+static void realtek_smi_read_bits(struct realtek_smi *smi, u32 len, u32 *data)
+{
+       gpiod_direction_input(smi->mdio);
+
+       for (*data = 0; len > 0; len--) {
+               u32 u;
+
+               realtek_smi_clk_delay(smi);
+
+               /* Clocking */
+               gpiod_set_value(smi->mdc, 1);
+               realtek_smi_clk_delay(smi);
+               u = !!gpiod_get_value(smi->mdio);
+               gpiod_set_value(smi->mdc, 0);
+
+               *data |= (u << (len - 1));
+       }
+
+       gpiod_direction_output(smi->mdio, 0);
+}
+
+static int realtek_smi_wait_for_ack(struct realtek_smi *smi)
+{
+       int retry_cnt;
+
+       retry_cnt = 0;
+       do {
+               u32 ack;
+
+               realtek_smi_read_bits(smi, 1, &ack);
+               if (ack == 0)
+                       break;
+
+               if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
+                       dev_err(smi->dev, "ACK timeout\n");
+                       return -ETIMEDOUT;
+               }
+       } while (1);
+
+       return 0;
+}
+
+static int realtek_smi_write_byte(struct realtek_smi *smi, u8 data)
+{
+       realtek_smi_write_bits(smi, data, 8);
+       return realtek_smi_wait_for_ack(smi);
+}
+
+static int realtek_smi_write_byte_noack(struct realtek_smi *smi, u8 data)
+{
+       realtek_smi_write_bits(smi, data, 8);
+       return 0;
+}
+
+static int realtek_smi_read_byte0(struct realtek_smi *smi, u8 *data)
+{
+       u32 t;
+
+       /* Read data */
+       realtek_smi_read_bits(smi, 8, &t);
+       *data = (t & 0xff);
+
+       /* Send an ACK */
+       realtek_smi_write_bits(smi, 0x00, 1);
+
+       return 0;
+}
+
+static int realtek_smi_read_byte1(struct realtek_smi *smi, u8 *data)
+{
+       u32 t;
+
+       /* Read data */
+       realtek_smi_read_bits(smi, 8, &t);
+       *data = (t & 0xff);
+
+       /* Send an ACK */
+       realtek_smi_write_bits(smi, 0x01, 1);
+
+       return 0;
+}
+
+static int realtek_smi_read_reg(struct realtek_smi *smi, u32 addr, u32 *data)
+{
+       unsigned long flags;
+       u8 lo = 0;
+       u8 hi = 0;
+       int ret;
+
+       spin_lock_irqsave(&smi->lock, flags);
+
+       realtek_smi_start(smi);
+
+       /* Send READ command */
+       ret = realtek_smi_write_byte(smi, smi->cmd_read);
+       if (ret)
+               goto out;
+
+       /* Set ADDR[7:0] */
+       ret = realtek_smi_write_byte(smi, addr & 0xff);
+       if (ret)
+               goto out;
+
+       /* Set ADDR[15:8] */
+       ret = realtek_smi_write_byte(smi, addr >> 8);
+       if (ret)
+               goto out;
+
+       /* Read DATA[7:0] */
+       realtek_smi_read_byte0(smi, &lo);
+       /* Read DATA[15:8] */
+       realtek_smi_read_byte1(smi, &hi);
+
+       *data = ((u32)lo) | (((u32)hi) << 8);
+
+       ret = 0;
+
+ out:
+       realtek_smi_stop(smi);
+       spin_unlock_irqrestore(&smi->lock, flags);
+
+       return ret;
+}
+
+static int realtek_smi_write_reg(struct realtek_smi *smi,
+                                u32 addr, u32 data, bool ack)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&smi->lock, flags);
+
+       realtek_smi_start(smi);
+
+       /* Send WRITE command */
+       ret = realtek_smi_write_byte(smi, smi->cmd_write);
+       if (ret)
+               goto out;
+
+       /* Set ADDR[7:0] */
+       ret = realtek_smi_write_byte(smi, addr & 0xff);
+       if (ret)
+               goto out;
+
+       /* Set ADDR[15:8] */
+       ret = realtek_smi_write_byte(smi, addr >> 8);
+       if (ret)
+               goto out;
+
+       /* Write DATA[7:0] */
+       ret = realtek_smi_write_byte(smi, data & 0xff);
+       if (ret)
+               goto out;
+
+       /* Write DATA[15:8] */
+       if (ack)
+               ret = realtek_smi_write_byte(smi, data >> 8);
+       else
+               ret = realtek_smi_write_byte_noack(smi, data >> 8);
+       if (ret)
+               goto out;
+
+       ret = 0;
+
+ out:
+       realtek_smi_stop(smi);
+       spin_unlock_irqrestore(&smi->lock, flags);
+
+       return ret;
+}
+
+/* There is one single case when we need to use this accessor and that
+ * is when issuing a soft reset. Since the device resets as soon as we write
+ * that bit, no ACK will come back, for natural reasons.
+ */
+int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
+                               u32 data)
+{
+       return realtek_smi_write_reg(smi, addr, data, false);
+}
+EXPORT_SYMBOL_GPL(realtek_smi_write_reg_noack);
+
+/* Regmap accessors */
+
+static int realtek_smi_write(void *ctx, u32 reg, u32 val)
+{
+       struct realtek_smi *smi = ctx;
+
+       return realtek_smi_write_reg(smi, reg, val, true);
+}
+
+static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
+{
+       struct realtek_smi *smi = ctx;
+
+       return realtek_smi_read_reg(smi, reg, val);
+}
+
+static const struct regmap_config realtek_smi_mdio_regmap_config = {
+       .reg_bits = 10, /* A4..A0 R4..R0 */
+       .val_bits = 16,
+       .reg_stride = 1,
+       /* PHY regs are at 0x8000 */
+       .max_register = 0xffff,
+       .reg_format_endian = REGMAP_ENDIAN_BIG,
+       .reg_read = realtek_smi_read,
+       .reg_write = realtek_smi_write,
+       .cache_type = REGCACHE_NONE,
+};
+
+static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+       struct realtek_smi *smi = bus->priv;
+
+       return smi->ops->phy_read(smi, addr, regnum);
+}
+
+static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
+                                 u16 val)
+{
+       struct realtek_smi *smi = bus->priv;
+
+       return smi->ops->phy_write(smi, addr, regnum, val);
+}
+
+int realtek_smi_setup_mdio(struct realtek_smi *smi)
+{
+       struct device_node *mdio_np;
+       int ret;
+
+       mdio_np = of_find_compatible_node(smi->dev->of_node, NULL,
+                                         "realtek,smi-mdio");
+       if (!mdio_np) {
+               dev_err(smi->dev, "no MDIO bus node\n");
+               return -ENODEV;
+       }
+
+       smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
+       if (!smi->slave_mii_bus)
+               return -ENOMEM;
+       smi->slave_mii_bus->priv = smi;
+       smi->slave_mii_bus->name = "SMI slave MII";
+       smi->slave_mii_bus->read = realtek_smi_mdio_read;
+       smi->slave_mii_bus->write = realtek_smi_mdio_write;
+       snprintf(smi->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
+                smi->ds->index);
+       smi->slave_mii_bus->dev.of_node = mdio_np;
+       smi->slave_mii_bus->parent = smi->dev;
+       smi->ds->slave_mii_bus = smi->slave_mii_bus;
+
+       ret = of_mdiobus_register(smi->slave_mii_bus, mdio_np);
+       if (ret) {
+               dev_err(smi->dev, "unable to register MDIO bus %s\n",
+                       smi->slave_mii_bus->id);
+               of_node_put(mdio_np);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int realtek_smi_probe(struct platform_device *pdev)
+{
+       const struct realtek_smi_variant *var;
+       struct device *dev = &pdev->dev;
+       struct realtek_smi *smi;
+       struct device_node *np;
+       int ret;
+
+       var = of_device_get_match_data(dev);
+       np = dev->of_node;
+
+       smi = devm_kzalloc(dev, sizeof(*smi), GFP_KERNEL);
+       if (!smi)
+               return -ENOMEM;
+       smi->map = devm_regmap_init(dev, NULL, smi,
+                                   &realtek_smi_mdio_regmap_config);
+       if (IS_ERR(smi->map)) {
+               ret = PTR_ERR(smi->map);
+               dev_err(dev, "regmap init failed: %d\n", ret);
+               return ret;
+       }
+
+       /* Link forward and backward */
+       smi->dev = dev;
+       smi->clk_delay = var->clk_delay;
+       smi->cmd_read = var->cmd_read;
+       smi->cmd_write = var->cmd_write;
+       smi->ops = var->ops;
+
+       dev_set_drvdata(dev, smi);
+       spin_lock_init(&smi->lock);
+
+       /* TODO: if power is software controlled, set up any regulators here */
+
+       /* Assert then deassert RESET */
+       smi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+       if (IS_ERR(smi->reset)) {
+               dev_err(dev, "failed to get RESET GPIO\n");
+               return PTR_ERR(smi->reset);
+       }
+       msleep(REALTEK_SMI_HW_STOP_DELAY);
+       gpiod_set_value(smi->reset, 0);
+       msleep(REALTEK_SMI_HW_START_DELAY);
+       dev_info(dev, "deasserted RESET\n");
+
+       /* Fetch MDIO pins */
+       smi->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
+       if (IS_ERR(smi->mdc))
+               return PTR_ERR(smi->mdc);
+       smi->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
+       if (IS_ERR(smi->mdio))
+               return PTR_ERR(smi->mdio);
+
+       smi->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
+
+       ret = smi->ops->detect(smi);
+       if (ret) {
+               dev_err(dev, "unable to detect switch\n");
+               return ret;
+       }
+
+       smi->ds = dsa_switch_alloc(dev, smi->num_ports);
+       if (!smi->ds)
+               return -ENOMEM;
+       smi->ds->priv = smi;
+
+       smi->ds->ops = var->ds_ops;
+       ret = dsa_register_switch(smi->ds);
+       if (ret) {
+               dev_err(dev, "unable to register switch ret = %d\n", ret);
+               return ret;
+       }
+       return 0;
+}
+
+static int realtek_smi_remove(struct platform_device *pdev)
+{
+       struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
+
+       dsa_unregister_switch(smi->ds);
+       gpiod_set_value(smi->reset, 1);
+
+       return 0;
+}
+
+static const struct of_device_id realtek_smi_of_match[] = {
+       {
+               .compatible = "realtek,rtl8366rb",
+               .data = &rtl8366rb_variant,
+       },
+       {
+               /* FIXME: add support for RTL8366S and more */
+               .compatible = "realtek,rtl8366s",
+               .data = NULL,
+       },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
+
+static struct platform_driver realtek_smi_driver = {
+       .driver = {
+               .name = "realtek-smi",
+               .of_match_table = of_match_ptr(realtek_smi_of_match),
+       },
+       .probe  = realtek_smi_probe,
+       .remove = realtek_smi_remove,
+};
+module_platform_driver(realtek_smi_driver);
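
With the regmap registered in probe, the rest of the driver and the rtl8366 library can use the standard regmap API on top of the bit-banged SMI transport. A hypothetical read-modify-write, not taken from this patch:

	#include <linux/bits.h>
	#include <linux/regmap.h>

	/* Sketch: set one bit in a switch register through the SMI regmap. */
	static int example_set_bit(struct realtek_smi *smi, u32 reg, u32 bit)
	{
		/* Internally issues one bit-banged READ and one WRITE. */
		return regmap_update_bits(smi->map, reg, BIT(bit), BIT(bit));
	}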
diff --git a/drivers/net/dsa/realtek-smi.h b/drivers/net/dsa/realtek-smi.h
new file mode 100644 (file)
index 0000000..9a63b51
--- /dev/null
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Realtek SMI interface driver defines
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#ifndef _REALTEK_SMI_H
+#define _REALTEK_SMI_H
+
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <net/dsa.h>
+
+struct realtek_smi_ops;
+struct dentry;
+struct inode;
+struct file;
+
+struct rtl8366_mib_counter {
+       unsigned int    base;
+       unsigned int    offset;
+       unsigned int    length;
+       const char      *name;
+};
+
+struct rtl8366_vlan_mc {
+       u16     vid;
+       u16     untag;
+       u16     member;
+       u8      fid;
+       u8      priority;
+};
+
+struct rtl8366_vlan_4k {
+       u16     vid;
+       u16     untag;
+       u16     member;
+       u8      fid;
+};
+
+struct realtek_smi {
+       struct device           *dev;
+       struct gpio_desc        *reset;
+       struct gpio_desc        *mdc;
+       struct gpio_desc        *mdio;
+       struct regmap           *map;
+       struct mii_bus          *slave_mii_bus;
+
+       unsigned int            clk_delay;
+       u8                      cmd_read;
+       u8                      cmd_write;
+       spinlock_t              lock; /* Locks around command writes */
+       struct dsa_switch       *ds;
+       struct irq_domain       *irqdomain;
+       bool                    leds_disabled;
+
+       unsigned int            cpu_port;
+       unsigned int            num_ports;
+       unsigned int            num_vlan_mc;
+       unsigned int            num_mib_counters;
+       struct rtl8366_mib_counter *mib_counters;
+
+       const struct realtek_smi_ops *ops;
+
+       int                     vlan_enabled;
+       int                     vlan4k_enabled;
+
+       char                    buf[4096];
+};
+
+/**
+ * struct realtek_smi_ops - vtable for the per-SMI-chiptype operations
+ * @detect: detects the chiptype
+ */
+struct realtek_smi_ops {
+       int     (*detect)(struct realtek_smi *smi);
+       int     (*reset_chip)(struct realtek_smi *smi);
+       int     (*setup)(struct realtek_smi *smi);
+       void    (*cleanup)(struct realtek_smi *smi);
+       int     (*get_mib_counter)(struct realtek_smi *smi,
+                                  int port,
+                                  struct rtl8366_mib_counter *mib,
+                                  u64 *mibvalue);
+       int     (*get_vlan_mc)(struct realtek_smi *smi, u32 index,
+                              struct rtl8366_vlan_mc *vlanmc);
+       int     (*set_vlan_mc)(struct realtek_smi *smi, u32 index,
+                              const struct rtl8366_vlan_mc *vlanmc);
+       int     (*get_vlan_4k)(struct realtek_smi *smi, u32 vid,
+                              struct rtl8366_vlan_4k *vlan4k);
+       int     (*set_vlan_4k)(struct realtek_smi *smi,
+                              const struct rtl8366_vlan_4k *vlan4k);
+       int     (*get_mc_index)(struct realtek_smi *smi, int port, int *val);
+       int     (*set_mc_index)(struct realtek_smi *smi, int port, int index);
+       bool    (*is_vlan_valid)(struct realtek_smi *smi, unsigned int vlan);
+       int     (*enable_vlan)(struct realtek_smi *smi, bool enable);
+       int     (*enable_vlan4k)(struct realtek_smi *smi, bool enable);
+       int     (*enable_port)(struct realtek_smi *smi, int port, bool enable);
+       int     (*phy_read)(struct realtek_smi *smi, int phy, int regnum);
+       int     (*phy_write)(struct realtek_smi *smi, int phy, int regnum,
+                            u16 val);
+};
+
+struct realtek_smi_variant {
+       const struct dsa_switch_ops *ds_ops;
+       const struct realtek_smi_ops *ops;
+       unsigned int clk_delay;
+       u8 cmd_read;
+       u8 cmd_write;
+};
+
+/* SMI core calls */
+int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
+                               u32 data);
+int realtek_smi_setup_mdio(struct realtek_smi *smi);
+
+/* RTL8366 library helpers */
+int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
+int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+                    u32 untag, u32 fid);
+int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val);
+int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+                    unsigned int vid);
+int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
+int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
+int rtl8366_reset_vlan(struct realtek_smi *smi);
+int rtl8366_init_vlan(struct realtek_smi *smi);
+int rtl8366_vlan_filtering(struct dsa_switch *ds, int port,
+                          bool vlan_filtering);
+int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
+                        const struct switchdev_obj_port_vlan *vlan);
+void rtl8366_vlan_add(struct dsa_switch *ds, int port,
+                     const struct switchdev_obj_port_vlan *vlan);
+int rtl8366_vlan_del(struct dsa_switch *ds, int port,
+                    const struct switchdev_obj_port_vlan *vlan);
+void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+                        uint8_t *data);
+int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
+void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
+
+extern const struct realtek_smi_variant rtl8366rb_variant;
+
+#endif /*  _REALTEK_SMI_H */
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
new file mode 100644 (file)
index 0000000..6dedd43
--- /dev/null
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Realtek SMI library helpers for the RTL8366x variants
+ * RTL8366RB and RTL8366S
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ */
+#include <linux/if_bridge.h>
+#include <net/dsa.h>
+
+#include "realtek-smi.h"
+
+int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
+{
+       int ret;
+       int i;
+
+       *used = 0;
+       for (i = 0; i < smi->num_ports; i++) {
+               int index = 0;
+
+               ret = smi->ops->get_mc_index(smi, i, &index);
+               if (ret)
+                       return ret;
+
+               if (mc_index == index) {
+                       *used = 1;
+                       break;
+               }
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
+
+int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+                    u32 untag, u32 fid)
+{
+       struct rtl8366_vlan_4k vlan4k;
+       int ret;
+       int i;
+
+       /* Update the 4K table */
+       ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+       if (ret)
+               return ret;
+
+       vlan4k.member = member;
+       vlan4k.untag = untag;
+       vlan4k.fid = fid;
+       ret = smi->ops->set_vlan_4k(smi, &vlan4k);
+       if (ret)
+               return ret;
+
+       /* Try to find an existing MC entry for this VID */
+       for (i = 0; i < smi->num_vlan_mc; i++) {
+               struct rtl8366_vlan_mc vlanmc;
+
+               ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+               if (ret)
+                       return ret;
+
+               if (vid == vlanmc.vid) {
+                       /* update the MC entry */
+                       vlanmc.member = member;
+                       vlanmc.untag = untag;
+                       vlanmc.fid = fid;
+
+                       ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+                       break;
+               }
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
+
+int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val)
+{
+       struct rtl8366_vlan_mc vlanmc;
+       int ret;
+       int index;
+
+       ret = smi->ops->get_mc_index(smi, port, &index);
+       if (ret)
+               return ret;
+
+       ret = smi->ops->get_vlan_mc(smi, index, &vlanmc);
+       if (ret)
+               return ret;
+
+       *val = vlanmc.vid;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_get_pvid);
+
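+/* Setting the PVID means pointing the port at an MC entry holding the
+ * VID. Try, in order: an MC entry already caching this VID, an empty
+ * entry, and finally an entry that no port currently uses; if all three
+ * passes fail, the MC table is exhausted and -ENOSPC is returned.
+ */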
+int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+                    unsigned int vid)
+{
+       struct rtl8366_vlan_mc vlanmc;
+       struct rtl8366_vlan_4k vlan4k;
+       int ret;
+       int i;
+
+       /* Try to find an existing MC entry for this VID */
+       for (i = 0; i < smi->num_vlan_mc; i++) {
+               ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+               if (ret)
+                       return ret;
+
+               if (vid == vlanmc.vid) {
+                       ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+                       if (ret)
+                               return ret;
+
+                       ret = smi->ops->set_mc_index(smi, port, i);
+                       return ret;
+               }
+       }
+
+       /* We have no MC entry for this VID, try to find an empty one */
+       for (i = 0; i < smi->num_vlan_mc; i++) {
+               ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+               if (ret)
+                       return ret;
+
+               if (vlanmc.vid == 0 && vlanmc.member == 0) {
+                       /* Update the entry from the 4K table */
+                       ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+                       if (ret)
+                               return ret;
+
+                       vlanmc.vid = vid;
+                       vlanmc.member = vlan4k.member;
+                       vlanmc.untag = vlan4k.untag;
+                       vlanmc.fid = vlan4k.fid;
+                       ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+                       if (ret)
+                               return ret;
+
+                       ret = smi->ops->set_mc_index(smi, port, i);
+                       return ret;
+               }
+       }
+
+       /* MC table is full, try to find an unused entry and replace it */
+       for (i = 0; i < smi->num_vlan_mc; i++) {
+               int used;
+
+               ret = rtl8366_mc_is_used(smi, i, &used);
+               if (ret)
+                       return ret;
+
+               if (!used) {
+                       /* Update the entry from the 4K table */
+                       ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+                       if (ret)
+                               return ret;
+
+                       vlanmc.vid = vid;
+                       vlanmc.member = vlan4k.member;
+                       vlanmc.untag = vlan4k.untag;
+                       vlanmc.fid = vlan4k.fid;
+                       ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+                       if (ret)
+                               return ret;
+
+                       ret = smi->ops->set_mc_index(smi, port, i);
+                       return ret;
+               }
+       }
+
+       dev_err(smi->dev,
+               "all VLAN member configurations are in use\n");
+
+       return -ENOSPC;
+}
+EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
+
+int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
+{
+       int ret;
+
+       /* To enable 4k VLAN, ordinary VLAN must be enabled first,
+        * but if we disable 4k VLAN it is fine to leave ordinary
+        * VLAN enabled.
+        */
+       if (enable) {
+               /* Make sure VLAN is ON */
+               ret = smi->ops->enable_vlan(smi, true);
+               if (ret)
+                       return ret;
+
+               smi->vlan_enabled = true;
+       }
+
+       ret = smi->ops->enable_vlan4k(smi, enable);
+       if (ret)
+               return ret;
+
+       smi->vlan4k_enabled = enable;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
+
+int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable)
+{
+       int ret;
+
+       ret = smi->ops->enable_vlan(smi, enable);
+       if (ret)
+               return ret;
+
+       smi->vlan_enabled = enable;
+
+       /* If we turn VLAN off, make sure that we turn off
+        * 4k VLAN as well, if that happened to be on.
+        */
+       if (!enable) {
+               smi->vlan4k_enabled = false;
+               ret = smi->ops->enable_vlan4k(smi, false);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
+
+int rtl8366_reset_vlan(struct realtek_smi *smi)
+{
+       struct rtl8366_vlan_mc vlanmc;
+       int ret;
+       int i;
+
+       rtl8366_enable_vlan(smi, false);
+       rtl8366_enable_vlan4k(smi, false);
+
+       /* Clear the 16 VLAN member configurations */
+       vlanmc.vid = 0;
+       vlanmc.priority = 0;
+       vlanmc.member = 0;
+       vlanmc.untag = 0;
+       vlanmc.fid = 0;
+       for (i = 0; i < smi->num_vlan_mc; i++) {
+               ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
+
+int rtl8366_init_vlan(struct realtek_smi *smi)
+{
+       int port;
+       int ret;
+
+       ret = rtl8366_reset_vlan(smi);
+       if (ret)
+               return ret;
+
+       /* Loop over the available ports; associate each port with
+        * VLAN (port + 1).
+        */
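+       /* E.g. with six ports and CPU port 5: port 0 becomes a member of
+        * VLAN 1 with mask 0x21 (itself plus the CPU port), while the CPU
+        * port becomes a member of VLAN 6 with mask 0x3f (all ports).
+        */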
+       for (port = 0; port < smi->num_ports; port++) {
+               u32 mask;
+
+               if (port == smi->cpu_port)
+                       /* For the CPU port, make all ports members of this
+                        * VLAN.
+                        */
+                       mask = GENMASK(smi->num_ports - 1, 0);
+               else
+                       /* For all other ports, enable itself plus the
+                        * CPU port.
+                        */
+                       mask = BIT(port) | BIT(smi->cpu_port);
+
+               /* For each port, set the port as a member of VLAN (port+1),
+                * untagged, except for the CPU port: the CPU port (5) is a
+                * member of VLAN 6, and so are ALL the other ports as well.
+                * Use filter 0 (no filter).
+                */
+               dev_info(smi->dev, "VLAN%d port mask for port %d, %08x\n",
+                        (port + 1), port, mask);
+               ret = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0);
+               if (ret)
+                       return ret;
+
+               dev_info(smi->dev, "VLAN%d port %d, PVID set to %d\n",
+                        (port + 1), port, (port + 1));
+               ret = rtl8366_set_pvid(smi, port, (port + 1));
+               if (ret)
+                       return ret;
+       }
+
+       return rtl8366_enable_vlan(smi, true);
+}
+EXPORT_SYMBOL_GPL(rtl8366_init_vlan);
+
+int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
+{
+       struct realtek_smi *smi = ds->priv;
+       struct rtl8366_vlan_4k vlan4k;
+       int ret;
+
+       if (!smi->ops->is_vlan_valid(smi, port))
+               return -EINVAL;
+
+       dev_info(smi->dev, "%s filtering on port %d\n",
+                vlan_filtering ? "enable" : "disable",
+                port);
+
+       /* TODO:
+        * The hardware supports filter IDs (FID) 0..7, but it is unclear
+        * how to support this in the driver when the callback only says
+        * on/off.
+        */
+       ret = smi->ops->get_vlan_4k(smi, port, &vlan4k);
+       if (ret)
+               return ret;
+
+       /* Just set the filter to FID 1 for now then */
+       ret = rtl8366_set_vlan(smi, port,
+                              vlan4k.member,
+                              vlan4k.untag,
+                              1);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering);
+
+int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
+                        const struct switchdev_obj_port_vlan *vlan)
+{
+       struct realtek_smi *smi = ds->priv;
+       int ret;
+
+       if (!smi->ops->is_vlan_valid(smi, port))
+               return -EINVAL;
+
+       dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
+                vlan->vid_begin, vlan->vid_end);
+
+       /* Enable VLAN in the hardware
+        * FIXME: what's with this 4k business?
+        * Just rtl8366_enable_vlan() seems inconclusive.
+        */
+       ret = rtl8366_enable_vlan4k(smi, true);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_vlan_prepare);
+
+void rtl8366_vlan_add(struct dsa_switch *ds, int port,
+                     const struct switchdev_obj_port_vlan *vlan)
+{
+       bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+       bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID);
+       struct realtek_smi *smi = ds->priv;
+       u32 member = 0;
+       u32 untag = 0;
+       u16 vid;
+       int ret;
+
+       if (!smi->ops->is_vlan_valid(smi, port))
+               return;
+
+       dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
+                port,
+                untagged ? "untagged" : "tagged",
+                pvid ? "PVID" : "no PVID");
+
+       if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+               dev_err(smi->dev, "port is DSA or CPU port\n");
+
+       for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+               int pvid_val = 0;
+
+               dev_info(smi->dev, "add VLAN %04x\n", vid);
+               member |= BIT(port);
+
+               if (untagged)
+                       untag |= BIT(port);
+
+               /* To ensure that we have a valid MC entry for this VLAN,
+                * initialize the port VLAN ID here.
+                */
+               ret = rtl8366_get_pvid(smi, port, &pvid_val);
+               if (ret < 0) {
+                       dev_err(smi->dev, "could not lookup PVID for port %d\n",
+                               port);
+                       return;
+               }
+               if (pvid_val == 0) {
+                       ret = rtl8366_set_pvid(smi, port, vid);
+                       if (ret < 0)
+                               return;
+               }
+       }
+
+       ret = rtl8366_set_vlan(smi, port, member, untag, 0);
+       if (ret)
+               dev_err(smi->dev,
+                       "failed to set up VLAN %04x\n",
+                       vid);
+}
+EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
+
+int rtl8366_vlan_del(struct dsa_switch *ds, int port,
+                    const struct switchdev_obj_port_vlan *vlan)
+{
+       struct realtek_smi *smi = ds->priv;
+       u16 vid;
+       int ret;
+
+       dev_info(smi->dev, "del VLAN on port %d\n", port);
+
+       for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+               int i;
+
+               dev_info(smi->dev, "del VLAN %04x\n", vid);
+
+               for (i = 0; i < smi->num_vlan_mc; i++) {
+                       struct rtl8366_vlan_mc vlanmc;
+
+                       ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+                       if (ret)
+                               return ret;
+
+                       if (vid == vlanmc.vid) {
+                               /* clear VLAN member configurations */
+                               vlanmc.vid = 0;
+                               vlanmc.priority = 0;
+                               vlanmc.member = 0;
+                               vlanmc.untag = 0;
+                               vlanmc.fid = 0;
+
+                               ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+                               if (ret) {
+                                       dev_err(smi->dev,
+                                               "failed to remove VLAN %04x\n",
+                                               vid);
+                                       return ret;
+                               }
+                               break;
+                       }
+               }
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
+
+void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+                        uint8_t *data)
+{
+       struct realtek_smi *smi = ds->priv;
+       struct rtl8366_mib_counter *mib;
+       int i;
+
+       if (port >= smi->num_ports)
+               return;
+
+       for (i = 0; i < smi->num_mib_counters; i++) {
+               mib = &smi->mib_counters[i];
+               strncpy(data + i * ETH_GSTRING_LEN,
+                       mib->name, ETH_GSTRING_LEN);
+       }
+}
+EXPORT_SYMBOL_GPL(rtl8366_get_strings);
+
+int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+       struct realtek_smi *smi = ds->priv;
+
+       /* We only support SS_STATS */
+       if (sset != ETH_SS_STATS)
+               return 0;
+       if (port >= smi->num_ports)
+               return -EINVAL;
+
+       return smi->num_mib_counters;
+}
+EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
+
+void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
+{
+       struct realtek_smi *smi = ds->priv;
+       int i;
+       int ret;
+
+       if (port >= smi->num_ports)
+               return;
+
+       for (i = 0; i < smi->num_mib_counters; i++) {
+               struct rtl8366_mib_counter *mib;
+               u64 mibvalue = 0;
+
+               mib = &smi->mib_counters[i];
+               ret = smi->ops->get_mib_counter(smi, port, mib, &mibvalue);
+               if (ret) {
+                       dev_err(smi->dev, "error reading MIB counter %s\n",
+                               mib->name);
+               }
+               data[i] = mibvalue;
+       }
+}
+EXPORT_SYMBOL_GPL(rtl8366_get_ethtool_stats);
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
new file mode 100644 (file)
index 0000000..1e55b9b
--- /dev/null
@@ -0,0 +1,1424 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Realtek SMI subdriver for the Realtek RTL8366RB ethernet switch
+ *
+/* This is a sparsely documented chip; the only viable documentation seems
+ * to be a patched-up code drop from the vendor that appears in various
+ * GPL source trees.
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#include "realtek-smi.h"
+
+#define RTL8366RB_PORT_NUM_CPU         5
+#define RTL8366RB_NUM_PORTS            6
+#define RTL8366RB_PHY_NO_MAX           4
+#define RTL8366RB_PHY_ADDR_MAX         31
+
+/* Switch Global Configuration register */
+#define RTL8366RB_SGCR                         0x0000
+#define RTL8366RB_SGCR_EN_BC_STORM_CTRL                BIT(0)
+#define RTL8366RB_SGCR_MAX_LENGTH(a)           ((a) << 4)
+#define RTL8366RB_SGCR_MAX_LENGTH_MASK         RTL8366RB_SGCR_MAX_LENGTH(0x3)
+#define RTL8366RB_SGCR_MAX_LENGTH_1522         RTL8366RB_SGCR_MAX_LENGTH(0x0)
+#define RTL8366RB_SGCR_MAX_LENGTH_1536         RTL8366RB_SGCR_MAX_LENGTH(0x1)
+#define RTL8366RB_SGCR_MAX_LENGTH_1552         RTL8366RB_SGCR_MAX_LENGTH(0x2)
+#define RTL8366RB_SGCR_MAX_LENGTH_9216         RTL8366RB_SGCR_MAX_LENGTH(0x3)
+#define RTL8366RB_SGCR_EN_VLAN                 BIT(13)
+#define RTL8366RB_SGCR_EN_VLAN_4KTB            BIT(14)
+
+/* Port Enable Control register */
+#define RTL8366RB_PECR                         0x0001
+
+/* Switch Security Control registers */
+#define RTL8366RB_SSCR0                                0x0002
+#define RTL8366RB_SSCR1                                0x0003
+#define RTL8366RB_SSCR2                                0x0004
+#define RTL8366RB_SSCR2_DROP_UNKNOWN_DA                BIT(0)
+
+/* Port Mirror Control Register */
+#define RTL8366RB_PMCR                         0x0007
+#define RTL8366RB_PMCR_SOURCE_PORT(a)          (a)
+#define RTL8366RB_PMCR_SOURCE_PORT_MASK                0x000f
+#define RTL8366RB_PMCR_MONITOR_PORT(a)         ((a) << 4)
+#define RTL8366RB_PMCR_MONITOR_PORT_MASK       0x00f0
+#define RTL8366RB_PMCR_MIRROR_RX               BIT(8)
+#define RTL8366RB_PMCR_MIRROR_TX               BIT(9)
+#define RTL8366RB_PMCR_MIRROR_SPC              BIT(10)
+#define RTL8366RB_PMCR_MIRROR_ISO              BIT(11)
+
+/* bits 0..7 = port 0, bits 8..15 = port 1 */
+#define RTL8366RB_PAACR0               0x0010
+/* bits 0..7 = port 2, bits 8..15 = port 3 */
+#define RTL8366RB_PAACR1               0x0011
+/* bits 0..7 = port 4, bits 8..15 = port 5 */
+#define RTL8366RB_PAACR2               0x0012
+#define RTL8366RB_PAACR_SPEED_10M      0
+#define RTL8366RB_PAACR_SPEED_100M     1
+#define RTL8366RB_PAACR_SPEED_1000M    2
+#define RTL8366RB_PAACR_FULL_DUPLEX    BIT(2)
+#define RTL8366RB_PAACR_LINK_UP                BIT(4)
+#define RTL8366RB_PAACR_TX_PAUSE       BIT(5)
+#define RTL8366RB_PAACR_RX_PAUSE       BIT(6)
+#define RTL8366RB_PAACR_AN             BIT(7)
+
+#define RTL8366RB_PAACR_CPU_PORT       (RTL8366RB_PAACR_SPEED_1000M | \
+                                        RTL8366RB_PAACR_FULL_DUPLEX | \
+                                        RTL8366RB_PAACR_LINK_UP | \
+                                        RTL8366RB_PAACR_TX_PAUSE | \
+                                        RTL8366RB_PAACR_RX_PAUSE)
+
+/* bits 0..7 = port 0, bits 8..15 = port 1 */
+#define RTL8366RB_PSTAT0               0x0014
+/* bits 0..7 = port 2, bits 8..15 = port 3 */
+#define RTL8366RB_PSTAT1               0x0015
+/* bits 0..7 = port 4, bits 8..15 = port 5 */
+#define RTL8366RB_PSTAT2               0x0016
+
+#define RTL8366RB_POWER_SAVING_REG     0x0021
+
+/* CPU port control reg */
+#define RTL8368RB_CPU_CTRL_REG         0x0061
+#define RTL8368RB_CPU_PORTS_MSK                0x00FF
+/* Enables inserting custom tag length/type 0x8899 */
+#define RTL8368RB_CPU_INSTAG           BIT(15)
+
+#define RTL8366RB_SMAR0                        0x0070 /* bits 0..15 */
+#define RTL8366RB_SMAR1                        0x0071 /* bits 16..31 */
+#define RTL8366RB_SMAR2                        0x0072 /* bits 32..47 */
+
+#define RTL8366RB_RESET_CTRL_REG               0x0100
+#define RTL8366RB_CHIP_CTRL_RESET_HW           BIT(0)
+#define RTL8366RB_CHIP_CTRL_RESET_SW           BIT(1)
+
+#define RTL8366RB_CHIP_ID_REG                  0x0509
+#define RTL8366RB_CHIP_ID_8366                 0x5937
+#define RTL8366RB_CHIP_VERSION_CTRL_REG                0x050A
+#define RTL8366RB_CHIP_VERSION_MASK            0xf
+
+/* PHY registers control */
+#define RTL8366RB_PHY_ACCESS_CTRL_REG          0x8000
+#define RTL8366RB_PHY_CTRL_READ                        BIT(0)
+#define RTL8366RB_PHY_CTRL_WRITE               0
+#define RTL8366RB_PHY_ACCESS_BUSY_REG          0x8001
+#define RTL8366RB_PHY_INT_BUSY                 BIT(0)
+#define RTL8366RB_PHY_EXT_BUSY                 BIT(4)
+#define RTL8366RB_PHY_ACCESS_DATA_REG          0x8002
+#define RTL8366RB_PHY_EXT_CTRL_REG             0x8010
+#define RTL8366RB_PHY_EXT_WRDATA_REG           0x8011
+#define RTL8366RB_PHY_EXT_RDDATA_REG           0x8012
+
+#define RTL8366RB_PHY_REG_MASK                 0x1f
+#define RTL8366RB_PHY_PAGE_OFFSET              5
+#define RTL8366RB_PHY_PAGE_MASK                        (0xf << 5)
+#define RTL8366RB_PHY_NO_OFFSET                        9
+#define RTL8366RB_PHY_NO_MASK                  (0x1f << 9)
+
+#define RTL8366RB_VLAN_INGRESS_CTRL2_REG       0x037f
+
+/* LED control registers */
+#define RTL8366RB_LED_BLINKRATE_REG            0x0430
+#define RTL8366RB_LED_BLINKRATE_MASK           0x0007
+#define RTL8366RB_LED_BLINKRATE_28MS           0x0000
+#define RTL8366RB_LED_BLINKRATE_56MS           0x0001
+#define RTL8366RB_LED_BLINKRATE_84MS           0x0002
+#define RTL8366RB_LED_BLINKRATE_111MS          0x0003
+#define RTL8366RB_LED_BLINKRATE_222MS          0x0004
+#define RTL8366RB_LED_BLINKRATE_446MS          0x0005
+
+#define RTL8366RB_LED_CTRL_REG                 0x0431
+#define RTL8366RB_LED_OFF                      0x0
+#define RTL8366RB_LED_DUP_COL                  0x1
+#define RTL8366RB_LED_LINK_ACT                 0x2
+#define RTL8366RB_LED_SPD1000                  0x3
+#define RTL8366RB_LED_SPD100                   0x4
+#define RTL8366RB_LED_SPD10                    0x5
+#define RTL8366RB_LED_SPD1000_ACT              0x6
+#define RTL8366RB_LED_SPD100_ACT               0x7
+#define RTL8366RB_LED_SPD10_ACT                        0x8
+#define RTL8366RB_LED_SPD100_10_ACT            0x9
+#define RTL8366RB_LED_FIBER                    0xa
+#define RTL8366RB_LED_AN_FAULT                 0xb
+#define RTL8366RB_LED_LINK_RX                  0xc
+#define RTL8366RB_LED_LINK_TX                  0xd
+#define RTL8366RB_LED_MASTER                   0xe
+#define RTL8366RB_LED_FORCE                    0xf
+#define RTL8366RB_LED_0_1_CTRL_REG             0x0432
+#define RTL8366RB_LED_1_OFFSET                 6
+#define RTL8366RB_LED_2_3_CTRL_REG             0x0433
+#define RTL8366RB_LED_3_OFFSET                 6
+
+#define RTL8366RB_MIB_COUNT                    33
+#define RTL8366RB_GLOBAL_MIB_COUNT             1
+#define RTL8366RB_MIB_COUNTER_PORT_OFFSET      0x0050
+#define RTL8366RB_MIB_COUNTER_BASE             0x1000
+#define RTL8366RB_MIB_CTRL_REG                 0x13F0
+#define RTL8366RB_MIB_CTRL_USER_MASK           0x0FFC
+#define RTL8366RB_MIB_CTRL_BUSY_MASK           BIT(0)
+#define RTL8366RB_MIB_CTRL_RESET_MASK          BIT(1)
+#define RTL8366RB_MIB_CTRL_PORT_RESET(_p)      BIT(2 + (_p))
+#define RTL8366RB_MIB_CTRL_GLOBAL_RESET                BIT(11)
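+/* E.g. resetting the MIB counters of port 2 means writing
+ * RTL8366RB_MIB_CTRL_PORT_RESET(2) == BIT(4) to RTL8366RB_MIB_CTRL_REG
+ */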
+
+#define RTL8366RB_PORT_VLAN_CTRL_BASE          0x0063
+#define RTL8366RB_PORT_VLAN_CTRL_REG(_p)  \
+               (RTL8366RB_PORT_VLAN_CTRL_BASE + (_p) / 4)
+#define RTL8366RB_PORT_VLAN_CTRL_MASK          0xf
+#define RTL8366RB_PORT_VLAN_CTRL_SHIFT(_p)     (4 * ((_p) % 4))
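+/* E.g. port 5 is configured in RTL8366RB_PORT_VLAN_CTRL_REG(5) == 0x0064,
+ * in bits 4..7, since its shift is 4 * (5 % 4) == 4
+ */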
+
+#define RTL8366RB_VLAN_TABLE_READ_BASE         0x018C
+#define RTL8366RB_VLAN_TABLE_WRITE_BASE                0x0185
+
+#define RTL8366RB_TABLE_ACCESS_CTRL_REG                0x0180
+#define RTL8366RB_TABLE_VLAN_READ_CTRL         0x0E01
+#define RTL8366RB_TABLE_VLAN_WRITE_CTRL                0x0F01
+
+#define RTL8366RB_VLAN_MC_BASE(_x)             (0x0020 + (_x) * 3)
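+/* Each VLAN member config occupies three consecutive registers, so e.g.
+ * member config 3 lives at 0x0029..0x002B
+ */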
+
+#define RTL8366RB_PORT_LINK_STATUS_BASE                0x0014
+#define RTL8366RB_PORT_STATUS_SPEED_MASK       0x0003
+#define RTL8366RB_PORT_STATUS_DUPLEX_MASK      0x0004
+#define RTL8366RB_PORT_STATUS_LINK_MASK                0x0010
+#define RTL8366RB_PORT_STATUS_TXPAUSE_MASK     0x0020
+#define RTL8366RB_PORT_STATUS_RXPAUSE_MASK     0x0040
+#define RTL8366RB_PORT_STATUS_AN_MASK          0x0080
+
+#define RTL8366RB_NUM_VLANS            16
+#define RTL8366RB_NUM_LEDGROUPS                4
+#define RTL8366RB_NUM_VIDS             4096
+#define RTL8366RB_PRIORITYMAX          7
+#define RTL8366RB_FIDMAX               7
+
+#define RTL8366RB_PORT_1               BIT(0) /* In userspace port 0 */
+#define RTL8366RB_PORT_2               BIT(1) /* In userspace port 1 */
+#define RTL8366RB_PORT_3               BIT(2) /* In userspace port 2 */
+#define RTL8366RB_PORT_4               BIT(3) /* In userspace port 3 */
+#define RTL8366RB_PORT_5               BIT(4) /* In userspace port 4 */
+
+#define RTL8366RB_PORT_CPU             BIT(5) /* CPU port */
+
+#define RTL8366RB_PORT_ALL             (RTL8366RB_PORT_1 |     \
+                                        RTL8366RB_PORT_2 |     \
+                                        RTL8366RB_PORT_3 |     \
+                                        RTL8366RB_PORT_4 |     \
+                                        RTL8366RB_PORT_5 |     \
+                                        RTL8366RB_PORT_CPU)
+
+#define RTL8366RB_PORT_ALL_BUT_CPU     (RTL8366RB_PORT_1 |     \
+                                        RTL8366RB_PORT_2 |     \
+                                        RTL8366RB_PORT_3 |     \
+                                        RTL8366RB_PORT_4 |     \
+                                        RTL8366RB_PORT_5)
+
+#define RTL8366RB_PORT_ALL_EXTERNAL    (RTL8366RB_PORT_1 |     \
+                                        RTL8366RB_PORT_2 |     \
+                                        RTL8366RB_PORT_3 |     \
+                                        RTL8366RB_PORT_4)
+
+#define RTL8366RB_PORT_ALL_INTERNAL     RTL8366RB_PORT_CPU
+
+/* First configuration word per member config, VID and prio */
+#define RTL8366RB_VLAN_VID_MASK                0xfff
+#define RTL8366RB_VLAN_PRIORITY_SHIFT  12
+#define RTL8366RB_VLAN_PRIORITY_MASK   0x7
+/* Second configuration word per member config, member and untagged */
+#define RTL8366RB_VLAN_UNTAG_SHIFT     8
+#define RTL8366RB_VLAN_UNTAG_MASK      0xff
+#define RTL8366RB_VLAN_MEMBER_MASK     0xff
+/* Third config word per member config, STAG currently unused */
+#define RTL8366RB_VLAN_STAG_MBR_MASK   0xff
+#define RTL8366RB_VLAN_STAG_MBR_SHIFT  8
+#define RTL8366RB_VLAN_STAG_IDX_MASK   0x7
+#define RTL8366RB_VLAN_STAG_IDX_SHIFT  5
+#define RTL8366RB_VLAN_FID_MASK                0x7
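+/* Putting the masks and shifts above together, a member config entry is:
+ * word 0: | prio (14:12) | VID (11:0) |
+ * word 1: | untag (15:8) | member (7:0) |
+ * word 2: | STAG member (15:8) | STAG index (7:5) | FID (2:0) |
+ */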
+
+/* Port ingress bandwidth control */
+#define RTL8366RB_IB_BASE              0x0200
+#define RTL8366RB_IB_REG(pnum)         (RTL8366RB_IB_BASE + (pnum))
+#define RTL8366RB_IB_BDTH_MASK         0x3fff
+#define RTL8366RB_IB_PREIFG            BIT(14)
+
+/* Port egress bandwidth control */
+#define RTL8366RB_EB_BASE              0x02d1
+#define RTL8366RB_EB_REG(pnum)         (RTL8366RB_EB_BASE + (pnum))
+#define RTL8366RB_EB_BDTH_MASK         0x3fff
+#define RTL8366RB_EB_PREIFG_REG                0x02f8
+#define RTL8366RB_EB_PREIFG            BIT(9)
+
+#define RTL8366RB_BDTH_SW_MAX          1048512 /* 1048576? */
+#define RTL8366RB_BDTH_UNIT            64
+#define RTL8366RB_BDTH_REG_DEFAULT     16383
+
+/* QOS */
+#define RTL8366RB_QOS                  BIT(15)
+/* Include/Exclude Preamble and IFG (20 bytes). 0:Exclude, 1:Include. */
+#define RTL8366RB_QOS_DEFAULT_PREIFG   1
+
+/* Interrupt handling */
+#define RTL8366RB_INTERRUPT_CONTROL_REG        0x0440
+#define RTL8366RB_INTERRUPT_POLARITY   BIT(0)
+#define RTL8366RB_P4_RGMII_LED         BIT(2)
+#define RTL8366RB_INTERRUPT_MASK_REG   0x0441
+#define RTL8366RB_INTERRUPT_LINK_CHGALL        GENMASK(11, 0)
+#define RTL8366RB_INTERRUPT_ACLEXCEED  BIT(8)
+#define RTL8366RB_INTERRUPT_STORMEXCEED        BIT(9)
+#define RTL8366RB_INTERRUPT_P4_FIBER   BIT(12)
+#define RTL8366RB_INTERRUPT_P4_UTP     BIT(13)
+#define RTL8366RB_INTERRUPT_VALID      (RTL8366RB_INTERRUPT_LINK_CHGALL | \
+                                        RTL8366RB_INTERRUPT_ACLEXCEED | \
+                                        RTL8366RB_INTERRUPT_STORMEXCEED | \
+                                        RTL8366RB_INTERRUPT_P4_FIBER | \
+                                        RTL8366RB_INTERRUPT_P4_UTP)
+#define RTL8366RB_INTERRUPT_STATUS_REG 0x0442
+#define RTL8366RB_NUM_INTERRUPT                14 /* 0..13 */
+
+/* bits 0..5 enable force when cleared */
+#define RTL8366RB_MAC_FORCE_CTRL_REG   0x0F11
+
+#define RTL8366RB_OAM_PARSER_REG       0x0F14
+#define RTL8366RB_OAM_MULTIPLEXER_REG  0x0F15
+
+#define RTL8366RB_GREEN_FEATURE_REG    0x0F51
+#define RTL8366RB_GREEN_FEATURE_MSK    0x0007
+#define RTL8366RB_GREEN_FEATURE_TX     BIT(0)
+#define RTL8366RB_GREEN_FEATURE_RX     BIT(2)
+
+static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
+       { 0,  0, 4, "IfInOctets"                                },
+       { 0,  4, 4, "EtherStatsOctets"                          },
+       { 0,  8, 2, "EtherStatsUnderSizePkts"                   },
+       { 0, 10, 2, "EtherFragments"                            },
+       { 0, 12, 2, "EtherStatsPkts64Octets"                    },
+       { 0, 14, 2, "EtherStatsPkts65to127Octets"               },
+       { 0, 16, 2, "EtherStatsPkts128to255Octets"              },
+       { 0, 18, 2, "EtherStatsPkts256to511Octets"              },
+       { 0, 20, 2, "EtherStatsPkts512to1023Octets"             },
+       { 0, 22, 2, "EtherStatsPkts1024to1518Octets"            },
+       { 0, 24, 2, "EtherOversizeStats"                        },
+       { 0, 26, 2, "EtherStatsJabbers"                         },
+       { 0, 28, 2, "IfInUcastPkts"                             },
+       { 0, 30, 2, "EtherStatsMulticastPkts"                   },
+       { 0, 32, 2, "EtherStatsBroadcastPkts"                   },
+       { 0, 34, 2, "EtherStatsDropEvents"                      },
+       { 0, 36, 2, "Dot3StatsFCSErrors"                        },
+       { 0, 38, 2, "Dot3StatsSymbolErrors"                     },
+       { 0, 40, 2, "Dot3InPauseFrames"                         },
+       { 0, 42, 2, "Dot3ControlInUnknownOpcodes"               },
+       { 0, 44, 4, "IfOutOctets"                               },
+       { 0, 48, 2, "Dot3StatsSingleCollisionFrames"            },
+       { 0, 50, 2, "Dot3StatMultipleCollisionFrames"           },
+       { 0, 52, 2, "Dot3sDeferredTransmissions"                },
+       { 0, 54, 2, "Dot3StatsLateCollisions"                   },
+       { 0, 56, 2, "EtherStatsCollisions"                      },
+       { 0, 58, 2, "Dot3StatsExcessiveCollisions"              },
+       { 0, 60, 2, "Dot3OutPauseFrames"                        },
+       { 0, 62, 2, "Dot1dBasePortDelayExceededDiscards"        },
+       { 0, 64, 2, "Dot1dTpPortInDiscards"                     },
+       { 0, 66, 2, "IfOutUcastPkts"                            },
+       { 0, 68, 2, "IfOutMulticastPkts"                        },
+       { 0, 70, 2, "IfOutBroadcastPkts"                        },
+};
+
+static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
+                                    int port,
+                                    struct rtl8366_mib_counter *mib,
+                                    u64 *mibvalue)
+{
+       u32 addr, val;
+       int ret;
+       int i;
+
+       addr = RTL8366RB_MIB_COUNTER_BASE +
+               RTL8366RB_MIB_COUNTER_PORT_OFFSET * (port) +
+               mib->offset;
+
+       /* Write the counter address first; the ASIC will then prepare
+        * the 64-bit counter value to be retrieved
+        */
+       ret = regmap_write(smi->map, addr, 0); /* Write whatever */
+       if (ret)
+               return ret;
+
+       /* Read MIB control register */
+       ret = regmap_read(smi->map, RTL8366RB_MIB_CTRL_REG, &val);
+       if (ret)
+               return -EIO;
+
+       if (val & RTL8366RB_MIB_CTRL_BUSY_MASK)
+               return -EBUSY;
+
+       if (val & RTL8366RB_MIB_CTRL_RESET_MASK)
+               return -EIO;
+
+       /* Read each individual MIB counter 16 bits at a time */
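+       /* E.g. a 4-halfword counter such as IfInOctets is read from
+        * addr + 3 down to addr, so the halfword at the highest address
+        * ends up as the most significant 16 bits of the result.
+        */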
+       *mibvalue = 0;
+       for (i = mib->length; i > 0; i--) {
+               ret = regmap_read(smi->map, addr + (i - 1), &val);
+               if (ret)
+                       return ret;
+               *mibvalue = (*mibvalue << 16) | (val & 0xFFFF);
+       }
+       return 0;
+}
+
+static u32 rtl8366rb_get_irqmask(struct irq_data *d)
+{
+       int line = irqd_to_hwirq(d);
+       u32 val;
+
+       /* For line interrupts we combine link down in bits
+        * 6..11 with link up in bits 0..5 into one interrupt.
+        */
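+       /* E.g. hwirq line 2 (port 2) maps to BIT(2) | BIT(8) */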
+       if (line < 12)
+               val = BIT(line) | BIT(line + 6);
+       else
+               val = BIT(line);
+       return val;
+}
+
+static void rtl8366rb_mask_irq(struct irq_data *d)
+{
+       struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
+       int ret;
+
+       ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+                                rtl8366rb_get_irqmask(d), 0);
+       if (ret)
+               dev_err(smi->dev, "could not mask IRQ\n");
+}
+
+static void rtl8366rb_unmask_irq(struct irq_data *d)
+{
+       struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
+       int ret;
+
+       ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+                                rtl8366rb_get_irqmask(d),
+                                rtl8366rb_get_irqmask(d));
+       if (ret)
+               dev_err(smi->dev, "could not unmask IRQ\n");
+}
+
+static irqreturn_t rtl8366rb_irq(int irq, void *data)
+{
+       struct realtek_smi *smi = data;
+       u32 stat;
+       int ret;
+
+       /* This clears the IRQ status register */
+       ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
+                         &stat);
+       if (ret) {
+               dev_err(smi->dev, "can't read interrupt status\n");
+               return IRQ_NONE;
+       }
+       stat &= RTL8366RB_INTERRUPT_VALID;
+       if (!stat)
+               return IRQ_NONE;
+       while (stat) {
+               int line = __ffs(stat);
+               int child_irq;
+
+               stat &= ~BIT(line);
+               /* For line interrupts we combine link down in bits
+                * 6..11 with link up in bits 0..5 into one interrupt.
+                */
+               if (line < 12 && line > 5)
+                       line -= 5;
+               child_irq = irq_find_mapping(smi->irqdomain, line);
+               handle_nested_irq(child_irq);
+       }
+       return IRQ_HANDLED;
+}
+
+static struct irq_chip rtl8366rb_irq_chip = {
+       .name = "RTL8366RB",
+       .irq_mask = rtl8366rb_mask_irq,
+       .irq_unmask = rtl8366rb_unmask_irq,
+};
+
+static int rtl8366rb_irq_map(struct irq_domain *domain, unsigned int irq,
+                            irq_hw_number_t hwirq)
+{
+       irq_set_chip_data(irq, domain->host_data);
+       irq_set_chip_and_handler(irq, &rtl8366rb_irq_chip, handle_simple_irq);
+       irq_set_nested_thread(irq, 1);
+       irq_set_noprobe(irq);
+
+       return 0;
+}
+
+static void rtl8366rb_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+       irq_set_nested_thread(irq, 0);
+       irq_set_chip_and_handler(irq, NULL, NULL);
+       irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops rtl8366rb_irqdomain_ops = {
+       .map = rtl8366rb_irq_map,
+       .unmap = rtl8366rb_irq_unmap,
+       .xlate  = irq_domain_xlate_onecell,
+};
+
+static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
+{
+       struct device_node *intc;
+       unsigned long irq_trig;
+       int irq;
+       int ret;
+       u32 val;
+       int i;
+
+       intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+       if (!intc) {
+               dev_err(smi->dev, "missing child interrupt-controller node\n");
+               return -EINVAL;
+       }
+       /* RTL8366RB IRQs cascade off this one */
+       irq = of_irq_get(intc, 0);
+       if (irq <= 0) {
+               dev_err(smi->dev, "failed to get parent IRQ\n");
+               return irq ? irq : -EINVAL;
+       }
+
+       /* This clears the IRQ status register */
+       ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
+                         &val);
+       if (ret) {
+               dev_err(smi->dev, "can't read interrupt status\n");
+               return ret;
+       }
+
+       /* Fetch IRQ edge information from the descriptor */
+       irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+       switch (irq_trig) {
+       case IRQF_TRIGGER_RISING:
+       case IRQF_TRIGGER_HIGH:
+               dev_info(smi->dev, "active high/rising IRQ\n");
+               val = 0;
+               break;
+       case IRQF_TRIGGER_FALLING:
+       case IRQF_TRIGGER_LOW:
+               dev_info(smi->dev, "active low/falling IRQ\n");
+               val = RTL8366RB_INTERRUPT_POLARITY;
+               break;
+       default:
+               dev_err(smi->dev, "unsupported IRQ trigger type %lu\n",
+                       irq_trig);
+               return -EINVAL;
+       }
+       ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_CONTROL_REG,
+                                RTL8366RB_INTERRUPT_POLARITY,
+                                val);
+       if (ret) {
+               dev_err(smi->dev, "could not configure IRQ polarity\n");
+               return ret;
+       }
+
+       ret = devm_request_threaded_irq(smi->dev, irq, NULL,
+                                       rtl8366rb_irq, IRQF_ONESHOT,
+                                       "RTL8366RB", smi);
+       if (ret) {
+               dev_err(smi->dev, "unable to request irq: %d\n", ret);
+               return ret;
+       }
+       smi->irqdomain = irq_domain_add_linear(intc,
+                                              RTL8366RB_NUM_INTERRUPT,
+                                              &rtl8366rb_irqdomain_ops,
+                                              smi);
+       if (!smi->irqdomain) {
+               dev_err(smi->dev, "failed to create IRQ domain\n");
+               return -EINVAL;
+       }
+       for (i = 0; i < smi->num_ports; i++)
+               irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
+
+       return 0;
+}
+
+static int rtl8366rb_set_addr(struct realtek_smi *smi)
+{
+       u8 addr[ETH_ALEN];
+       u16 val;
+       int ret;
+
+       eth_random_addr(addr);
+
+       dev_info(smi->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
+                addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+       val = addr[0] << 8 | addr[1];
+       ret = regmap_write(smi->map, RTL8366RB_SMAR0, val);
+       if (ret)
+               return ret;
+       val = addr[2] << 8 | addr[3];
+       ret = regmap_write(smi->map, RTL8366RB_SMAR1, val);
+       if (ret)
+               return ret;
+       val = addr[4] << 8 | addr[5];
+       ret = regmap_write(smi->map, RTL8366RB_SMAR2, val);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/* Found in a vendor driver */
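+/* Each of the jam tables below is a flat list of (register, value)
+ * pairs, written out two entries at a time by the loop in
+ * rtl8366rb_setup(). Entries whose register matches the 0xBExx pattern
+ * appear to address PHY-internal registers: for those, the setup loop
+ * first primes a PHY write command if the PHY access busy flag is not
+ * set.
+ */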
+
+/* For the "version 0" early silicon; appears in most source releases */
+static const u16 rtl8366rb_init_jam_ver_0[] = {
+       0x000B, 0x0001, 0x03A6, 0x0100, 0x03A7, 0x0001, 0x02D1, 0x3FFF,
+       0x02D2, 0x3FFF, 0x02D3, 0x3FFF, 0x02D4, 0x3FFF, 0x02D5, 0x3FFF,
+       0x02D6, 0x3FFF, 0x02D7, 0x3FFF, 0x02D8, 0x3FFF, 0x022B, 0x0688,
+       0x022C, 0x0FAC, 0x03D0, 0x4688, 0x03D1, 0x01F5, 0x0000, 0x0830,
+       0x02F9, 0x0200, 0x02F7, 0x7FFF, 0x02F8, 0x03FF, 0x0080, 0x03E8,
+       0x0081, 0x00CE, 0x0082, 0x00DA, 0x0083, 0x0230, 0xBE0F, 0x2000,
+       0x0231, 0x422A, 0x0232, 0x422A, 0x0233, 0x422A, 0x0234, 0x422A,
+       0x0235, 0x422A, 0x0236, 0x422A, 0x0237, 0x422A, 0x0238, 0x422A,
+       0x0239, 0x422A, 0x023A, 0x422A, 0x023B, 0x422A, 0x023C, 0x422A,
+       0x023D, 0x422A, 0x023E, 0x422A, 0x023F, 0x422A, 0x0240, 0x422A,
+       0x0241, 0x422A, 0x0242, 0x422A, 0x0243, 0x422A, 0x0244, 0x422A,
+       0x0245, 0x422A, 0x0246, 0x422A, 0x0247, 0x422A, 0x0248, 0x422A,
+       0x0249, 0x0146, 0x024A, 0x0146, 0x024B, 0x0146, 0xBE03, 0xC961,
+       0x024D, 0x0146, 0x024E, 0x0146, 0x024F, 0x0146, 0x0250, 0x0146,
+       0xBE64, 0x0226, 0x0252, 0x0146, 0x0253, 0x0146, 0x024C, 0x0146,
+       0x0251, 0x0146, 0x0254, 0x0146, 0xBE62, 0x3FD0, 0x0084, 0x0320,
+       0x0255, 0x0146, 0x0256, 0x0146, 0x0257, 0x0146, 0x0258, 0x0146,
+       0x0259, 0x0146, 0x025A, 0x0146, 0x025B, 0x0146, 0x025C, 0x0146,
+       0x025D, 0x0146, 0x025E, 0x0146, 0x025F, 0x0146, 0x0260, 0x0146,
+       0x0261, 0xA23F, 0x0262, 0x0294, 0x0263, 0xA23F, 0x0264, 0x0294,
+       0x0265, 0xA23F, 0x0266, 0x0294, 0x0267, 0xA23F, 0x0268, 0x0294,
+       0x0269, 0xA23F, 0x026A, 0x0294, 0x026B, 0xA23F, 0x026C, 0x0294,
+       0x026D, 0xA23F, 0x026E, 0x0294, 0x026F, 0xA23F, 0x0270, 0x0294,
+       0x02F5, 0x0048, 0xBE09, 0x0E00, 0xBE1E, 0x0FA0, 0xBE14, 0x8448,
+       0xBE15, 0x1007, 0xBE4A, 0xA284, 0xC454, 0x3F0B, 0xC474, 0x3F0B,
+       0xBE48, 0x3672, 0xBE4B, 0x17A7, 0xBE4C, 0x0B15, 0xBE52, 0x0EDD,
+       0xBE49, 0x8C00, 0xBE5B, 0x785C, 0xBE5C, 0x785C, 0xBE5D, 0x785C,
+       0xBE61, 0x368A, 0xBE63, 0x9B84, 0xC456, 0xCC13, 0xC476, 0xCC13,
+       0xBE65, 0x307D, 0xBE6D, 0x0005, 0xBE6E, 0xE120, 0xBE2E, 0x7BAF,
+};
+
+/* This v1 init sequence is from Belkin F5D8235 U-Boot release */
+static const u16 rtl8366rb_init_jam_ver_1[] = {
+       0x0000, 0x0830, 0x0001, 0x8000, 0x0400, 0x8130, 0xBE78, 0x3C3C,
+       0x0431, 0x5432, 0xBE37, 0x0CE4, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0,
+       0xC44C, 0x1585, 0xC44C, 0x1185, 0xC44C, 0x1585, 0xC46C, 0x1585,
+       0xC46C, 0x1185, 0xC46C, 0x1585, 0xC451, 0x2135, 0xC471, 0x2135,
+       0xBE10, 0x8140, 0xBE15, 0x0007, 0xBE6E, 0xE120, 0xBE69, 0xD20F,
+       0xBE6B, 0x0320, 0xBE24, 0xB000, 0xBE23, 0xFF51, 0xBE22, 0xDF20,
+       0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800, 0xBE24, 0x0000,
+       0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60, 0xBE21, 0x0140,
+       0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000, 0xBE2E, 0x7B7A,
+       0xBE36, 0x0CE4, 0x02F5, 0x0048, 0xBE77, 0x2940, 0x000A, 0x83E0,
+       0xBE79, 0x3C3C, 0xBE00, 0x1340,
+};
+
+/* This v2 init sequence is from Belkin F5D8235 U-Boot release */
+static const u16 rtl8366rb_init_jam_ver_2[] = {
+       0x0450, 0x0000, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432,
+       0xC44F, 0x6250, 0xC46F, 0x6250, 0xC456, 0x0C14, 0xC476, 0x0C14,
+       0xC44C, 0x1C85, 0xC44C, 0x1885, 0xC44C, 0x1C85, 0xC46C, 0x1C85,
+       0xC46C, 0x1885, 0xC46C, 0x1C85, 0xC44C, 0x0885, 0xC44C, 0x0881,
+       0xC44C, 0x0885, 0xC46C, 0x0885, 0xC46C, 0x0881, 0xC46C, 0x0885,
+       0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001,
+       0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6E, 0x0320,
+       0xBE77, 0x2940, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120,
+       0x8000, 0x0001, 0xBE15, 0x1007, 0x8000, 0x0000, 0xBE15, 0x1007,
+       0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160, 0xBE10, 0x8140,
+       0xBE00, 0x1340, 0x0F51, 0x0010,
+};
+
+/* Appears in a DD-WRT code dump */
+static const u16 rtl8366rb_init_jam_ver_3[] = {
+       0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0431, 0x5432,
+       0x0F51, 0x0017, 0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0,
+       0xC456, 0x0C14, 0xC476, 0x0C14, 0xC454, 0x3F8B, 0xC474, 0x3F8B,
+       0xC450, 0x2071, 0xC470, 0x2071, 0xC451, 0x226B, 0xC471, 0x226B,
+       0xC452, 0xA293, 0xC472, 0xA293, 0xC44C, 0x1585, 0xC44C, 0x1185,
+       0xC44C, 0x1585, 0xC46C, 0x1585, 0xC46C, 0x1185, 0xC46C, 0x1585,
+       0xC44C, 0x0185, 0xC44C, 0x0181, 0xC44C, 0x0185, 0xC46C, 0x0185,
+       0xC46C, 0x0181, 0xC46C, 0x0185, 0xBE24, 0xB000, 0xBE23, 0xFF51,
+       0xBE22, 0xDF20, 0xBE21, 0x0140, 0xBE20, 0x00BB, 0xBE24, 0xB800,
+       0xBE24, 0x0000, 0xBE24, 0x7000, 0xBE23, 0xFF51, 0xBE22, 0xDF60,
+       0xBE21, 0x0140, 0xBE20, 0x0077, 0xBE24, 0x7800, 0xBE24, 0x0000,
+       0xBE2E, 0x7BA7, 0xBE36, 0x1000, 0xBE37, 0x1000, 0x8000, 0x0001,
+       0xBE69, 0xD50F, 0x8000, 0x0000, 0xBE69, 0xD50F, 0xBE6B, 0x0320,
+       0xBE77, 0x2800, 0xBE78, 0x3C3C, 0xBE79, 0x3C3C, 0xBE6E, 0xE120,
+       0x8000, 0x0001, 0xBE10, 0x8140, 0x8000, 0x0000, 0xBE10, 0x8140,
+       0xBE15, 0x1007, 0xBE14, 0x0448, 0xBE1E, 0x00A0, 0xBE10, 0x8160,
+       0xBE10, 0x8140, 0xBE00, 0x1340, 0x0450, 0x0000, 0x0401, 0x0000,
+};
+
+/* Belkin F5D8235 v1, "belkin,f5d8235-v1" */
+static const u16 rtl8366rb_init_jam_f5d8235[] = {
+       0x0242, 0x02BF, 0x0245, 0x02BF, 0x0248, 0x02BF, 0x024B, 0x02BF,
+       0x024E, 0x02BF, 0x0251, 0x02BF, 0x0254, 0x0A3F, 0x0256, 0x0A3F,
+       0x0258, 0x0A3F, 0x025A, 0x0A3F, 0x025C, 0x0A3F, 0x025E, 0x0A3F,
+       0x0263, 0x007C, 0x0100, 0x0004, 0xBE5B, 0x3500, 0x800E, 0x200F,
+       0xBE1D, 0x0F00, 0x8001, 0x5011, 0x800A, 0xA2F4, 0x800B, 0x17A3,
+       0xBE4B, 0x17A3, 0xBE41, 0x5011, 0xBE17, 0x2100, 0x8000, 0x8304,
+       0xBE40, 0x8304, 0xBE4A, 0xA2F4, 0x800C, 0xA8D5, 0x8014, 0x5500,
+       0x8015, 0x0004, 0xBE4C, 0xA8D5, 0xBE59, 0x0008, 0xBE09, 0x0E00,
+       0xBE36, 0x1036, 0xBE37, 0x1036, 0x800D, 0x00FF, 0xBE4D, 0x00FF,
+};
+
+/* DGN3500, "netgear,dgn3500", "netgear,dgn3500b" */
+static const u16 rtl8366rb_init_jam_dgn3500[] = {
+       0x0000, 0x0830, 0x0400, 0x8130, 0x000A, 0x83ED, 0x0F51, 0x0017,
+       0x02F5, 0x0048, 0x02FA, 0xFFDF, 0x02FB, 0xFFE0, 0x0450, 0x0000,
+       0x0401, 0x0000, 0x0431, 0x0960,
+};
+
+/* This jam table activates "green ethernet": a low-power mode that is
+ * claimed to detect the cable length so as not to use more power than
+ * necessary, and to put the ports into power-saving mode 10 seconds
+ * after a cable is disconnected. It seems to always be the same.
+ */
+static const u16 rtl8366rb_green_jam[][2] = {
+       {0xBE78, 0x323C}, {0xBE77, 0x5000}, {0xBE2E, 0x7BA7},
+       {0xBE59, 0x3459}, {0xBE5A, 0x745A}, {0xBE5B, 0x785C},
+       {0xBE5C, 0x785C}, {0xBE6E, 0xE120}, {0xBE79, 0x323C},
+};
+
+static int rtl8366rb_setup(struct dsa_switch *ds)
+{
+       struct realtek_smi *smi = ds->priv;
+       const u16 *jam_table;
+       u32 chip_ver = 0;
+       u32 chip_id = 0;
+       int jam_size;
+       u32 val;
+       int ret;
+       int i;
+
+       ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id);
+       if (ret) {
+               dev_err(smi->dev, "unable to read chip id\n");
+               return ret;
+       }
+
+       switch (chip_id) {
+       case RTL8366RB_CHIP_ID_8366:
+               break;
+       default:
+               dev_err(smi->dev, "unknown chip id (%04x)\n", chip_id);
+               return -ENODEV;
+       }
+
+       ret = regmap_read(smi->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
+                         &chip_ver);
+       if (ret) {
+               dev_err(smi->dev, "unable to read chip version\n");
+               return ret;
+       }
+
+       dev_info(smi->dev, "RTL%04x ver %u chip found\n",
+                chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK);
+
+       /* Do the init dance using the right jam table */
+       switch (chip_ver) {
+       case 0:
+               jam_table = rtl8366rb_init_jam_ver_0;
+               jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_0);
+               break;
+       case 1:
+               jam_table = rtl8366rb_init_jam_ver_1;
+               jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_1);
+               break;
+       case 2:
+               jam_table = rtl8366rb_init_jam_ver_2;
+               jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_2);
+               break;
+       default:
+               jam_table = rtl8366rb_init_jam_ver_3;
+               jam_size = ARRAY_SIZE(rtl8366rb_init_jam_ver_3);
+               break;
+       }
+
+       /* Special jam tables for special routers
+        * TODO: are these necessary? Maintainers, please test
+        * without them, using just the off-the-shelf tables.
+        */
+       if (of_machine_is_compatible("belkin,f5d8235-v1")) {
+               jam_table = rtl8366rb_init_jam_f5d8235;
+               jam_size = ARRAY_SIZE(rtl8366rb_init_jam_f5d8235);
+       }
+       if (of_machine_is_compatible("netgear,dgn3500") ||
+           of_machine_is_compatible("netgear,dgn3500b")) {
+               jam_table = rtl8366rb_init_jam_dgn3500;
+               jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500);
+       }
+
+       i = 0;
+       while (i < jam_size) {
+               if ((jam_table[i] & 0xBE00) == 0xBE00) {
+                       ret = regmap_read(smi->map,
+                                         RTL8366RB_PHY_ACCESS_BUSY_REG,
+                                         &val);
+                       if (ret)
+                               return ret;
+                       if (!(val & RTL8366RB_PHY_INT_BUSY)) {
+                               ret = regmap_write(smi->map,
+                                               RTL8366RB_PHY_ACCESS_CTRL_REG,
+                                               RTL8366RB_PHY_CTRL_WRITE);
+                               if (ret)
+                                       return ret;
+                       }
+               }
+               dev_dbg(smi->dev, "jam %04x into register %04x\n",
+                       jam_table[i + 1],
+                       jam_table[i]);
+               ret = regmap_write(smi->map,
+                                  jam_table[i],
+                                  jam_table[i + 1]);
+               if (ret)
+                       return ret;
+               i += 2;
+       }
+
+       /* Set up the "green ethernet" feature */
+       i = 0;
+       while (i < ARRAY_SIZE(rtl8366rb_green_jam)) {
+               ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_BUSY_REG,
+                                 &val);
+               if (ret)
+                       return ret;
+               if (!(val & RTL8366RB_PHY_INT_BUSY)) {
+                       ret = regmap_write(smi->map,
+                                          RTL8366RB_PHY_ACCESS_CTRL_REG,
+                                          RTL8366RB_PHY_CTRL_WRITE);
+                       if (ret)
+                               return ret;
+                       ret = regmap_write(smi->map,
+                                          rtl8366rb_green_jam[i][0],
+                                          rtl8366rb_green_jam[i][1]);
+                       if (ret)
+                               return ret;
+                       i++;
+               }
+       }
+       ret = regmap_write(smi->map,
+                          RTL8366RB_GREEN_FEATURE_REG,
+                          (chip_ver == 1) ? 0x0007 : 0x0003);
+       if (ret)
+               return ret;
+
+       /* Vendor driver sets 0x240 in registers 0xc and 0xd (undocumented) */
+       ret = regmap_write(smi->map, 0x0c, 0x240);
+       if (ret)
+               return ret;
+       ret = regmap_write(smi->map, 0x0d, 0x240);
+       if (ret)
+               return ret;
+
+       /* Set a random MAC address */
+       ret = rtl8366rb_set_addr(smi);
+       if (ret)
+               return ret;
+
+       /* Enable CPU port and enable inserting CPU tag
+        *
+        * Disabling RTL8368RB_CPU_INSTAG here will change the behaviour
+        * of the switch totally and it will start talking Realtek RRCP
+        * internally. It is probably possible to experiment with this,
+        * but then the kernel needs to understand and handle RRCP first.
+        */
+       ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG,
+                                0xFFFF,
+                                RTL8368RB_CPU_INSTAG | BIT(smi->cpu_port));
+       if (ret)
+               return ret;
+
+       /* Make sure we default-enable the fixed CPU port */
+       ret = regmap_update_bits(smi->map, RTL8366RB_PECR,
+                                BIT(smi->cpu_port),
+                                0);
+       if (ret)
+               return ret;
+
+       /* Set maximum packet length to 1536 bytes */
+       ret = regmap_update_bits(smi->map, RTL8366RB_SGCR,
+                                RTL8366RB_SGCR_MAX_LENGTH_MASK,
+                                RTL8366RB_SGCR_MAX_LENGTH_1536);
+       if (ret)
+               return ret;
+
+       /* Enable learning for all ports */
+       ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0);
+       if (ret)
+               return ret;
+
+       /* Enable auto ageing for all ports */
+       ret = regmap_write(smi->map, RTL8366RB_SSCR1, 0);
+       if (ret)
+               return ret;
+
+       /* Discard VLAN tagged packets if the port is not a member of
+        * the VLAN with which the packet is associated.
+        */
+       ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+                          RTL8366RB_PORT_ALL);
+       if (ret)
+               return ret;
+
+       /* Don't drop packets whose DA has not been learned */
+       ret = regmap_update_bits(smi->map, RTL8366RB_SSCR2,
+                                RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0);
+       if (ret)
+               return ret;
+
+       /* Set blinking, TODO: make this configurable */
+       ret = regmap_update_bits(smi->map, RTL8366RB_LED_BLINKRATE_REG,
+                                RTL8366RB_LED_BLINKRATE_MASK,
+                                RTL8366RB_LED_BLINKRATE_56MS);
+       if (ret)
+               return ret;
+
+       /* Set up LED activity:
+        * Each port has 4 LEDs; we configure all ports to the same
+        * behaviour (no individual config) but we can set up each
+        * LED separately.
+        */
+       if (smi->leds_disabled) {
+               /* Turn everything off */
+               regmap_update_bits(smi->map,
+                                  RTL8366RB_LED_0_1_CTRL_REG,
+                                  0x0FFF, 0);
+               regmap_update_bits(smi->map,
+                                  RTL8366RB_LED_2_3_CTRL_REG,
+                                  0x0FFF, 0);
+               regmap_update_bits(smi->map,
+                                  RTL8366RB_INTERRUPT_CONTROL_REG,
+                                  RTL8366RB_P4_RGMII_LED,
+                                  0);
+               val = RTL8366RB_LED_OFF;
+       } else {
+               /* TODO: make this configurable per LED */
+               val = RTL8366RB_LED_FORCE;
+       }
+       for (i = 0; i < 4; i++) {
+               ret = regmap_update_bits(smi->map,
+                                        RTL8366RB_LED_CTRL_REG,
+                                        0xf << (i * 4),
+                                        val << (i * 4));
+               if (ret)
+                       return ret;
+       }
+
+       ret = rtl8366_init_vlan(smi);
+       if (ret)
+               return ret;
+
+       ret = rtl8366rb_setup_cascaded_irq(smi);
+       if (ret)
+               dev_info(smi->dev, "no interrupt support\n");
+
+       ret = realtek_smi_setup_mdio(smi);
+       if (ret) {
+               dev_info(smi->dev, "could not set up MDIO bus\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds,
+                                                     int port)
+{
+       /* For now, the RTL switches are handled without any custom tags.
+        *
+        * It is possible to turn on "custom tags" by removing the
+        * RTL8368RB_CPU_INSTAG flag when enabling the port, but what that
+        * does is unfamiliar to DSA: ethernet frames of type 0x8899, the
+        * Realtek Remote Control Protocol (RRCP), start to appear on the
+        * CPU port of the device. So this is not the ordinary few extra
+        * bytes in the frame. Instead, it appears that the switch starts
+        * talking RRCP internally, which means a pretty complex RRCP
+        * implementation decoding and responding to the protocol would be
+        * needed to exploit this.
+        *
+        * The OpenRRCP project (dormant since 2009) has reverse-engineered
+        * parts of the protocol.
+        */
+       return DSA_TAG_PROTO_NONE;
+}
+
+static void rtl8366rb_adjust_link(struct dsa_switch *ds, int port,
+                                 struct phy_device *phydev)
+{
+       struct realtek_smi *smi = ds->priv;
+       int ret;
+
+       if (port != smi->cpu_port)
+               return;
+
+       dev_info(smi->dev, "adjust link on CPU port (%d)\n", port);
+
+       /* Force the fixed CPU port into 1Gbit mode, no autonegotiation */
+       ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG,
+                                BIT(port), BIT(port));
+       if (ret)
+               return;
+
+       ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2,
+                                0xFF00U,
+                                RTL8366RB_PAACR_CPU_PORT << 8);
+       if (ret)
+               return;
+
+       /* Enable the CPU port */
+       ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+                                0);
+       if (ret)
+               return;
+}
+
+static void rb8366rb_set_port_led(struct realtek_smi *smi,
+                                 int port, bool enable)
+{
+       u16 val = enable ? 0x3f : 0;
+       int ret;
+
+       if (smi->leds_disabled)
+               return;
+
+       switch (port) {
+       case 0:
+               ret = regmap_update_bits(smi->map,
+                                        RTL8366RB_LED_0_1_CTRL_REG,
+                                        0x3F, val);
+               break;
+       case 1:
+               ret = regmap_update_bits(smi->map,
+                                        RTL8366RB_LED_0_1_CTRL_REG,
+                                        0x3F << RTL8366RB_LED_1_OFFSET,
+                                        val << RTL8366RB_LED_1_OFFSET);
+               break;
+       case 2:
+               ret = regmap_update_bits(smi->map,
+                                        RTL8366RB_LED_2_3_CTRL_REG,
+                                        0x3F, val);
+               break;
+       case 3:
+               ret = regmap_update_bits(smi->map,
+                                        RTL8366RB_LED_2_3_CTRL_REG,
+                                        0x3F << RTL8366RB_LED_3_OFFSET,
+                                        val << RTL8366RB_LED_3_OFFSET);
+               break;
+       case 4:
+               ret = regmap_update_bits(smi->map,
+                                        RTL8366RB_INTERRUPT_CONTROL_REG,
+                                        RTL8366RB_P4_RGMII_LED,
+                                        enable ? RTL8366RB_P4_RGMII_LED : 0);
+               break;
+       default:
+               dev_err(smi->dev, "no LED for port %d\n", port);
+               return;
+       }
+       if (ret)
+               dev_err(smi->dev, "error updating LED on port %d\n", port);
+}
+
+static int
+rtl8366rb_port_enable(struct dsa_switch *ds, int port,
+                     struct phy_device *phy)
+{
+       struct realtek_smi *smi = ds->priv;
+       int ret;
+
+       dev_dbg(smi->dev, "enable port %d\n", port);
+       ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+                                0);
+       if (ret)
+               return ret;
+
+       rb8366rb_set_port_led(smi, port, true);
+       return 0;
+}
+
+static void
+rtl8366rb_port_disable(struct dsa_switch *ds, int port,
+                      struct phy_device *phy)
+{
+       struct realtek_smi *smi = ds->priv;
+       int ret;
+
+       dev_dbg(smi->dev, "disable port %d\n", port);
+       ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+                                BIT(port));
+       if (ret)
+               return;
+
+       rb8366rb_set_port_led(smi, port, false);
+}
+
+static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
+                                struct rtl8366_vlan_4k *vlan4k)
+{
+       u32 data[3];
+       int ret;
+       int i;
+
+       memset(vlan4k, '\0', sizeof(struct rtl8366_vlan_4k));
+
+       if (vid >= RTL8366RB_NUM_VIDS)
+               return -EINVAL;
+
+       /* write VID */
+       ret = regmap_write(smi->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
+                          vid & RTL8366RB_VLAN_VID_MASK);
+       if (ret)
+               return ret;
+
+       /* write table access control word */
+       ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+                          RTL8366RB_TABLE_VLAN_READ_CTRL);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < 3; i++) {
+               ret = regmap_read(smi->map,
+                                 RTL8366RB_VLAN_TABLE_READ_BASE + i,
+                                 &data[i]);
+               if (ret)
+                       return ret;
+       }
+
+       vlan4k->vid = vid;
+       vlan4k->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
+                       RTL8366RB_VLAN_UNTAG_MASK;
+       vlan4k->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
+       vlan4k->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
+
+       return 0;
+}
+
+static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
+                                const struct rtl8366_vlan_4k *vlan4k)
+{
+       u32 data[3];
+       int ret;
+       int i;
+
+       if (vlan4k->vid >= RTL8366RB_NUM_VIDS ||
+           vlan4k->member > RTL8366RB_VLAN_MEMBER_MASK ||
+           vlan4k->untag > RTL8366RB_VLAN_UNTAG_MASK ||
+           vlan4k->fid > RTL8366RB_FIDMAX)
+               return -EINVAL;
+
+       data[0] = vlan4k->vid & RTL8366RB_VLAN_VID_MASK;
+       data[1] = (vlan4k->member & RTL8366RB_VLAN_MEMBER_MASK) |
+                 ((vlan4k->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
+                       RTL8366RB_VLAN_UNTAG_SHIFT);
+       data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK;
+
+       for (i = 0; i < 3; i++) {
+               ret = regmap_write(smi->map,
+                                  RTL8366RB_VLAN_TABLE_WRITE_BASE + i,
+                                  data[i]);
+               if (ret)
+                       return ret;
+       }
+
+       /* write table access control word */
+       ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+                          RTL8366RB_TABLE_VLAN_WRITE_CTRL);
+
+       return ret;
+}
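+
+/* Worked example (illustrative): writing VID 100 with member mask 0x3f,
+ * untag mask 0x3f and FID 0 yields data[0] = 0x0064, data[1] = 0x3f |
+ * (0x3f << RTL8366RB_VLAN_UNTAG_SHIFT) and data[2] = 0x0000, committed
+ * by the final RTL8366RB_TABLE_VLAN_WRITE_CTRL access word.
+ */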
+
+static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
+                                struct rtl8366_vlan_mc *vlanmc)
+{
+       u32 data[3];
+       int ret;
+       int i;
+
+       memset(vlanmc, '\0', sizeof(struct rtl8366_vlan_mc));
+
+       if (index >= RTL8366RB_NUM_VLANS)
+               return -EINVAL;
+
+       for (i = 0; i < 3; i++) {
+               ret = regmap_read(smi->map,
+                                 RTL8366RB_VLAN_MC_BASE(index) + i,
+                                 &data[i]);
+               if (ret)
+                       return ret;
+       }
+
+       vlanmc->vid = data[0] & RTL8366RB_VLAN_VID_MASK;
+       vlanmc->priority = (data[0] >> RTL8366RB_VLAN_PRIORITY_SHIFT) &
+               RTL8366RB_VLAN_PRIORITY_MASK;
+       vlanmc->untag = (data[1] >> RTL8366RB_VLAN_UNTAG_SHIFT) &
+               RTL8366RB_VLAN_UNTAG_MASK;
+       vlanmc->member = data[1] & RTL8366RB_VLAN_MEMBER_MASK;
+       vlanmc->fid = data[2] & RTL8366RB_VLAN_FID_MASK;
+
+       return 0;
+}
+
+static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
+                                const struct rtl8366_vlan_mc *vlanmc)
+{
+       u32 data[3];
+       int ret;
+       int i;
+
+       if (index >= RTL8366RB_NUM_VLANS ||
+           vlanmc->vid >= RTL8366RB_NUM_VIDS ||
+           vlanmc->priority > RTL8366RB_PRIORITYMAX ||
+           vlanmc->member > RTL8366RB_VLAN_MEMBER_MASK ||
+           vlanmc->untag > RTL8366RB_VLAN_UNTAG_MASK ||
+           vlanmc->fid > RTL8366RB_FIDMAX)
+               return -EINVAL;
+
+       data[0] = (vlanmc->vid & RTL8366RB_VLAN_VID_MASK) |
+                 ((vlanmc->priority & RTL8366RB_VLAN_PRIORITY_MASK) <<
+                       RTL8366RB_VLAN_PRIORITY_SHIFT);
+       data[1] = (vlanmc->member & RTL8366RB_VLAN_MEMBER_MASK) |
+                 ((vlanmc->untag & RTL8366RB_VLAN_UNTAG_MASK) <<
+                       RTL8366RB_VLAN_UNTAG_SHIFT);
+       data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK;
+
+       for (i = 0; i < 3; i++) {
+               ret = regmap_write(smi->map,
+                                  RTL8366RB_VLAN_MC_BASE(index) + i,
+                                  data[i]);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
+{
+       u32 data;
+       int ret;
+
+       if (port >= smi->num_ports)
+               return -EINVAL;
+
+       ret = regmap_read(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+                         &data);
+       if (ret)
+               return ret;
+
+       *val = (data >> RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)) &
+               RTL8366RB_PORT_VLAN_CTRL_MASK;
+
+       return 0;
+}
+
+static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
+{
+       if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)
+               return -EINVAL;
+
+       return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+                               RTL8366RB_PORT_VLAN_CTRL_MASK <<
+                                       RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
+                               (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
+                                       RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
+}
+
+static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
+{
+       unsigned int max = RTL8366RB_NUM_VLANS;
+
+       if (smi->vlan4k_enabled)
+               max = RTL8366RB_NUM_VIDS - 1;
+
+       if (vlan == 0 || vlan >= max)
+               return false;
+
+       return true;
+}
+
+static int rtl8366rb_enable_vlan(struct realtek_smi *smi, bool enable)
+{
+       dev_dbg(smi->dev, "%s VLAN\n", enable ? "enable" : "disable");
+       return regmap_update_bits(smi->map,
+                                 RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
+                                 enable ? RTL8366RB_SGCR_EN_VLAN : 0);
+}
+
+static int rtl8366rb_enable_vlan4k(struct realtek_smi *smi, bool enable)
+{
+       dev_dbg(smi->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
+       return regmap_update_bits(smi->map, RTL8366RB_SGCR,
+                                 RTL8366RB_SGCR_EN_VLAN_4KTB,
+                                 enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
+}
+
+static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+{
+       u32 val;
+       u32 reg;
+       int ret;
+
+       if (phy > RTL8366RB_PHY_NO_MAX)
+               return -EINVAL;
+
+       ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+                          RTL8366RB_PHY_CTRL_READ);
+       if (ret)
+               return ret;
+
+       reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
+
+       ret = regmap_write(smi->map, reg, 0);
+       if (ret) {
+               dev_err(smi->dev,
+                       "failed to write PHY%d reg %04x @ %04x, ret %d\n",
+                       phy, regnum, reg, ret);
+               return ret;
+       }
+
+       ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val);
+       if (ret)
+               return ret;
+
+       dev_dbg(smi->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
+               phy, regnum, reg, val);
+
+       return val;
+}
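+
+/* Note (inferred from the code): the indirect PHY access address encodes
+ * the PHY number one-hot above RTL8366RB_PHY_NO_OFFSET, so e.g. register
+ * 2 of PHY 0 maps to 0x8000 | (1 << RTL8366RB_PHY_NO_OFFSET) | 2.
+ */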
+
+static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+                              u16 val)
+{
+       u32 reg;
+       int ret;
+
+       if (phy > RTL8366RB_PHY_NO_MAX)
+               return -EINVAL;
+
+       ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+                          RTL8366RB_PHY_CTRL_WRITE);
+       if (ret)
+               return ret;
+
+       reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
+
+       dev_dbg(smi->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
+               phy, regnum, reg, val);
+
+       ret = regmap_write(smi->map, reg, val);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int rtl8366rb_reset_chip(struct realtek_smi *smi)
+{
+       int timeout = 10;
+       u32 val;
+       int ret;
+
+       realtek_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG,
+                                   RTL8366RB_CHIP_CTRL_RESET_HW);
+       do {
+               usleep_range(20000, 25000);
+               ret = regmap_read(smi->map, RTL8366RB_RESET_CTRL_REG, &val);
+               if (ret)
+                       return ret;
+
+               if (!(val & RTL8366RB_CHIP_CTRL_RESET_HW))
+                       break;
+       } while (--timeout);
+
+       if (!timeout) {
+               dev_err(smi->dev, "timeout waiting for the switch to reset\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int rtl8366rb_detect(struct realtek_smi *smi)
+{
+       struct device *dev = smi->dev;
+       int ret;
+       u32 val;
+
+       /* Detect device */
+       ret = regmap_read(smi->map, 0x5c, &val);
+       if (ret) {
+               dev_err(dev, "can't get chip ID (%d)\n", ret);
+               return ret;
+       }
+
+       switch (val) {
+       case 0x6027:
+               dev_info(dev, "found an RTL8366S switch\n");
+               dev_err(dev, "this switch is not yet supported, submit patches!\n");
+               return -ENODEV;
+       case 0x5937:
+               dev_info(dev, "found an RTL8366RB switch\n");
+               smi->cpu_port = RTL8366RB_PORT_NUM_CPU;
+               smi->num_ports = RTL8366RB_NUM_PORTS;
+               smi->num_vlan_mc = RTL8366RB_NUM_VLANS;
+               smi->mib_counters = rtl8366rb_mib_counters;
+               smi->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
+               break;
+       default:
+               dev_info(dev, "found an Unknown Realtek switch (id=0x%04x)\n",
+                        val);
+               break;
+       }
+
+       ret = rtl8366rb_reset_chip(smi);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static const struct dsa_switch_ops rtl8366rb_switch_ops = {
+       .get_tag_protocol = rtl8366_get_tag_protocol,
+       .setup = rtl8366rb_setup,
+       .adjust_link = rtl8366rb_adjust_link,
+       .get_strings = rtl8366_get_strings,
+       .get_ethtool_stats = rtl8366_get_ethtool_stats,
+       .get_sset_count = rtl8366_get_sset_count,
+       .port_vlan_filtering = rtl8366_vlan_filtering,
+       .port_vlan_prepare = rtl8366_vlan_prepare,
+       .port_vlan_add = rtl8366_vlan_add,
+       .port_vlan_del = rtl8366_vlan_del,
+       .port_enable = rtl8366rb_port_enable,
+       .port_disable = rtl8366rb_port_disable,
+};
+
+static const struct realtek_smi_ops rtl8366rb_smi_ops = {
+       .detect         = rtl8366rb_detect,
+       .get_vlan_mc    = rtl8366rb_get_vlan_mc,
+       .set_vlan_mc    = rtl8366rb_set_vlan_mc,
+       .get_vlan_4k    = rtl8366rb_get_vlan_4k,
+       .set_vlan_4k    = rtl8366rb_set_vlan_4k,
+       .get_mc_index   = rtl8366rb_get_mc_index,
+       .set_mc_index   = rtl8366rb_set_mc_index,
+       .get_mib_counter = rtl8366rb_get_mib_counter,
+       .is_vlan_valid  = rtl8366rb_is_vlan_valid,
+       .enable_vlan    = rtl8366rb_enable_vlan,
+       .enable_vlan4k  = rtl8366rb_enable_vlan4k,
+       .phy_read       = rtl8366rb_phy_read,
+       .phy_write      = rtl8366rb_phy_write,
+};
+
+const struct realtek_smi_variant rtl8366rb_variant = {
+       .ds_ops = &rtl8366rb_switch_ops,
+       .ops = &rtl8366rb_smi_ops,
+       .clk_delay = 10,
+       .cmd_read = 0xa9,
+       .cmd_write = 0xa8,
+};
+EXPORT_SYMBOL_GPL(rtl8366rb_variant);
diff --git a/drivers/net/dsa/vitesse-vsc73xx.c b/drivers/net/dsa/vitesse-vsc73xx.c
new file mode 100644 (file)
index 0000000..9f1b5f2
--- /dev/null
@@ -0,0 +1,1365 @@
+// SPDX-License-Identifier: GPL-2.0
+/* DSA driver for:
+ * Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
+ * Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
+ *
+ * These switches have a built-in 8051 CPU and can download and execute
+ * firmware on this CPU. They can also be configured to use an external CPU
+ * handling the switch in a memory-mapped manner by connecting to that external
+ * CPU's memory bus.
+ *
+ * This driver (currently) only takes control of the switch chip over SPI and
+ * configures it to route packets around when connected to a CPU port. The
+ * chip has embedded PHYs and VLAN support so we model it using DSA.
+ *
+ * Copyright (C) 2018 Linus Walleij <linus.walleij@linaro.org>
+ * Includes portions of code from the firmware uploader by:
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+#include <linux/etherdevice.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/random.h>
+#include <net/dsa.h>
+
+#define VSC73XX_BLOCK_MAC      0x1 /* Subblocks 0-4, 6 (CPU port) */
+#define VSC73XX_BLOCK_ANALYZER 0x2 /* Only subblock 0 */
+#define VSC73XX_BLOCK_MII      0x3 /* Subblocks 0 and 1 */
+#define VSC73XX_BLOCK_MEMINIT  0x3 /* Only subblock 2 */
+#define VSC73XX_BLOCK_CAPTURE  0x4 /* Only subblock 2 */
+#define VSC73XX_BLOCK_ARBITER  0x5 /* Only subblock 0 */
+#define VSC73XX_BLOCK_SYSTEM   0x7 /* Only subblock 0 */
+
+#define CPU_PORT       6 /* CPU port */
+
+/* MAC Block registers */
+#define VSC73XX_MAC_CFG                0x00
+#define VSC73XX_MACHDXGAP      0x02
+#define VSC73XX_FCCONF         0x04
+#define VSC73XX_FCMACHI                0x08
+#define VSC73XX_FCMACLO                0x0c
+#define VSC73XX_MAXLEN         0x10
+#define VSC73XX_ADVPORTM       0x19
+#define VSC73XX_TXUPDCFG       0x24
+#define VSC73XX_TXQ_SELECT_CFG 0x28
+#define VSC73XX_RXOCT          0x50
+#define VSC73XX_TXOCT          0x51
+#define VSC73XX_C_RX0          0x52
+#define VSC73XX_C_RX1          0x53
+#define VSC73XX_C_RX2          0x54
+#define VSC73XX_C_TX0          0x55
+#define VSC73XX_C_TX1          0x56
+#define VSC73XX_C_TX2          0x57
+#define VSC73XX_C_CFG          0x58
+#define VSC73XX_CAT_DROP       0x6e
+#define VSC73XX_CAT_PR_MISC_L2 0x6f
+#define VSC73XX_CAT_PR_USR_PRIO        0x75
+#define VSC73XX_Q_MISC_CONF    0xdf
+
+/* MAC_CFG register bits */
+#define VSC73XX_MAC_CFG_WEXC_DIS       BIT(31)
+#define VSC73XX_MAC_CFG_PORT_RST       BIT(29)
+#define VSC73XX_MAC_CFG_TX_EN          BIT(28)
+#define VSC73XX_MAC_CFG_SEED_LOAD      BIT(27)
+#define VSC73XX_MAC_CFG_SEED_MASK      GENMASK(26, 19)
+#define VSC73XX_MAC_CFG_SEED_OFFSET    19
+#define VSC73XX_MAC_CFG_FDX            BIT(18)
+#define VSC73XX_MAC_CFG_GIGA_MODE      BIT(17)
+#define VSC73XX_MAC_CFG_RX_EN          BIT(16)
+#define VSC73XX_MAC_CFG_VLAN_DBLAWR    BIT(15)
+#define VSC73XX_MAC_CFG_VLAN_AWR       BIT(14)
+#define VSC73XX_MAC_CFG_100_BASE_T     BIT(13) /* Not in manual */
+#define VSC73XX_MAC_CFG_TX_IPG_MASK    GENMASK(10, 6)
+#define VSC73XX_MAC_CFG_TX_IPG_OFFSET  6
+#define VSC73XX_MAC_CFG_TX_IPG_1000M   (6 << VSC73XX_MAC_CFG_TX_IPG_OFFSET)
+#define VSC73XX_MAC_CFG_TX_IPG_100_10M (17 << VSC73XX_MAC_CFG_TX_IPG_OFFSET)
+#define VSC73XX_MAC_CFG_MAC_RX_RST     BIT(5)
+#define VSC73XX_MAC_CFG_MAC_TX_RST     BIT(4)
+#define VSC73XX_MAC_CFG_CLK_SEL_MASK   GENMASK(2, 0)
+#define VSC73XX_MAC_CFG_CLK_SEL_OFFSET 0
+#define VSC73XX_MAC_CFG_CLK_SEL_1000M  1
+#define VSC73XX_MAC_CFG_CLK_SEL_100M   2
+#define VSC73XX_MAC_CFG_CLK_SEL_10M    3
+#define VSC73XX_MAC_CFG_CLK_SEL_EXT    4
+
+#define VSC73XX_MAC_CFG_1000M_F_PHY    (VSC73XX_MAC_CFG_FDX | \
+                                        VSC73XX_MAC_CFG_GIGA_MODE | \
+                                        VSC73XX_MAC_CFG_TX_IPG_1000M | \
+                                        VSC73XX_MAC_CFG_CLK_SEL_EXT)
+#define VSC73XX_MAC_CFG_100_10M_F_PHY  (VSC73XX_MAC_CFG_FDX | \
+                                        VSC73XX_MAC_CFG_TX_IPG_100_10M | \
+                                        VSC73XX_MAC_CFG_CLK_SEL_EXT)
+#define VSC73XX_MAC_CFG_100_10M_H_PHY  (VSC73XX_MAC_CFG_TX_IPG_100_10M | \
+                                        VSC73XX_MAC_CFG_CLK_SEL_EXT)
+#define VSC73XX_MAC_CFG_1000M_F_RGMII  (VSC73XX_MAC_CFG_FDX | \
+                                        VSC73XX_MAC_CFG_GIGA_MODE | \
+                                        VSC73XX_MAC_CFG_TX_IPG_1000M | \
+                                        VSC73XX_MAC_CFG_CLK_SEL_1000M)
+#define VSC73XX_MAC_CFG_RESET          (VSC73XX_MAC_CFG_PORT_RST | \
+                                        VSC73XX_MAC_CFG_MAC_RX_RST | \
+                                        VSC73XX_MAC_CFG_MAC_TX_RST)
+
+/* Flow control register bits */
+#define VSC73XX_FCCONF_ZERO_PAUSE_EN   BIT(17)
+#define VSC73XX_FCCONF_FLOW_CTRL_OBEY  BIT(16)
+#define VSC73XX_FCCONF_PAUSE_VAL_MASK  GENMASK(15, 0)
+
+/* ADVPORTM advanced port setup register bits */
+#define VSC73XX_ADVPORTM_IFG_PPM       BIT(7)
+#define VSC73XX_ADVPORTM_EXC_COL_CONT  BIT(6)
+#define VSC73XX_ADVPORTM_EXT_PORT      BIT(5)
+#define VSC73XX_ADVPORTM_INV_GTX       BIT(4)
+#define VSC73XX_ADVPORTM_ENA_GTX       BIT(3)
+#define VSC73XX_ADVPORTM_DDR_MODE      BIT(2)
+#define VSC73XX_ADVPORTM_IO_LOOPBACK   BIT(1)
+#define VSC73XX_ADVPORTM_HOST_LOOPBACK BIT(0)
+
+/* CAT_DROP categorizer frame dropping register bits */
+#define VSC73XX_CAT_DROP_DROP_MC_SMAC_ENA      BIT(6)
+#define VSC73XX_CAT_DROP_FWD_CTRL_ENA          BIT(4)
+#define VSC73XX_CAT_DROP_FWD_PAUSE_ENA         BIT(3)
+#define VSC73XX_CAT_DROP_UNTAGGED_ENA          BIT(2)
+#define VSC73XX_CAT_DROP_TAGGED_ENA            BIT(1)
+#define VSC73XX_CAT_DROP_NULL_MAC_ENA          BIT(0)
+
+#define VSC73XX_Q_MISC_CONF_EXTENT_MEM         BIT(31)
+#define VSC73XX_Q_MISC_CONF_EARLY_TX_MASK      GENMASK(4, 1)
+#define VSC73XX_Q_MISC_CONF_EARLY_TX_512       (1 << 1)
+#define VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE     BIT(0)
+
+/* Frame analyzer block 2 registers */
+#define VSC73XX_STORMLIMIT     0x02
+#define VSC73XX_ADVLEARN       0x03
+#define VSC73XX_IFLODMSK       0x04
+#define VSC73XX_VLANMASK       0x05
+#define VSC73XX_MACHDATA       0x06
+#define VSC73XX_MACLDATA       0x07
+#define VSC73XX_ANMOVED                0x08
+#define VSC73XX_ANAGEFIL       0x09
+#define VSC73XX_ANEVENTS       0x0a
+#define VSC73XX_ANCNTMASK      0x0b
+#define VSC73XX_ANCNTVAL       0x0c
+#define VSC73XX_LEARNMASK      0x0d
+#define VSC73XX_UFLODMASK      0x0e
+#define VSC73XX_MFLODMASK      0x0f
+#define VSC73XX_RECVMASK       0x10
+#define VSC73XX_AGGRCTRL       0x20
+#define VSC73XX_AGGRMSKS       0x30 /* Until 0x3f */
+#define VSC73XX_DSTMASKS       0x40 /* Until 0x7f */
+#define VSC73XX_SRCMASKS       0x80 /* Until 0x87 */
+#define VSC73XX_CAPENAB                0xa0
+#define VSC73XX_MACACCESS      0xb0
+#define VSC73XX_IPMCACCESS     0xb1
+#define VSC73XX_MACTINDX       0xc0
+#define VSC73XX_VLANACCESS     0xd0
+#define VSC73XX_VLANTIDX       0xe0
+#define VSC73XX_AGENCTRL       0xf0
+#define VSC73XX_CAPRST         0xff
+
+#define VSC73XX_MACACCESS_CPU_COPY             BIT(14)
+#define VSC73XX_MACACCESS_FWD_KILL             BIT(13)
+#define VSC73XX_MACACCESS_IGNORE_VLAN          BIT(12)
+#define VSC73XX_MACACCESS_AGED_FLAG            BIT(11)
+#define VSC73XX_MACACCESS_VALID                        BIT(10)
+#define VSC73XX_MACACCESS_LOCKED               BIT(9)
+#define VSC73XX_MACACCESS_DEST_IDX_MASK                GENMASK(8, 3)
+#define VSC73XX_MACACCESS_CMD_MASK             GENMASK(2, 0)
+#define VSC73XX_MACACCESS_CMD_IDLE             0
+#define VSC73XX_MACACCESS_CMD_LEARN            1
+#define VSC73XX_MACACCESS_CMD_FORGET           2
+#define VSC73XX_MACACCESS_CMD_AGE_TABLE                3
+#define VSC73XX_MACACCESS_CMD_FLUSH_TABLE      4
+#define VSC73XX_MACACCESS_CMD_CLEAR_TABLE      5
+#define VSC73XX_MACACCESS_CMD_READ_ENTRY       6
+#define VSC73XX_MACACCESS_CMD_WRITE_ENTRY      7
+
+#define VSC73XX_VLANACCESS_LEARN_DISABLED      BIT(30)
+#define VSC73XX_VLANACCESS_VLAN_MIRROR         BIT(29)
+#define VSC73XX_VLANACCESS_VLAN_SRC_CHECK      BIT(28)
+#define VSC73XX_VLANACCESS_VLAN_PORT_MASK      GENMASK(9, 2)
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK   GENMASK(2, 0)
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE   0
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY     1
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY    2
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE    3
+
+/* MII block 3 registers */
+#define VSC73XX_MII_STAT       0x0
+#define VSC73XX_MII_CMD                0x1
+#define VSC73XX_MII_DATA       0x2
+
+/* Arbiter block 5 registers */
+#define VSC73XX_ARBEMPTY               0x0c
+#define VSC73XX_ARBDISC                        0x0e
+#define VSC73XX_SBACKWDROP             0x12
+#define VSC73XX_DBACKWDROP             0x13
+#define VSC73XX_ARBBURSTPROB           0x15
+
+/* System block 7 registers */
+#define VSC73XX_ICPU_SIPAD             0x01
+#define VSC73XX_GMIIDELAY              0x05
+#define VSC73XX_ICPU_CTRL              0x10
+#define VSC73XX_ICPU_ADDR              0x11
+#define VSC73XX_ICPU_SRAM              0x12
+#define VSC73XX_HWSEM                  0x13
+#define VSC73XX_GLORESET               0x14
+#define VSC73XX_ICPU_MBOX_VAL          0x15
+#define VSC73XX_ICPU_MBOX_SET          0x16
+#define VSC73XX_ICPU_MBOX_CLR          0x17
+#define VSC73XX_CHIPID                 0x18
+#define VSC73XX_GPIO                   0x34
+
+#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_NONE  0
+#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_4_NS        1
+#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_7_NS        2
+#define VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS        3
+
+#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_NONE   (0 << 4)
+#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_4_NS (1 << 4)
+#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_7_NS (2 << 4)
+#define VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS (3 << 4)
+
+#define VSC73XX_ICPU_CTRL_WATCHDOG_RST BIT(31)
+#define VSC73XX_ICPU_CTRL_CLK_DIV_MASK GENMASK(12, 8)
+#define VSC73XX_ICPU_CTRL_SRST_HOLD    BIT(7)
+#define VSC73XX_ICPU_CTRL_ICPU_PI_EN   BIT(6)
+#define VSC73XX_ICPU_CTRL_BOOT_EN      BIT(3)
+#define VSC73XX_ICPU_CTRL_EXT_ACC_EN   BIT(2)
+#define VSC73XX_ICPU_CTRL_CLK_EN       BIT(1)
+#define VSC73XX_ICPU_CTRL_SRST         BIT(0)
+
+#define VSC73XX_CHIPID_ID_SHIFT                12
+#define VSC73XX_CHIPID_ID_MASK         0xffff
+#define VSC73XX_CHIPID_REV_SHIFT       28
+#define VSC73XX_CHIPID_REV_MASK                0xf
+#define VSC73XX_CHIPID_ID_7385         0x7385
+#define VSC73XX_CHIPID_ID_7388         0x7388
+#define VSC73XX_CHIPID_ID_7395         0x7395
+#define VSC73XX_CHIPID_ID_7398         0x7398
+
+#define VSC73XX_GLORESET_STROBE                BIT(4)
+#define VSC73XX_GLORESET_ICPU_LOCK     BIT(3)
+#define VSC73XX_GLORESET_MEM_LOCK      BIT(2)
+#define VSC73XX_GLORESET_PHY_RESET     BIT(1)
+#define VSC73XX_GLORESET_MASTER_RESET  BIT(0)
+
+#define VSC73XX_CMD_MODE_READ          0
+#define VSC73XX_CMD_MODE_WRITE         1
+#define VSC73XX_CMD_MODE_SHIFT         4
+#define VSC73XX_CMD_BLOCK_SHIFT                5
+#define VSC73XX_CMD_BLOCK_MASK         0x7
+#define VSC73XX_CMD_SUBBLOCK_MASK      0xf
+
+#define VSC7385_CLOCK_DELAY            ((3 << 4) | 3)
+#define VSC7385_CLOCK_DELAY_MASK       ((3 << 4) | 3)
+
+#define VSC73XX_ICPU_CTRL_STOP (VSC73XX_ICPU_CTRL_SRST_HOLD | \
+                                VSC73XX_ICPU_CTRL_BOOT_EN | \
+                                VSC73XX_ICPU_CTRL_EXT_ACC_EN)
+
+#define VSC73XX_ICPU_CTRL_START        (VSC73XX_ICPU_CTRL_CLK_DIV_MASK | \
+                                VSC73XX_ICPU_CTRL_BOOT_EN | \
+                                VSC73XX_ICPU_CTRL_CLK_EN | \
+                                VSC73XX_ICPU_CTRL_SRST)
+
+/**
+ * struct vsc73xx - VSC73xx state container
+ */
+struct vsc73xx {
+       struct device           *dev;
+       struct gpio_desc        *reset;
+       struct spi_device       *spi;
+       struct dsa_switch       *ds;
+       struct gpio_chip        gc;
+       u16                     chipid;
+       u8                      addr[ETH_ALEN];
+       struct mutex            lock; /* Protects SPI traffic */
+};
+
+#define IS_7385(a) ((a)->chipid == VSC73XX_CHIPID_ID_7385)
+#define IS_7388(a) ((a)->chipid == VSC73XX_CHIPID_ID_7388)
+#define IS_7395(a) ((a)->chipid == VSC73XX_CHIPID_ID_7395)
+#define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398)
+#define IS_739X(a) (IS_7395(a) || IS_7398(a))
+
+struct vsc73xx_counter {
+       u8 counter;
+       const char *name;
+};
+
+/* Counters are named according to the MIB standards where applicable.
+ * Some counters are custom, non-standard. The standard counters are
+ * named in accordance with RFC2819, RFC2021 and IEEE Std 802.3-2002 Annex
+ * 30A Counters.
+ */
+static const struct vsc73xx_counter vsc73xx_rx_counters[] = {
+       { 0, "RxEtherStatsPkts" },
+       { 1, "RxBroadcast+MulticastPkts" }, /* non-standard counter */
+       { 2, "RxTotalErrorPackets" }, /* non-standard counter */
+       { 3, "RxEtherStatsBroadcastPkts" },
+       { 4, "RxEtherStatsMulticastPkts" },
+       { 5, "RxEtherStatsPkts64Octets" },
+       { 6, "RxEtherStatsPkts65to127Octets" },
+       { 7, "RxEtherStatsPkts128to255Octets" },
+       { 8, "RxEtherStatsPkts256to511Octets" },
+       { 9, "RxEtherStatsPkts512to1023Octets" },
+       { 10, "RxEtherStatsPkts1024to1518Octets" },
+       { 11, "RxJumboFrames" }, /* non-standard counter */
+       { 12, "RxaPauseMACControlFramesTransmitted" },
+       { 13, "RxFIFODrops" }, /* non-standard counter */
+       { 14, "RxBackwardDrops" }, /* non-standard counter */
+       { 15, "RxClassifierDrops" }, /* non-standard counter */
+       { 16, "RxEtherStatsCRCAlignErrors" },
+       { 17, "RxEtherStatsUndersizePkts" },
+       { 18, "RxEtherStatsOversizePkts" },
+       { 19, "RxEtherStatsFragments" },
+       { 20, "RxEtherStatsJabbers" },
+       { 21, "RxaMACControlFramesReceived" },
+       /* 22-24 are undefined */
+       { 25, "RxaFramesReceivedOK" },
+       { 26, "RxQoSClass0" }, /* non-standard counter */
+       { 27, "RxQoSClass1" }, /* non-standard counter */
+       { 28, "RxQoSClass2" }, /* non-standard counter */
+       { 29, "RxQoSClass3" }, /* non-standard counter */
+};
+
+static const struct vsc73xx_counter vsc73xx_tx_counters[] = {
+       { 0, "TxEtherStatsPkts" },
+       { 1, "TxBroadcast+MulticastPkts" }, /* non-standard counter */
+       { 2, "TxTotalErrorPackets" }, /* non-standard counter */
+       { 3, "TxEtherStatsBroadcastPkts" },
+       { 4, "TxEtherStatsMulticastPkts" },
+       { 5, "TxEtherStatsPkts64Octets" },
+       { 6, "TxEtherStatsPkts65to127Octets" },
+       { 7, "TxEtherStatsPkts128to255Octets" },
+       { 8, "TxEtherStatsPkts256to511Octets" },
+       { 9, "TxEtherStatsPkts512to1023Octets" },
+       { 10, "TxEtherStatsPkts1024to1518Octets" },
+       { 11, "TxJumboFrames" }, /* non-standard counter */
+       { 12, "TxaPauseMACControlFramesTransmitted" },
+       { 13, "TxFIFODrops" }, /* non-standard counter */
+       { 14, "TxDrops" }, /* non-standard counter */
+       { 15, "TxEtherStatsCollisions" },
+       { 16, "TxEtherStatsCRCAlignErrors" },
+       { 17, "TxEtherStatsUndersizePkts" },
+       { 18, "TxEtherStatsOversizePkts" },
+       { 19, "TxEtherStatsFragments" },
+       { 20, "TxEtherStatsJabbers" },
+       /* 21-24 are undefined */
+       { 25, "TxaFramesReceivedOK" },
+       { 26, "TxQoSClass0" }, /* non-standard counter */
+       { 27, "TxQoSClass1" }, /* non-standard counter */
+       { 28, "TxQoSClass2" }, /* non-standard counter */
+       { 29, "TxQoSClass3" }, /* non-standard counter */
+};
+
+static int vsc73xx_is_addr_valid(u8 block, u8 subblock)
+{
+       switch (block) {
+       case VSC73XX_BLOCK_MAC:
+               switch (subblock) {
+               case 0 ... 4:
+               case 6:
+                       return 1;
+               }
+               break;
+
+       case VSC73XX_BLOCK_ANALYZER:
+       case VSC73XX_BLOCK_SYSTEM:
+               switch (subblock) {
+               case 0:
+                       return 1;
+               }
+               break;
+
+       case VSC73XX_BLOCK_MII:
+       case VSC73XX_BLOCK_CAPTURE:
+       case VSC73XX_BLOCK_ARBITER:
+               switch (subblock) {
+               case 0 ... 1:
+                       return 1;
+               case 2:
+                       /* Block 0x3, subblock 2 is the memory
+                        * initialization block (VSC73XX_BLOCK_MEMINIT),
+                        * written during setup.
+                        */
+                       if (block == VSC73XX_BLOCK_MEMINIT)
+                               return 1;
+                       break;
+               }
+               break;
+       }
+
+       return 0;
+}
+
+static u8 vsc73xx_make_addr(u8 mode, u8 block, u8 subblock)
+{
+       u8 ret;
+
+       ret = (block & VSC73XX_CMD_BLOCK_MASK) << VSC73XX_CMD_BLOCK_SHIFT;
+       ret |= (mode & 1) << VSC73XX_CMD_MODE_SHIFT;
+       ret |= subblock & VSC73XX_CMD_SUBBLOCK_MASK;
+
+       return ret;
+}
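+
+/* Worked example (illustrative): a read (mode 0) of the MAC block (0x1),
+ * CPU port subblock (6) gives the command byte
+ * (0x1 << 5) | (0 << 4) | 0x6 = 0x26.
+ */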
+
+static int vsc73xx_read(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
+                       u32 *val)
+{
+       struct spi_transfer t[2];
+       struct spi_message m;
+       u8 cmd[4];
+       u8 buf[4];
+       int ret;
+
+       if (!vsc73xx_is_addr_valid(block, subblock))
+               return -EINVAL;
+
+       spi_message_init(&m);
+
+       memset(&t, 0, sizeof(t));
+
+       t[0].tx_buf = cmd;
+       t[0].len = sizeof(cmd);
+       spi_message_add_tail(&t[0], &m);
+
+       t[1].rx_buf = buf;
+       t[1].len = sizeof(buf);
+       spi_message_add_tail(&t[1], &m);
+
+       cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_MODE_READ, block, subblock);
+       cmd[1] = reg;
+       cmd[2] = 0;
+       cmd[3] = 0;
+
+       mutex_lock(&vsc->lock);
+       ret = spi_sync(vsc->spi, &m);
+       mutex_unlock(&vsc->lock);
+
+       if (ret)
+               return ret;
+
+       *val = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
+
+       return 0;
+}
+
+static int vsc73xx_write(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
+                        u32 val)
+{
+       struct spi_transfer t[2];
+       struct spi_message m;
+       u8 cmd[2];
+       u8 buf[4];
+       int ret;
+
+       if (!vsc73xx_is_addr_valid(block, subblock))
+               return -EINVAL;
+
+       spi_message_init(&m);
+
+       memset(&t, 0, sizeof(t));
+
+       t[0].tx_buf = cmd;
+       t[0].len = sizeof(cmd);
+       spi_message_add_tail(&t[0], &m);
+
+       t[1].tx_buf = buf;
+       t[1].len = sizeof(buf);
+       spi_message_add_tail(&t[1], &m);
+
+       cmd[0] = vsc73xx_make_addr(VSC73XX_CMD_MODE_WRITE, block, subblock);
+       cmd[1] = reg;
+
+       buf[0] = (val >> 24) & 0xff;
+       buf[1] = (val >> 16) & 0xff;
+       buf[2] = (val >> 8) & 0xff;
+       buf[3] = val & 0xff;
+
+       mutex_lock(&vsc->lock);
+       ret = spi_sync(vsc->spi, &m);
+       mutex_unlock(&vsc->lock);
+
+       return ret;
+}
+
+static int vsc73xx_update_bits(struct vsc73xx *vsc, u8 block, u8 subblock,
+                              u8 reg, u32 mask, u32 val)
+{
+       u32 tmp, orig;
+       int ret;
+
+       /* Same read-modify-write algorithm as e.g. regmap */
+       ret = vsc73xx_read(vsc, block, subblock, reg, &orig);
+       if (ret)
+               return ret;
+       tmp = orig & ~mask;
+       tmp |= val & mask;
+       return vsc73xx_write(vsc, block, subblock, reg, tmp);
+}
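+
+/* Usage sketch (illustrative only): set the EXT_PORT bit of ADVPORTM on
+ * the CPU port while leaving all other bits untouched:
+ *
+ *     vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, CPU_PORT,
+ *                         VSC73XX_ADVPORTM,
+ *                         VSC73XX_ADVPORTM_EXT_PORT,
+ *                         VSC73XX_ADVPORTM_EXT_PORT);
+ */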
+
+static int vsc73xx_detect(struct vsc73xx *vsc)
+{
+       bool icpu_si_boot_en;
+       bool icpu_pi_en;
+       u32 val;
+       u32 rev;
+       int ret;
+       u32 id;
+
+       ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+                          VSC73XX_ICPU_MBOX_VAL, &val);
+       if (ret) {
+               dev_err(vsc->dev, "unable to read mailbox (%d)\n", ret);
+               return ret;
+       }
+
+       if (val == 0xffffffff) {
+               dev_info(vsc->dev, "chip seems dead, assert reset\n");
+               gpiod_set_value_cansleep(vsc->reset, 1);
+               /* Reset pulse should be 20ns minimum, according to datasheet
+                * table 245, so 10us should be fine
+                */
+               usleep_range(10, 100);
+               gpiod_set_value_cansleep(vsc->reset, 0);
+               /* Wait 20ms according to datasheet table 245 */
+               msleep(20);
+
+               ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+                                  VSC73XX_ICPU_MBOX_VAL, &val);
+               if (ret || val == 0xffffffff) {
+                       dev_err(vsc->dev, "seems not to help, giving up\n");
+                       return -ENODEV;
+               }
+       }
+
+       ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+                          VSC73XX_CHIPID, &val);
+       if (ret) {
+               dev_err(vsc->dev, "unable to read chip id (%d)\n", ret);
+               return ret;
+       }
+
+       id = (val >> VSC73XX_CHIPID_ID_SHIFT) &
+               VSC73XX_CHIPID_ID_MASK;
+       switch (id) {
+       case VSC73XX_CHIPID_ID_7385:
+       case VSC73XX_CHIPID_ID_7388:
+       case VSC73XX_CHIPID_ID_7395:
+       case VSC73XX_CHIPID_ID_7398:
+               break;
+       default:
+               dev_err(vsc->dev, "unsupported chip, id=%04x\n", id);
+               return -ENODEV;
+       }
+
+       vsc->chipid = id;
+       rev = (val >> VSC73XX_CHIPID_REV_SHIFT) &
+               VSC73XX_CHIPID_REV_MASK;
+       dev_info(vsc->dev, "VSC%04X (rev: %d) switch found\n", id, rev);
+
+       ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+                          VSC73XX_ICPU_CTRL, &val);
+       if (ret) {
+               dev_err(vsc->dev, "unable to read iCPU control\n");
+               return ret;
+       }
+
+       /* The iCPU can always be used but can boot in different ways.
+        * If it is initially disabled and has no external memory,
+        * we are in control and can do whatever we like, else we
+        * are probably in trouble (we need some way to communicate
+        * with the running firmware) so we bail out for now.
+        */
+       icpu_pi_en = !!(val & VSC73XX_ICPU_CTRL_ICPU_PI_EN);
+       icpu_si_boot_en = !!(val & VSC73XX_ICPU_CTRL_BOOT_EN);
+       if (icpu_si_boot_en && icpu_pi_en) {
+               dev_err(vsc->dev,
+                       "iCPU enabled boots from SI, has external memory\n");
+               dev_err(vsc->dev, "no idea how to deal with this\n");
+               return -ENODEV;
+       }
+       if (icpu_si_boot_en && !icpu_pi_en) {
+               dev_err(vsc->dev,
+                       "iCPU enabled boots from SI, no external memory\n");
+               dev_err(vsc->dev, "no idea how to deal with this\n");
+               return -ENODEV;
+       }
+       if (!icpu_si_boot_en && icpu_pi_en) {
+               dev_err(vsc->dev,
+                       "iCPU enabled, boots from PI external memory\n");
+               dev_err(vsc->dev, "no idea how to deal with this\n");
+               return -ENODEV;
+       }
+       /* !icpu_si_boot_en && !icpu_pi_en */
+       dev_info(vsc->dev, "iCPU disabled, no external memory\n");
+
+       return 0;
+}
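+
+/* Worked example (illustrative): a CHIPID readout of 0x17395000 decodes
+ * as id = (0x17395000 >> 12) & 0xffff = 0x7395 and
+ * rev = (0x17395000 >> 28) & 0xf = 1, i.e. a VSC7395 rev 1.
+ */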
+
+static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+       struct vsc73xx *vsc = ds->priv;
+       u32 cmd;
+       u32 val;
+       int ret;
+
+       /* Setting bit 26 means "read" */
+       cmd = BIT(26) | (phy << 21) | (regnum << 16);
+       ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+       if (ret)
+               return ret;
+       msleep(2);
+       ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val);
+       if (ret)
+               return ret;
+       if (val & BIT(16)) {
+               dev_err(vsc->dev, "reading reg %02x from phy%d failed\n",
+                       regnum, phy);
+               return -EIO;
+       }
+       val &= 0xFFFFU;
+
+       dev_dbg(vsc->dev, "read reg %02x from phy%d = %04x\n",
+               regnum, phy, val);
+
+       return val;
+}
+
+static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
+                            u16 val)
+{
+       struct vsc73xx *vsc = ds->priv;
+       u32 cmd;
+       int ret;
+
+       /* It was found through tedious experiments that this router
+        * chip really hates to have its PHYs reset. They never recover
+        * if that happens: autonegotiation stops working after a reset.
+        * Just filter out this command. (Resetting the whole chip is OK.)
+        */
+       if (regnum == 0 && (val & BIT(15))) {
+               dev_info(vsc->dev, "reset PHY - disallowed\n");
+               return 0;
+       }
+
+       cmd = (phy << 21) | (regnum << 16);
+       ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+       if (ret)
+               return ret;
+
+       dev_dbg(vsc->dev, "write %04x to reg %02x in phy%d\n",
+               val, regnum, phy);
+       return 0;
+}
+
+static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds,
+                                                     int port)
+{
+       /* The switch internally uses an 8 byte header with length,
+        * source port, tag, LPA and priority. This is supposedly
+        * only accessible when operating the switch using the internal
+        * CPU or with an external CPU mapping the device in, but not
+        * when operating the switch over SPI and putting frames in/out
+        * on port 6 (the CPU port). So far we must assume that we
+        * cannot access the tag. (See "Internal frame header" section
+        * 3.9.1 in the manual.)
+        */
+       return DSA_TAG_PROTO_NONE;
+}
+
+static int vsc73xx_setup(struct dsa_switch *ds)
+{
+       struct vsc73xx *vsc = ds->priv;
+       int i;
+
+       dev_info(vsc->dev, "set up the switch\n");
+
+       /* Issue RESET */
+       vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
+                     VSC73XX_GLORESET_MASTER_RESET);
+       usleep_range(125, 200);
+
+       /* Initialize memory: initialize RAM banks 0..15 except 6 and 7.
+        * This sequence appears in the
+        * VSC7385 SparX-G5 datasheet section 6.6.1 and the
+        * VSC7395 SparX-G5e datasheet section 6.6.1
+        * "initialization sequence".
+        * No explanation is given for the 0x1010400 magic number.
+        */
+       for (i = 0; i <= 15; i++) {
+               if (i != 6 && i != 7) {
+                       vsc73xx_write(vsc, VSC73XX_BLOCK_MEMINIT,
+                                     2,
+                                     0, 0x1010400 + i);
+                       mdelay(1);
+               }
+       }
+       mdelay(30);
+
+       /* Clear MAC table */
+       vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+                     VSC73XX_MACACCESS,
+                     VSC73XX_MACACCESS_CMD_CLEAR_TABLE);
+
+       /* Clear VLAN table */
+       vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+                     VSC73XX_VLANACCESS,
+                     VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE);
+
+       msleep(40);
+
+       /* Use 20KiB buffers on all ports on VSC7395.
+        * The VSC7385 has 16KiB buffers and that is the
+        * default if we don't set this up explicitly.
+        * Port "31" is "all ports".
+        */
+       if (IS_739X(vsc))
+               vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, 0x1f,
+                             VSC73XX_Q_MISC_CONF,
+                             VSC73XX_Q_MISC_CONF_EXTENT_MEM);
+
+       /* Put all ports into reset until enabled */
+       for (i = 0; i < 7; i++) {
+               if (i == 5)
+                       continue;
+               vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, i,
+                             VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
+       }
+
+       /* MII delay, set both GTX and RX delay to 2 ns */
+       vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
+                     VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS |
+                     VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS);
+       /* Enable reception of frames on all ports */
+       vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_RECVMASK,
+                     0x5f);
+       /* IP multicast flood mask (table 144) */
+       vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_IFLODMSK,
+                     0xff);
+
+       mdelay(50);
+
+       /* Release reset from the internal PHYs */
+       vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
+                     VSC73XX_GLORESET_PHY_RESET);
+
+       udelay(4);
+
+       return 0;
+}
+
+static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
+{
+       u32 val;
+
+       /* MAC configure, first reset the port and then write defaults */
+       vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+                     port,
+                     VSC73XX_MAC_CFG,
+                     VSC73XX_MAC_CFG_RESET);
+
+       /* Bring up the port in 1Gbit mode by default; this will be
+        * augmented after auto-negotiation on the PHY-facing
+        * ports.
+        */
+       if (port == CPU_PORT)
+               val = VSC73XX_MAC_CFG_1000M_F_RGMII;
+       else
+               val = VSC73XX_MAC_CFG_1000M_F_PHY;
+
+       vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+                     port,
+                     VSC73XX_MAC_CFG,
+                     val |
+                     VSC73XX_MAC_CFG_TX_EN |
+                     VSC73XX_MAC_CFG_RX_EN);
+
+       /* Max length: we can do up to 9.6 KiB, so allow that.
+        * According to the application note "VSC7398 Jumbo Frames",
+        * setting the MTU to 9.6 KB does not affect the performance on
+        * standard frames, so just enable it. It is clear from the
+        * application note that "9.6 kilobytes" == 9600 bytes.
+        */
+       vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+                     port,
+                     VSC73XX_MAXLEN, 9600);
+
+       /* Flow control for the CPU port:
+        * Use a zero delay pause frame when pause condition is left
+        * Obey pause control frames
+        */
+       vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+                     port,
+                     VSC73XX_FCCONF,
+                     VSC73XX_FCCONF_ZERO_PAUSE_EN |
+                     VSC73XX_FCCONF_FLOW_CTRL_OBEY);
+
+       /* Issue pause control frames on PHY facing ports.
+        * Allow early initiation of MAC transmission if the amount
+        * of egress data is below 512 bytes on CPU port.
+        * FIXME: enable 20KiB buffers?
+        */
+       if (port == CPU_PORT)
+               val = VSC73XX_Q_MISC_CONF_EARLY_TX_512;
+       else
+               val = VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE;
+       val |= VSC73XX_Q_MISC_CONF_EXTENT_MEM;
+       vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+                     port,
+                     VSC73XX_Q_MISC_CONF,
+                     val);
+
+       /* Flow control MAC: a MAC address used in flow control frames */
+       val = (vsc->addr[5] << 16) | (vsc->addr[4] << 8) | (vsc->addr[3]);
+       vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+                     port,
+                     VSC73XX_FCMACHI,
+                     val);
+       val = (vsc->addr[2] << 16) | (vsc->addr[1] << 8) | (vsc->addr[0]);
+       vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+                     port,
+                     VSC73XX_FCMACLO,
+                     val);
+
+       /* Tell the categorizer to forward pause frames, not control
+        * frames. Do not drop anything.
+        */
+       vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+                     port,
+                     VSC73XX_CAT_DROP,
+                     VSC73XX_CAT_DROP_FWD_PAUSE_ENA);
+
+       /* Clear all counters */
+       vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+                     port, VSC73XX_C_RX0, 0);
+}
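+
+/* Worked example (illustrative): for vsc->addr = 02:11:22:33:44:55 the
+ * flow control MAC writes above become FCMACHI = 0x554433
+ * (addr[5], addr[4], addr[3]) and FCMACLO = 0x221102
+ * (addr[2], addr[1], addr[0]).
+ */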
+
+static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc,
+                                      int port, struct phy_device *phydev,
+                                      u32 initval)
+{
+       u32 val = initval;
+       u8 seed;
+
+       /* Reset this port FIXME: break out subroutine */
+       val |= VSC73XX_MAC_CFG_RESET;
+       vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
+
+       /* Seed the port randomness with randomness */
+       get_random_bytes(&seed, 1);
+       val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET;
+       val |= VSC73XX_MAC_CFG_SEED_LOAD;
+       val |= VSC73XX_MAC_CFG_WEXC_DIS;
+       vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
+
+       /* Flow control for the PHY facing ports:
+        * Use a zero delay pause frame when pause condition is left
+        * Obey pause control frames
+        * When generating pause frames, use 0xff as pause value
+        */
+       vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_FCCONF,
+                     VSC73XX_FCCONF_ZERO_PAUSE_EN |
+                     VSC73XX_FCCONF_FLOW_CTRL_OBEY |
+                     0xff);
+
+       /* Disallow backward dropping of frames from this port */
+       vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+                           VSC73XX_SBACKWDROP, BIT(port), 0);
+
+       /* Enable TX, RX, deassert reset, stop loading seed */
+       vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+                           VSC73XX_MAC_CFG,
+                           VSC73XX_MAC_CFG_RESET | VSC73XX_MAC_CFG_SEED_LOAD |
+                           VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN,
+                           VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN);
+}
+
+static void vsc73xx_adjust_link(struct dsa_switch *ds, int port,
+                               struct phy_device *phydev)
+{
+       struct vsc73xx *vsc = ds->priv;
+       u32 val;
+
+       /* Special handling of the CPU-facing port */
+       if (port == CPU_PORT) {
+               /* Other ports are already initialized but not this one */
+               vsc73xx_init_port(vsc, CPU_PORT);
+               /* Select the external port for this interface (EXT_PORT)
+                * Enable the GMII GTX external clock
+                * Use double data rate (DDR mode)
+                */
+               vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+                             CPU_PORT,
+                             VSC73XX_ADVPORTM,
+                             VSC73XX_ADVPORTM_EXT_PORT |
+                             VSC73XX_ADVPORTM_ENA_GTX |
+                             VSC73XX_ADVPORTM_DDR_MODE);
+       }
+
+       /* This is the MAC configuration that always needs to happen
+        * after a PHY or the CPU port comes up or goes down.
+        */
+       if (!phydev->link) {
+               int maxloop = 10;
+
+               dev_dbg(vsc->dev, "port %d: went down\n",
+                       port);
+
+               /* Disable RX on this port */
+               vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+                                   VSC73XX_MAC_CFG,
+                                   VSC73XX_MAC_CFG_RX_EN, 0);
+
+               /* Discard packets */
+               vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+                                   VSC73XX_ARBDISC, BIT(port), BIT(port));
+
+               /* Wait until queue is empty */
+               vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
+                            VSC73XX_ARBEMPTY, &val);
+               while (!(val & BIT(port))) {
+                       msleep(1);
+                       vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
+                                    VSC73XX_ARBEMPTY, &val);
+                       if (--maxloop == 0) {
+                               dev_err(vsc->dev,
+                                       "timeout waiting for block arbiter\n");
+                               /* Continue anyway */
+                               break;
+                       }
+               }
+
+               /* Put this port into reset */
+               vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
+                             VSC73XX_MAC_CFG_RESET);
+
+               /* Accept packets again */
+               vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+                                   VSC73XX_ARBDISC, BIT(port), 0);
+
+               /* Allow backward dropping of frames from this port */
+               vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+                                   VSC73XX_SBACKWDROP, BIT(port), BIT(port));
+
+               /* Receive mask (disable forwarding) */
+               vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+                                   VSC73XX_RECVMASK, BIT(port), 0);
+
+               return;
+       }
+
+       /* Figure out what speed was negotiated */
+       if (phydev->speed == SPEED_1000) {
+               dev_dbg(vsc->dev, "port %d: 1000 Mbit mode full duplex\n",
+                       port);
+
+               /* Set up default for internal port or external RGMII */
+               if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+                       val = VSC73XX_MAC_CFG_1000M_F_RGMII;
+               else
+                       val = VSC73XX_MAC_CFG_1000M_F_PHY;
+               vsc73xx_adjust_enable_port(vsc, port, phydev, val);
+       } else if (phydev->speed == SPEED_100) {
+               if (phydev->duplex == DUPLEX_FULL) {
+                       val = VSC73XX_MAC_CFG_100_10M_F_PHY;
+                       dev_dbg(vsc->dev,
+                               "port %d: 100 Mbit full duplex mode\n",
+                               port);
+               } else {
+                       val = VSC73XX_MAC_CFG_100_10M_H_PHY;
+                       dev_dbg(vsc->dev,
+                               "port %d: 100 Mbit half duplex mode\n",
+                               port);
+               }
+               vsc73xx_adjust_enable_port(vsc, port, phydev, val);
+       } else if (phydev->speed == SPEED_10) {
+               if (phydev->duplex == DUPLEX_FULL) {
+                       val = VSC73XX_MAC_CFG_100_10M_F_PHY;
+                       dev_dbg(vsc->dev,
+                               "port %d: 10 Mbit full duplex mode\n",
+                               port);
+               } else {
+                       val = VSC73XX_MAC_CFG_100_10M_H_PHY;
+                       dev_dbg(vsc->dev,
+                               "port %d: 10 Mbit half duplex mode\n",
+                               port);
+               }
+               vsc73xx_adjust_enable_port(vsc, port, phydev, val);
+       } else {
+               dev_err(vsc->dev,
+                       "could not adjust link: unknown speed\n");
+       }
+
+       /* Enable port (forwarding) in the receive mask */
+       vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+                           VSC73XX_RECVMASK, BIT(port), BIT(port));
+}
+
+static int vsc73xx_port_enable(struct dsa_switch *ds, int port,
+                              struct phy_device *phy)
+{
+       struct vsc73xx *vsc = ds->priv;
+
+       dev_info(vsc->dev, "enable port %d\n", port);
+       vsc73xx_init_port(vsc, port);
+
+       return 0;
+}
+
+static void vsc73xx_port_disable(struct dsa_switch *ds, int port,
+                                struct phy_device *phy)
+{
+       struct vsc73xx *vsc = ds->priv;
+
+       /* Just put the port into reset */
+       vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port,
+                     VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
+}
+
+static const struct vsc73xx_counter *
+vsc73xx_find_counter(struct vsc73xx *vsc,
+                    u8 counter,
+                    bool tx)
+{
+       const struct vsc73xx_counter *cnts;
+       int num_cnts;
+       int i;
+
+       if (tx) {
+               cnts = vsc73xx_tx_counters;
+               num_cnts = ARRAY_SIZE(vsc73xx_tx_counters);
+       } else {
+               cnts = vsc73xx_rx_counters;
+               num_cnts = ARRAY_SIZE(vsc73xx_rx_counters);
+       }
+
+       for (i = 0; i < num_cnts; i++) {
+               const struct vsc73xx_counter *cnt;
+
+               cnt = &cnts[i];
+               if (cnt->counter == counter)
+                       return cnt;
+       }
+
+       return NULL;
+}
+
+static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+                               uint8_t *data)
+{
+       const struct vsc73xx_counter *cnt;
+       struct vsc73xx *vsc = ds->priv;
+       u8 indices[6];
+       int i, j;
+       u32 val;
+       int ret;
+
+       if (stringset != ETH_SS_STATS)
+               return;
+
+       ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port,
+                          VSC73XX_C_CFG, &val);
+       if (ret)
+               return;
+
+       indices[0] = (val & 0x1f); /* RX counter 0 */
+       indices[1] = ((val >> 5) & 0x1f); /* RX counter 1 */
+       indices[2] = ((val >> 10) & 0x1f); /* RX counter 2 */
+       indices[3] = ((val >> 16) & 0x1f); /* TX counter 0 */
+       indices[4] = ((val >> 21) & 0x1f); /* TX counter 1 */
+       indices[5] = ((val >> 26) & 0x1f); /* TX counter 2 */
+
+       /* The first counter is the RX octets */
+       j = 0;
+       strncpy(data + j * ETH_GSTRING_LEN,
+               "RxEtherStatsOctets", ETH_GSTRING_LEN);
+       j++;
+
+       /* Each port supports recording 3 RX counters and 3 TX counters,
+        * so figure out which counters we use in this set-up and return
+        * their names. The hardware default counters will be number of
+        * packets on RX/TX, combined broadcast+multicast packets RX/TX and
+        * total error packets RX/TX.
+        */
+       for (i = 0; i < 3; i++) {
+               cnt = vsc73xx_find_counter(vsc, indices[i], false);
+               if (cnt)
+                       strncpy(data + j * ETH_GSTRING_LEN,
+                               cnt->name, ETH_GSTRING_LEN);
+               j++;
+       }
+
+       /* TX stats begin with the number of TX octets */
+       strncpy(data + j * ETH_GSTRING_LEN,
+               "TxEtherStatsOctets", ETH_GSTRING_LEN);
+       j++;
+
+       for (i = 3; i < 6; i++) {
+               cnt = vsc73xx_find_counter(vsc, indices[i], true);
+               if (cnt)
+                       strncpy(data + j * ETH_GSTRING_LEN,
+                               cnt->name, ETH_GSTRING_LEN);
+               j++;
+       }
+}
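+
+/* Illustrative example: with the hardware default C_CFG selection the
+ * three RX counters are indices 0, 1 and 2 (packets, broadcast+multicast,
+ * total errors) and likewise for TX, matching the tables above. The full
+ * string set is then RxEtherStatsOctets, 3 RX counters, TxEtherStatsOctets
+ * and 3 TX counters: the 8 entries reported by vsc73xx_get_sset_count().
+ */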
+
+static int vsc73xx_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+       /* We only support ETH_SS_STATS */
+       if (sset != ETH_SS_STATS)
+               return 0;
+       /* RX and TX octets, then 3 RX counters and 3 TX counters */
+       return 8;
+}
+
+static void vsc73xx_get_ethtool_stats(struct dsa_switch *ds, int port,
+                                     uint64_t *data)
+{
+       struct vsc73xx *vsc = ds->priv;
+       u8 regs[] = {
+               VSC73XX_RXOCT,
+               VSC73XX_C_RX0,
+               VSC73XX_C_RX1,
+               VSC73XX_C_RX2,
+               VSC73XX_TXOCT,
+               VSC73XX_C_TX0,
+               VSC73XX_C_TX1,
+               VSC73XX_C_TX2,
+       };
+       u32 val;
+       int ret;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(regs); i++) {
+               ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MAC, port,
+                                  regs[i], &val);
+               if (ret) {
+                       dev_err(vsc->dev, "error reading counter %d\n", i);
+                       return;
+               }
+               data[i] = val;
+       }
+}
+
+static const struct dsa_switch_ops vsc73xx_ds_ops = {
+       .get_tag_protocol = vsc73xx_get_tag_protocol,
+       .setup = vsc73xx_setup,
+       .phy_read = vsc73xx_phy_read,
+       .phy_write = vsc73xx_phy_write,
+       .adjust_link = vsc73xx_adjust_link,
+       .get_strings = vsc73xx_get_strings,
+       .get_ethtool_stats = vsc73xx_get_ethtool_stats,
+       .get_sset_count = vsc73xx_get_sset_count,
+       .port_enable = vsc73xx_port_enable,
+       .port_disable = vsc73xx_port_disable,
+};
+
+static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+       struct vsc73xx *vsc = gpiochip_get_data(chip);
+       u32 val;
+       int ret;
+
+       ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+                          VSC73XX_GPIO, &val);
+       if (ret)
+               return ret;
+
+       return !!(val & BIT(offset));
+}
+
+static void vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+                            int val)
+{
+       struct vsc73xx *vsc = gpiochip_get_data(chip);
+       u32 tmp = val ? BIT(offset) : 0;
+
+       vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+                           VSC73XX_GPIO, BIT(offset), tmp);
+}
+
+static int vsc73xx_gpio_direction_output(struct gpio_chip *chip,
+                                        unsigned int offset, int val)
+{
+       struct vsc73xx *vsc = gpiochip_get_data(chip);
+       u32 tmp = val ? BIT(offset) : 0;
+
+       return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+                                  VSC73XX_GPIO, BIT(offset + 4) | BIT(offset),
+                                  BIT(offset + 4) | tmp);
+}
+
+static int vsc73xx_gpio_direction_input(struct gpio_chip *chip,
+                                       unsigned int offset)
+{
+       struct vsc73xx *vsc = gpiochip_get_data(chip);
+
+       return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+                                  VSC73XX_GPIO, BIT(offset + 4), 0);
+}
+
+static int vsc73xx_gpio_get_direction(struct gpio_chip *chip,
+                                     unsigned int offset)
+{
+       struct vsc73xx *vsc = gpiochip_get_data(chip);
+       u32 val;
+       int ret;
+
+       ret = vsc73xx_read(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+                          VSC73XX_GPIO, &val);
+       if (ret)
+               return ret;
+
+       return !(val & BIT(offset + 4));
+}
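
A sketch of the VSC73XX_GPIO register layout these callbacks imply — inferred from the code above rather than quoted from a datasheet, so treat it as an assumption:

    /* Assumed layout of VSC73XX_GPIO, inferred from the callbacks:
     *   bits 3:0 - line value for GPIOs 0..3
     *   bits 7:4 - direction for GPIOs 0..3, 1 = output, 0 = input
     */
    #define VSC73XX_GPIO_VAL(line)  BIT(line)
    #define VSC73XX_GPIO_DIR(line)  BIT((line) + 4)
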
+
+static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
+{
+       int ret;
+
+       vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x",
+                                      vsc->chipid);
+       vsc->gc.ngpio = 4;
+       vsc->gc.owner = THIS_MODULE;
+       vsc->gc.parent = vsc->dev;
+       vsc->gc.of_node = vsc->dev->of_node;
+       vsc->gc.base = -1;
+       vsc->gc.get = vsc73xx_gpio_get;
+       vsc->gc.set = vsc73xx_gpio_set;
+       vsc->gc.direction_input = vsc73xx_gpio_direction_input;
+       vsc->gc.direction_output = vsc73xx_gpio_direction_output;
+       vsc->gc.get_direction = vsc73xx_gpio_get_direction;
+       vsc->gc.can_sleep = true;
+       ret = devm_gpiochip_add_data(vsc->dev, &vsc->gc, vsc);
+       if (ret) {
+               dev_err(vsc->dev, "unable to register GPIO chip\n");
+               return ret;
+       }
+       return 0;
+}
+
+static int vsc73xx_probe(struct spi_device *spi)
+{
+       struct device *dev = &spi->dev;
+       struct vsc73xx *vsc;
+       int ret;
+
+       vsc = devm_kzalloc(dev, sizeof(*vsc), GFP_KERNEL);
+       if (!vsc)
+               return -ENOMEM;
+
+       spi_set_drvdata(spi, vsc);
+       vsc->spi = spi_dev_get(spi);
+       vsc->dev = dev;
+       mutex_init(&vsc->lock);
+
+       /* Release reset, if any */
+       vsc->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(vsc->reset)) {
+               dev_err(dev, "failed to get RESET GPIO\n");
+               return PTR_ERR(vsc->reset);
+       }
+       if (vsc->reset)
+               /* Wait 20ms according to datasheet table 245 */
+               msleep(20);
+
+       spi->mode = SPI_MODE_0;
+       spi->bits_per_word = 8;
+       ret = spi_setup(spi);
+       if (ret < 0) {
+               dev_err(dev, "spi setup failed.\n");
+               return ret;
+       }
+
+       ret = vsc73xx_detect(vsc);
+       if (ret) {
+               dev_err(dev, "no chip found (%d)\n", ret);
+               return -ENODEV;
+       }
+
+       eth_random_addr(vsc->addr);
+       dev_info(vsc->dev,
+                "MAC for control frames: %02X:%02X:%02X:%02X:%02X:%02X\n",
+                vsc->addr[0], vsc->addr[1], vsc->addr[2],
+                vsc->addr[3], vsc->addr[4], vsc->addr[5]);
+
+       /* The VSC7395 switch chips have 5+1 ports which means 5
+        * ordinary ports and a sixth CPU port facing the processor
+        * with an RGMII interface. These ports are numbered 0..4
+        * and 6, so they leave a "hole" in the port map for port 5,
+        * which is invalid.
+        *
+        * The VSC7398 has 8 ports, port 7 is again the CPU port.
+        *
+        * We allocate 8 ports and avoid access to the nonexistent
+        * ports.
+        */
+       vsc->ds = dsa_switch_alloc(dev, 8);
+       if (!vsc->ds)
+               return -ENOMEM;
+       vsc->ds->priv = vsc;
+
+       vsc->ds->ops = &vsc73xx_ds_ops;
+       ret = dsa_register_switch(vsc->ds);
+       if (ret) {
+               dev_err(dev, "unable to register switch (%d)\n", ret);
+               return ret;
+       }
+
+       ret = vsc73xx_gpio_probe(vsc);
+       if (ret) {
+               dsa_unregister_switch(vsc->ds);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int vsc73xx_remove(struct spi_device *spi)
+{
+       struct vsc73xx *vsc = spi_get_drvdata(spi);
+
+       dsa_unregister_switch(vsc->ds);
+       gpiod_set_value(vsc->reset, 1);
+
+       return 0;
+}
+
+static const struct of_device_id vsc73xx_of_match[] = {
+       {
+               .compatible = "vitesse,vsc7385",
+       },
+       {
+               .compatible = "vitesse,vsc7388",
+       },
+       {
+               .compatible = "vitesse,vsc7395",
+       },
+       {
+               .compatible = "vitesse,vsc7398",
+       },
+       { },
+};
+MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
+
+static struct spi_driver vsc73xx_driver = {
+       .probe = vsc73xx_probe,
+       .remove = vsc73xx_remove,
+       .driver = {
+               .name = "vsc73xx",
+               .of_match_table = vsc73xx_of_match,
+       },
+};
+module_spi_driver(vsc73xx_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 driver");
+MODULE_LICENSE("GPL v2");
index 8fbfe9ce2fa53a69673671871465e816b2386ba6..22555e7fa752c67e63c1ee15c0d49562d60fff51 100644 (file)
@@ -20,7 +20,7 @@ obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
 obj-$(CONFIG_NET_VENDOR_ARC) += arc/
 obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
 obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
-obj-$(CONFIG_NET_CADENCE) += cadence/
+obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/
 obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
 obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
 obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
@@ -68,7 +68,7 @@ obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
 obj-$(CONFIG_LPC_ENET) += nxp/
 obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/
 obj-$(CONFIG_ETHOC) += ethoc.o
-obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/
+obj-$(CONFIG_NET_VENDOR_PACKET_ENGINES) += packetengines/
 obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
 obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
 obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
@@ -80,8 +80,7 @@ obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
 obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
 obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
 obj-$(CONFIG_NET_VENDOR_SIS) += sis/
-obj-$(CONFIG_SFC) += sfc/
-obj-$(CONFIG_SFC_FALCON) += sfc/falcon/
+obj-$(CONFIG_NET_VENDOR_SOLARFLARE) += sfc/
 obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
 obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
 obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
index 8f71b79b494900fa4c03564e4f88ded8aa8831c5..08945baee48ad45df9f4bbf83afebbe0ab15607c 100644 (file)
@@ -1933,7 +1933,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
        while (idx != rxretprd) {
                struct ring_info *rip;
                struct sk_buff *skb;
-               struct rx_desc *rxdesc, *retdesc;
+               struct rx_desc *retdesc;
                u32 skbidx;
                int bd_flags, desc_type, mapsize;
                u16 csum;
@@ -1959,19 +1959,16 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
                case 0:
                        rip = &ap->skb->rx_std_skbuff[skbidx];
                        mapsize = ACE_STD_BUFSIZE;
-                       rxdesc = &ap->rx_std_ring[skbidx];
                        std_count++;
                        break;
                case BD_FLG_JUMBO:
                        rip = &ap->skb->rx_jumbo_skbuff[skbidx];
                        mapsize = ACE_JUMBO_BUFSIZE;
-                       rxdesc = &ap->rx_jumbo_ring[skbidx];
                        atomic_dec(&ap->cur_jumbo_bufs);
                        break;
                case BD_FLG_MINI:
                        rip = &ap->skb->rx_mini_skbuff[skbidx];
                        mapsize = ACE_MINI_BUFSIZE;
-                       rxdesc = &ap->rx_mini_ring[skbidx];
                        mini_count++;
                        break;
                default:
index f2af87d70594fca1b3c42085858fb323da295506..c673ac2df65bdf3f9b4d03403be705b581505657 100644 (file)
@@ -2213,7 +2213,8 @@ static void ena_netpoll(struct net_device *netdev)
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
-                           void *accel_priv, select_queue_fallback_t fallback)
+                           struct net_device *sb_dev,
+                           select_queue_fallback_t fallback)
 {
        u16 qid;
        /* we suspect that this is good for in-kernel network services that
@@ -2223,7 +2224,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
        if (skb_rx_queue_recorded(skb))
                qid = skb_get_rx_queue(skb);
        else
-               qid = fallback(dev, skb);
+               qid = fallback(dev, skb, NULL);
 
        return qid;
 }
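
This is one instance of a tree-wide signature change (bcm_sysport and bnx2x below get the same treatment): the opaque accel_priv argument becomes an explicit struct net_device *sb_dev, and the fallback helper grows a matching third argument. A minimal sketch of a callback under the new prototype — "foo" is a hypothetical driver name, not part of this patch set:

    static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
                                struct net_device *sb_dev,
                                select_queue_fallback_t fallback)
    {
            /* Reuse the recorded RX queue when present, as ena does
             * above; otherwise defer to the core, passing NULL like the
             * converted drivers in this series.
             */
            if (skb_rx_queue_recorded(skb))
                    return skb_get_rx_queue(skb);

            return fallback(dev, skb, NULL);
    }
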
index d5c15e8bb3de706b12d343ee1a50477b23ab3d3f..f273af136fc7c995ee4df2bdb697002c916c0a48 100644 (file)
@@ -173,7 +173,7 @@ config SUNLANCE
 
 config AMD_XGBE
        tristate "AMD 10GbE Ethernet driver"
-       depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM && HAS_DMA
+       depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM
        depends on X86 || ARM64 || COMPILE_TEST
        select BITREVERSE
        select CRC32
index 1205861b631896a0fc6b19ac608483d8e4b27d4e..eedd3f3dd22e220186578235c9f5f0b0072e80f6 100644 (file)
@@ -1,6 +1,5 @@
 config NET_XGENE_V2
        tristate "APM X-Gene SoC Ethernet-v2 Driver"
-       depends on HAS_DMA
        depends on ARCH_XGENE || COMPILE_TEST
        help
          This is the Ethernet driver for the on-chip ethernet interface
index afccb033177b39233a333994835713d577339c2f..e4e33c900b577161e77974bd62c45030cb2762e8 100644 (file)
@@ -1,6 +1,5 @@
 config NET_XGENE
        tristate "APM X-Gene SoC Ethernet Driver"
-       depends on HAS_DMA
        depends on ARCH_XGENE || COMPILE_TEST
        select PHYLIB
        select MDIO_XGENE
index f2d8063a2cefd8f7581f0e2182b81b1ce773a92a..08c9fa6ca71f273b5695887005ce6a5025fbc9c5 100644 (file)
@@ -11,6 +11,7 @@
 
 #include "aq_ethtool.h"
 #include "aq_nic.h"
+#include "aq_vec.h"
 
 static void aq_ethtool_get_regs(struct net_device *ndev,
                                struct ethtool_regs *regs, void *p)
@@ -284,6 +285,117 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev,
        return aq_nic_update_interrupt_moderation_settings(aq_nic);
 }
 
+static int aq_ethtool_nway_reset(struct net_device *ndev)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+       if (unlikely(!aq_nic->aq_fw_ops->renegotiate))
+               return -EOPNOTSUPP;
+
+       if (netif_running(ndev))
+               return aq_nic->aq_fw_ops->renegotiate(aq_nic->aq_hw);
+
+       return 0;
+}
+
+static void aq_ethtool_get_pauseparam(struct net_device *ndev,
+                                     struct ethtool_pauseparam *pause)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+       pause->autoneg = 0;
+
+       if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
+               pause->rx_pause = 1;
+       if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
+               pause->tx_pause = 1;
+}
+
+static int aq_ethtool_set_pauseparam(struct net_device *ndev,
+                                    struct ethtool_pauseparam *pause)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       int err = 0;
+
+       if (!aq_nic->aq_fw_ops->set_flow_control)
+               return -EOPNOTSUPP;
+
+       if (pause->autoneg == AUTONEG_ENABLE)
+               return -EOPNOTSUPP;
+
+       if (pause->rx_pause)
+               aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_RX;
+       else
+               aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_RX;
+
+       if (pause->tx_pause)
+               aq_nic->aq_hw->aq_nic_cfg->flow_control |= AQ_NIC_FC_TX;
+       else
+               aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX;
+
+       err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw);
+
+       return err;
+}
+
+static void aq_get_ringparam(struct net_device *ndev,
+                            struct ethtool_ringparam *ring)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
+
+       ring->rx_pending = aq_nic_cfg->rxds;
+       ring->tx_pending = aq_nic_cfg->txds;
+
+       ring->rx_max_pending = aq_nic_cfg->aq_hw_caps->rxds_max;
+       ring->tx_max_pending = aq_nic_cfg->aq_hw_caps->txds_max;
+}
+
+static int aq_set_ringparam(struct net_device *ndev,
+                           struct ethtool_ringparam *ring)
+{
+       int err = 0;
+       bool ndev_running = false;
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(aq_nic);
+       const struct aq_hw_caps_s *hw_caps = aq_nic_cfg->aq_hw_caps;
+
+       if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
+               err = -EOPNOTSUPP;
+               goto err_exit;
+       }
+
+       if (netif_running(ndev)) {
+               ndev_running = true;
+               dev_close(ndev);
+       }
+
+       aq_nic_free_vectors(aq_nic);
+
+       aq_nic_cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
+       aq_nic_cfg->rxds = min(aq_nic_cfg->rxds, hw_caps->rxds_max);
+       aq_nic_cfg->rxds = ALIGN(aq_nic_cfg->rxds, AQ_HW_RXD_MULTIPLE);
+
+       aq_nic_cfg->txds = max(ring->tx_pending, hw_caps->txds_min);
+       aq_nic_cfg->txds = min(aq_nic_cfg->txds, hw_caps->txds_max);
+       aq_nic_cfg->txds = ALIGN(aq_nic_cfg->txds, AQ_HW_TXD_MULTIPLE);
+
+       for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < aq_nic_cfg->vecs;
+            aq_nic->aq_vecs++) {
+               aq_nic->aq_vec[aq_nic->aq_vecs] =
+                   aq_vec_alloc(aq_nic, aq_nic->aq_vecs, aq_nic_cfg);
+               if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) {
+                       err = -ENOMEM;
+                       goto err_exit;
+               }
+       }
+       if (ndev_running)
+               err = dev_open(ndev);
+
+err_exit:
+       return err;
+}
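
The descriptor-count handling above follows a clamp-then-align pattern worth calling out; a minimal generic sketch of the same arithmetic, using the kernel's min()/max()/ALIGN() macros (the helper name is hypothetical):

    /* Clamp a requested ring size into [lo, hi], then round up to the
     * hardware's required multiple - the same steps aq_set_ringparam()
     * applies with rxds_min/rxds_max and AQ_HW_RXD_MULTIPLE.
     */
    static u32 aq_clamp_ring_size(u32 requested, u32 lo, u32 hi, u32 multiple)
    {
            u32 v = min(max(requested, lo), hi);

            return ALIGN(v, multiple);
    }
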
+
 const struct ethtool_ops aq_ethtool_ops = {
        .get_link            = aq_ethtool_get_link,
        .get_regs_len        = aq_ethtool_get_regs_len,
@@ -291,6 +403,11 @@ const struct ethtool_ops aq_ethtool_ops = {
        .get_drvinfo         = aq_ethtool_get_drvinfo,
        .get_strings         = aq_ethtool_get_strings,
        .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size,
+       .nway_reset          = aq_ethtool_nway_reset,
+       .get_ringparam       = aq_get_ringparam,
+       .set_ringparam       = aq_set_ringparam,
+       .get_pauseparam      = aq_ethtool_get_pauseparam,
+       .set_pauseparam      = aq_ethtool_set_pauseparam,
        .get_rxfh_key_size   = aq_ethtool_get_rss_key_size,
        .get_rxfh            = aq_ethtool_get_rss,
        .get_rxnfc           = aq_ethtool_get_rxnfc,
index a2d416b24ffc251c71d002a9befe825d5c585fbc..1a51152029c38da32b0a1ef865340ef272ed5419 100644 (file)
@@ -24,8 +24,10 @@ struct aq_hw_caps_s {
        u64 link_speed_msk;
        unsigned int hw_priv_flags;
        u32 media_type;
-       u32 rxds;
-       u32 txds;
+       u32 rxds_max;
+       u32 txds_max;
+       u32 rxds_min;
+       u32 txds_min;
        u32 txhwb_alignment;
        u32 irq_mask;
        u32 vecs;
@@ -98,6 +100,9 @@ struct aq_stats_s {
 #define AQ_HW_MEDIA_TYPE_TP    1U
 #define AQ_HW_MEDIA_TYPE_FIBRE 2U
 
+#define AQ_HW_TXD_MULTIPLE 8U
+#define AQ_HW_RXD_MULTIPLE 8U
+
 struct aq_hw_s {
        atomic_t flags;
        u8 rbl_enabled:1;
@@ -197,25 +202,30 @@ struct aq_hw_ops {
 
        int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
 
-       int (*hw_deinit)(struct aq_hw_s *self);
-
        int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state);
 };
 
 struct aq_fw_ops {
        int (*init)(struct aq_hw_s *self);
 
+       int (*deinit)(struct aq_hw_s *self);
+
        int (*reset)(struct aq_hw_s *self);
 
+       int (*renegotiate)(struct aq_hw_s *self);
+
        int (*get_mac_permanent)(struct aq_hw_s *self, u8 *mac);
 
        int (*set_link_speed)(struct aq_hw_s *self, u32 speed);
 
-       int (*set_state)(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state);
+       int (*set_state)(struct aq_hw_s *self,
+                        enum hal_atl_utils_fw_state_e state);
 
        int (*update_link_status)(struct aq_hw_s *self);
 
        int (*update_stats)(struct aq_hw_s *self);
+
+       int (*set_flow_control)(struct aq_hw_s *self);
 };
 
 #endif /* AQ_HW_H */
index 1a1a6380c128c4522b330907cc16258f0e012189..21cfb327d791045b2bcb5128f34b27489cc50812 100644 (file)
@@ -89,8 +89,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
        aq_nic_rss_init(self, cfg->num_rss_queues);
 
        /* descriptors */
-       cfg->rxds = min(cfg->aq_hw_caps->rxds, AQ_CFG_RXDS_DEF);
-       cfg->txds = min(cfg->aq_hw_caps->txds, AQ_CFG_TXDS_DEF);
+       cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
+       cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);
 
        /* rss rings */
        cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
@@ -761,10 +761,14 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     100baseT_Full);
 
-       if (self->aq_nic_cfg.flow_control)
+       if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     Pause);
 
+       if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    Asym_Pause);
+
        if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
                ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
        else
@@ -879,7 +883,7 @@ void aq_nic_deinit(struct aq_nic_s *self)
                aq_vec_deinit(aq_vec);
 
        if (self->power_state == AQ_HW_POWER_STATE_D0) {
-               (void)self->aq_hw_ops->hw_deinit(self->aq_hw);
+               (void)self->aq_fw_ops->deinit(self->aq_hw);
        } else {
                (void)self->aq_hw_ops->hw_set_power(self->aq_hw,
                                                   self->power_state);
index 67e2f9fb9402f3ed419ee46c47a7f6bd4d8e1ffc..ed7fe6f2e360dd9d46d6a87c675dce64b0fb9cac 100644 (file)
 #include "hw_atl_a0_internal.h"
 
 #define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
-       .is_64_dma = true, \
-       .msix_irqs = 4U, \
-       .irq_mask = ~0U, \
-       .vecs = HW_ATL_A0_RSS_MAX, \
-       .tcs = HW_ATL_A0_TC_MAX, \
-       .rxd_alignment = 1U, \
-       .rxd_size = HW_ATL_A0_RXD_SIZE, \
-       .rxds = 248U, \
-       .txd_alignment = 1U, \
-       .txd_size = HW_ATL_A0_TXD_SIZE, \
-       .txds = 8U * 1024U, \
-       .txhwb_alignment = 4096U, \
-       .tx_rings = HW_ATL_A0_TX_RINGS, \
-       .rx_rings = HW_ATL_A0_RX_RINGS, \
-       .hw_features = NETIF_F_HW_CSUM | \
-                       NETIF_F_RXHASH | \
-                       NETIF_F_RXCSUM | \
-                       NETIF_F_SG | \
-                       NETIF_F_TSO, \
+       .is_64_dma = true,                \
+       .msix_irqs = 4U,                  \
+       .irq_mask = ~0U,                  \
+       .vecs = HW_ATL_A0_RSS_MAX,        \
+       .tcs = HW_ATL_A0_TC_MAX,          \
+       .rxd_alignment = 1U,              \
+       .rxd_size = HW_ATL_A0_RXD_SIZE,   \
+       .rxds_max = HW_ATL_A0_MAX_RXD,    \
+       .rxds_min = HW_ATL_A0_MIN_RXD,    \
+       .txd_alignment = 1U,              \
+       .txd_size = HW_ATL_A0_TXD_SIZE,   \
+       .txds_max = HW_ATL_A0_MAX_TXD,    \
+       .txds_min = HW_ATL_A0_MIN_TXD,    \
+       .txhwb_alignment = 4096U,         \
+       .tx_rings = HW_ATL_A0_TX_RINGS,   \
+       .rx_rings = HW_ATL_A0_RX_RINGS,   \
+       .hw_features = NETIF_F_HW_CSUM |  \
+                       NETIF_F_RXHASH |  \
+                       NETIF_F_RXCSUM |  \
+                       NETIF_F_SG |      \
+                       NETIF_F_TSO,      \
        .hw_priv_flags = IFF_UNICAST_FLT, \
-       .flow_control = true, \
-       .mtu = HW_ATL_A0_MTU_JUMBO, \
-       .mac_regs_count = 88, \
+       .flow_control = true,             \
+       .mtu = HW_ATL_A0_MTU_JUMBO,       \
+       .mac_regs_count = 88,             \
        .hw_alive_check_addr = 0x10U
 
 const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
@@ -875,7 +877,6 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
 const struct aq_hw_ops hw_atl_ops_a0 = {
        .hw_set_mac_address   = hw_atl_a0_hw_mac_addr_set,
        .hw_init              = hw_atl_a0_hw_init,
-       .hw_deinit            = hw_atl_utils_hw_deinit,
        .hw_set_power         = hw_atl_utils_hw_set_power,
        .hw_reset             = hw_atl_a0_hw_reset,
        .hw_start             = hw_atl_a0_hw_start,
index 1d8855558d74b902702902ce740d2a92c97f9bd7..3c94cff57876dcb7d59eee357b05be3a02f6f2b7 100644 (file)
 
 #define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
 
+#define HW_ATL_A0_MIN_RXD \
+       (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL_A0_MIN_TXD \
+       (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
+
+#define HW_ATL_A0_MAX_RXD 8184U
+#define HW_ATL_A0_MAX_TXD 8184U
+
 #endif /* HW_ATL_A0_INTERNAL_H */
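
Assuming AQ_CFG_SKB_FRAGS_MAX is 32U (its value in aq_cfg.h at the time, though that is an assumption here rather than part of this hunk), the new minimum works out to:

    HW_ATL_A0_MIN_RXD = ALIGN(32 + 1, 8) = ALIGN(33, 8) = 40

so a ring can never be shrunk below what one maximally fragmented skb (its frags plus the linear part) needs.
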
index 819f6bcf9b4ee76e620691ae3861a1fad213eca9..9dd4f497676cfb983618868cc539e0e3e93df721 100644 (file)
 #include "hw_atl_llh_internal.h"
 
 #define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
-       .is_64_dma = true,      \
-       .msix_irqs = 4U,        \
-       .irq_mask = ~0U,        \
-       .vecs = HW_ATL_B0_RSS_MAX,      \
-       .tcs = HW_ATL_B0_TC_MAX,        \
-       .rxd_alignment = 1U,            \
-       .rxd_size = HW_ATL_B0_RXD_SIZE, \
-       .rxds = 4U * 1024U,             \
-       .txd_alignment = 1U,            \
-       .txd_size = HW_ATL_B0_TXD_SIZE, \
-       .txds = 8U * 1024U,             \
-       .txhwb_alignment = 4096U,       \
-       .tx_rings = HW_ATL_B0_TX_RINGS, \
-       .rx_rings = HW_ATL_B0_RX_RINGS, \
-       .hw_features = NETIF_F_HW_CSUM | \
-                       NETIF_F_RXCSUM | \
-                       NETIF_F_RXHASH | \
-                       NETIF_F_SG |  \
-                       NETIF_F_TSO | \
-                       NETIF_F_LRO,  \
-       .hw_priv_flags = IFF_UNICAST_FLT,   \
-       .flow_control = true,           \
-       .mtu = HW_ATL_B0_MTU_JUMBO,     \
-       .mac_regs_count = 88,           \
+       .is_64_dma = true,                \
+       .msix_irqs = 4U,                  \
+       .irq_mask = ~0U,                  \
+       .vecs = HW_ATL_B0_RSS_MAX,        \
+       .tcs = HW_ATL_B0_TC_MAX,          \
+       .rxd_alignment = 1U,              \
+       .rxd_size = HW_ATL_B0_RXD_SIZE,   \
+       .rxds_max = HW_ATL_B0_MAX_RXD,    \
+       .rxds_min = HW_ATL_B0_MIN_RXD,    \
+       .txd_alignment = 1U,              \
+       .txd_size = HW_ATL_B0_TXD_SIZE,   \
+       .txds_max = HW_ATL_B0_MAX_TXD,    \
+       .txds_min = HW_ATL_B0_MIN_TXD,    \
+       .txhwb_alignment = 4096U,         \
+       .tx_rings = HW_ATL_B0_TX_RINGS,   \
+       .rx_rings = HW_ATL_B0_RX_RINGS,   \
+       .hw_features = NETIF_F_HW_CSUM |  \
+                       NETIF_F_RXCSUM |  \
+                       NETIF_F_RXHASH |  \
+                       NETIF_F_SG |      \
+                       NETIF_F_TSO |     \
+                       NETIF_F_LRO,      \
+       .hw_priv_flags = IFF_UNICAST_FLT, \
+       .flow_control = true,             \
+       .mtu = HW_ATL_B0_MTU_JUMBO,       \
+       .mac_regs_count = 88,             \
        .hw_alive_check_addr = 0x10U
 
 const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
@@ -933,7 +935,6 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
 const struct aq_hw_ops hw_atl_ops_b0 = {
        .hw_set_mac_address   = hw_atl_b0_hw_mac_addr_set,
        .hw_init              = hw_atl_b0_hw_init,
-       .hw_deinit            = hw_atl_utils_hw_deinit,
        .hw_set_power         = hw_atl_utils_hw_set_power,
        .hw_reset             = hw_atl_b0_hw_reset,
        .hw_start             = hw_atl_b0_hw_start,
index 405d1455c22250bd6f5b7dcce531b269758a305b..28568f5fa74b0f2d72d460c755fad49c09089889 100644 (file)
 #define HW_ATL_INTR_MODER_MAX  0x1FF
 #define HW_ATL_INTR_MODER_MIN  0xFF
 
+#define HW_ATL_B0_MIN_RXD \
+       (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
+#define HW_ATL_B0_MIN_TXD \
+       (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))
+
+#define HW_ATL_B0_MAX_RXD 8184U
+#define HW_ATL_B0_MAX_TXD 8184U
+
 /* HW layer capabilities */
 
 #endif /* HW_ATL_B0_INTERNAL_H */
index e652d86b87d40eb9c0050c7ce525c2a5e3ab2513..c965e65d07db3be832b0edd332eaedfb17976143 100644 (file)
 #define HW_ATL_MPI_CONTROL_ADR  0x0368U
 #define HW_ATL_MPI_STATE_ADR    0x036CU
 
-#define HW_ATL_MPI_STATE_MSK    0x00FFU
-#define HW_ATL_MPI_STATE_SHIFT  0U
-#define HW_ATL_MPI_SPEED_MSK    0xFFFF0000U
-#define HW_ATL_MPI_SPEED_SHIFT  16U
+#define HW_ATL_MPI_STATE_MSK      0x00FFU
+#define HW_ATL_MPI_STATE_SHIFT    0U
+#define HW_ATL_MPI_SPEED_MSK      0x00FF0000U
+#define HW_ATL_MPI_SPEED_SHIFT    16U
+#define HW_ATL_MPI_DIRTY_WAKE_MSK 0x02000000U
 
 #define HW_ATL_MPI_DAISY_CHAIN_STATUS  0x704
 #define HW_ATL_MPI_BOOT_EXIT_CODE      0x388
@@ -525,19 +526,20 @@ static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
 {
        u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
 
-       val = (val & HW_ATL_MPI_STATE_MSK) | (speed << HW_ATL_MPI_SPEED_SHIFT);
+       val = val & ~HW_ATL_MPI_SPEED_MSK;
+       val |= speed << HW_ATL_MPI_SPEED_SHIFT;
        aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
 
        return 0;
 }
 
-void hw_atl_utils_mpi_set(struct aq_hw_s *self,
-                         enum hal_atl_utils_fw_state_e state,
-                         u32 speed)
+static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
+                                     enum hal_atl_utils_fw_state_e state)
 {
        int err = 0;
        u32 transaction_id = 0;
        struct hw_aq_atl_utils_mbox_header mbox;
+       u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
 
        if (state == MPI_RESET) {
                hw_atl_utils_mpi_read_mbox(self, &mbox);
@@ -551,21 +553,21 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
                if (err < 0)
                        goto err_exit;
        }
+       /* On interface DEINIT we disable DW (raise the bit);
+        * otherwise we enable DW (clear the bit).
+        */
+       if (state == MPI_DEINIT || state == MPI_POWER)
+               val |= HW_ATL_MPI_DIRTY_WAKE_MSK;
+       else
+               val &= ~HW_ATL_MPI_DIRTY_WAKE_MSK;
 
-       aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR,
-                       (speed << HW_ATL_MPI_SPEED_SHIFT) | state);
-
-err_exit:;
-}
-
-static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
-                                     enum hal_atl_utils_fw_state_e state)
-{
-       u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
+       /* Set new state bits */
+       val = val & ~HW_ATL_MPI_STATE_MSK;
+       val |= state & HW_ATL_MPI_STATE_MSK;
 
-       val = state | (val & HW_ATL_MPI_SPEED_MSK);
        aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);
-       return 0;
+err_exit:
+       return err;
 }
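
The state write above is the standard read-modify-write idiom for packed register fields: clear the field with its mask, then OR in the new value, leaving the other bits intact. In miniature (hypothetical helper):

    /* Update one masked field of a register image, leaving the other
     * bits - here SPEED and DIRTY_WAKE - untouched.
     */
    static u32 mpi_field_update(u32 reg, u32 mask, u32 val)
    {
            return (reg & ~mask) | (val & mask);
    }
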
 
 int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
@@ -721,16 +723,18 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
        *p = chip_features;
 }
 
-int hw_atl_utils_hw_deinit(struct aq_hw_s *self)
+static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
 {
-       hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U);
+       hw_atl_utils_mpi_set_speed(self, 0);
+       hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
        return 0;
 }
 
 int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
                              unsigned int power_state)
 {
-       hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U);
+       hw_atl_utils_mpi_set_speed(self, 0);
+       hw_atl_utils_mpi_set_state(self, MPI_POWER);
        return 0;
 }
 
@@ -823,10 +827,12 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
 
 const struct aq_fw_ops aq_fw_1x_ops = {
        .init = hw_atl_utils_mpi_create,
+       .deinit = hw_atl_fw1x_deinit,
        .reset = NULL,
        .get_mac_permanent = hw_atl_utils_get_mac_permanent,
        .set_link_speed = hw_atl_utils_mpi_set_speed,
        .set_state = hw_atl_utils_mpi_set_state,
        .update_link_status = hw_atl_utils_mpi_get_link_status,
        .update_stats = hw_atl_utils_update_stats,
+       .set_flow_control = NULL,
 };
index cd8f18f39c611f8f709f71c7a1c23da8332a3fa4..b875590efcbddbeb5983f2da99f9785d4298734c 100644 (file)
@@ -239,6 +239,41 @@ enum hw_atl_fw2x_caps_hi {
        CAPS_HI_TRANSACTION_ID,
 };
 
+enum hw_atl_fw2x_ctrl {
+       CTRL_RESERVED1 = 0x00,
+       CTRL_RESERVED2,
+       CTRL_RESERVED3,
+       CTRL_PAUSE,
+       CTRL_ASYMMETRIC_PAUSE,
+       CTRL_RESERVED4,
+       CTRL_RESERVED5,
+       CTRL_RESERVED6,
+       CTRL_1GBASET_FD_EEE,
+       CTRL_2P5GBASET_FD_EEE,
+       CTRL_5GBASET_FD_EEE,
+       CTRL_10GBASET_FD_EEE,
+       CTRL_THERMAL_SHUTDOWN,
+       CTRL_PHY_LOGS,
+       CTRL_EEE_AUTO_DISABLE,
+       CTRL_PFC,
+       CTRL_WAKE_ON_LINK,
+       CTRL_CABLE_DIAG,
+       CTRL_TEMPERATURE,
+       CTRL_DOWNSHIFT,
+       CTRL_PTP_AVB,
+       CTRL_RESERVED7,
+       CTRL_LINK_DROP,
+       CTRL_SLEEP_PROXY,
+       CTRL_WOL,
+       CTRL_MAC_STOP,
+       CTRL_EXT_LOOPBACK,
+       CTRL_INT_LOOPBACK,
+       CTRL_RESERVED8,
+       CTRL_WOL_TIMER,
+       CTRL_STATISTICS,
+       CTRL_FORCE_RECONNECT,
+};
+
 struct aq_hw_s;
 struct aq_fw_ops;
 struct aq_hw_caps_s;
index 39cd3a27fe776cdee650fe7e55dfb2bdabd35ac9..e37943760a58b2a88b33de295e73dfbac71fc5bf 100644 (file)
 #define HW_ATL_FW2X_MPI_STATE_ADDR     0x370
 #define HW_ATL_FW2X_MPI_STATE2_ADDR    0x374
 
+static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed);
+static int aq_fw2x_set_state(struct aq_hw_s *self,
+                            enum hal_atl_utils_fw_state_e state);
+
 static int aq_fw2x_init(struct aq_hw_s *self)
 {
        int err = 0;
@@ -39,6 +43,16 @@ static int aq_fw2x_init(struct aq_hw_s *self)
        return err;
 }
 
+static int aq_fw2x_deinit(struct aq_hw_s *self)
+{
+       int err = aq_fw2x_set_link_speed(self, 0);
+
+       if (!err)
+               err = aq_fw2x_set_state(self, MPI_DEINIT);
+
+       return err;
+}
+
 static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
 {
        enum hw_atl_fw2x_rate rate = 0;
@@ -73,10 +87,38 @@ static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed)
        return 0;
 }
 
+static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state)
+{
+       if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
+               *mpi_state |= BIT(CAPS_HI_PAUSE);
+       else
+               *mpi_state &= ~BIT(CAPS_HI_PAUSE);
+
+       if (self->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
+               *mpi_state |= BIT(CAPS_HI_ASYMMETRIC_PAUSE);
+       else
+               *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE);
+}
+
 static int aq_fw2x_set_state(struct aq_hw_s *self,
                             enum hal_atl_utils_fw_state_e state)
 {
-       /* No explicit state in 2x fw */
+       u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+       switch (state) {
+       case MPI_INIT:
+               mpi_state &= ~BIT(CAPS_HI_LINK_DROP);
+               aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+               break;
+       case MPI_DEINIT:
+               mpi_state |= BIT(CAPS_HI_LINK_DROP);
+               break;
+       case MPI_RESET:
+       case MPI_POWER:
+               /* No actions */
+               break;
+       }
+       aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
        return 0;
 }
 
@@ -173,12 +215,37 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self)
        return hw_atl_utils_update_stats(self);
 }
 
+static int aq_fw2x_renegotiate(struct aq_hw_s *self)
+{
+       u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+       mpi_opts |= BIT(CTRL_FORCE_RECONNECT);
+
+       aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+       return 0;
+}
+
+static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
+{
+       u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+       aq_fw2x_set_mpi_flow_control(self, &mpi_state);
+
+       aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_state);
+
+       return 0;
+}
+
 const struct aq_fw_ops aq_fw_2x_ops = {
        .init = aq_fw2x_init,
+       .deinit = aq_fw2x_deinit,
        .reset = NULL,
+       .renegotiate = aq_fw2x_renegotiate,
        .get_mac_permanent = aq_fw2x_get_mac_permanent,
        .set_link_speed = aq_fw2x_set_link_speed,
        .set_state = aq_fw2x_set_state,
        .update_link_status = aq_fw2x_update_link_status,
        .update_stats = aq_fw2x_update_stats,
+       .set_flow_control   = aq_fw2x_set_flow_control,
 };
index a445de6837a6c8bff1c250d4702612f4795b2477..94efc6477bdcdab9f5be1ddf0162a26617f46bcf 100644 (file)
@@ -12,8 +12,8 @@
 
 #define NIC_MAJOR_DRIVER_VERSION           2
 #define NIC_MINOR_DRIVER_VERSION           0
-#define NIC_BUILD_DRIVER_VERSION           2
-#define NIC_REVISION_DRIVER_VERSION        1
+#define NIC_BUILD_DRIVER_VERSION           3
+#define NIC_REVISION_DRIVER_VERSION        0
 
 #define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
 
index e743ddf46343302fe69c4c562c7cba239fe06dd9..5d0ab8e74b680cc6e75de6e91b79115b4637daa7 100644 (file)
@@ -24,7 +24,8 @@ config ARC_EMAC_CORE
 config ARC_EMAC
        tristate "ARC EMAC support"
        select ARC_EMAC_CORE
-       depends on OF_IRQ && OF_NET && HAS_DMA && (ARC || COMPILE_TEST)
+       depends on OF_IRQ && OF_NET
+       depends on ARC || COMPILE_TEST
        ---help---
           On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x,
           the non-standard on-chip ethernet device ARC EMAC 10/100 is used.
@@ -33,7 +34,8 @@ config ARC_EMAC
 config EMAC_ROCKCHIP
        tristate "Rockchip EMAC support"
        select ARC_EMAC_CORE
-       depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA && (ARCH_ROCKCHIP || COMPILE_TEST)
+       depends on OF_IRQ && OF_NET && REGULATOR
+       depends on ARCH_ROCKCHIP || COMPILE_TEST
        ---help---
          Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers.
          This selects Rockchip SoC glue layer support for the
index 567ee54504bcd6eba897009259f691b74b77609e..6d32211349275d260dbc7817c639df71c91b7084 100644 (file)
@@ -1279,7 +1279,6 @@ static void alx_check_link(struct alx_priv *alx)
        struct alx_hw *hw = &alx->hw;
        unsigned long flags;
        int old_speed;
-       u8 old_duplex;
        int err;
 
        /* clear PHY internal interrupt status, otherwise the main
@@ -1288,7 +1287,6 @@ static void alx_check_link(struct alx_priv *alx)
        alx_clear_phy_intr(hw);
 
        old_speed = hw->link_speed;
-       old_duplex = hw->duplex;
        err = alx_read_phy_link(hw);
        if (err < 0)
                goto reset;
@@ -1897,13 +1895,19 @@ static int alx_resume(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct alx_priv *alx = pci_get_drvdata(pdev);
        struct alx_hw *hw = &alx->hw;
+       int err;
 
        alx_reset_phy(hw);
 
        if (!netif_running(alx->dev))
                return 0;
        netif_device_attach(alx->dev);
-       return __alx_open(alx, true);
+
+       rtnl_lock();
+       err = __alx_open(alx, true);
+       rtnl_unlock();
+
+       return err;
 }
 
 static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
index 8ba7f8ff3434000f57f45968d8a70148f4647cc8..392f564d8fd436f271f08040855f333eca0f15d7 100644 (file)
@@ -1,5 +1,6 @@
 config NET_VENDOR_AURORA
        bool "Aurora VLSI devices"
+       default y
        help
          If you have a network (Ethernet) device belonging to this class,
          say Y.
index e94159507847b33962f99d63561301b924fd2dd1..c8d1f8fa4713401321767218423f6a55375519be 100644 (file)
@@ -304,12 +304,10 @@ static int nb8800_poll(struct napi_struct *napi, int budget)
 
 again:
        do {
-               struct nb8800_rx_buf *rxb;
                unsigned int len;
 
                next = (last + 1) % RX_DESC_COUNT;
 
-               rxb = &priv->rx_bufs[next];
                rxd = &priv->rx_descs[next];
 
                if (!rxd->report)
index af75156919edfead9bbe1e223b92d45d4fdd444e..4c3bfde6e8de00f2010b1329e05c8b36a16e158f 100644 (file)
@@ -157,7 +157,6 @@ config BGMAC
 config BGMAC_BCMA
        tristate "Broadcom iProc GBit BCMA support"
        depends on BCMA && BCMA_HOST_SOC
-       depends on HAS_DMA
        depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
        select BGMAC
        select PHYLIB
@@ -170,7 +169,6 @@ config BGMAC_BCMA
 
 config BGMAC_PLATFORM
        tristate "Broadcom iProc GBit platform support"
-       depends on HAS_DMA
        depends on ARCH_BCM_IPROC || COMPILE_TEST
        depends on OF
        select BGMAC
index d5fca2e5a9bc34ad6edfa295e378dfe12078c0e5..eb890c4b3b2d2764563fc50d708079ac4a411d7d 100644 (file)
@@ -2107,7 +2107,7 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
 };
 
 static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
-                                   void *accel_priv,
+                                   struct net_device *sb_dev,
                                    select_queue_fallback_t fallback)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2116,7 +2116,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
        unsigned int q, port;
 
        if (!netdev_uses_dsa(dev))
-               return fallback(dev, skb);
+               return fallback(dev, skb, NULL);
 
        /* DSA tagging layer will have configured the correct queue */
        q = BRCM_TAG_GET_QUEUE(queue);
@@ -2124,7 +2124,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
        tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
 
        if (unlikely(!tx_ring))
-               return fallback(dev, skb);
+               return fallback(dev, skb, NULL);
 
        return tx_ring->index;
 }
index e6ea8e61f96ddc17c09a7f90f3f37727ca49f7ca..4c94d9218bba9c75cf8183a8b0feca7a9aaba18c 100644 (file)
@@ -236,7 +236,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
 {
        struct device *dma_dev = bgmac->dma_dev;
        int empty_slot;
-       bool freed = false;
        unsigned bytes_compl = 0, pkts_compl = 0;
 
        /* The last slot that hardware didn't consume yet */
@@ -279,7 +278,6 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
 
                slot->dma_addr = 0;
                ring->start++;
-               freed = true;
        }
 
        if (!pkts_compl)
index d847e1b9c37b5afff33e799e919e3ff39b5cd1e8..be1506169076f0a89f6a621d01dce81afe720ba7 100644 (file)
@@ -1533,6 +1533,7 @@ struct bnx2x {
        struct link_vars        link_vars;
        u32                     link_cnt;
        struct bnx2x_link_report_data last_reported_link;
+       bool                    force_link_down;
 
        struct mdio_if_info     mdio;
 
index 8cd73ff5debc276aec53d1f056fe3040875b2c0a..5a727d4729da7348075b75101154cca3cf515073 100644 (file)
@@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp)
 {
        struct bnx2x_link_report_data cur_data;
 
+       if (bp->force_link_down) {
+               bp->link_vars.link_up = 0;
+               return;
+       }
+
        /* reread mf_cfg */
        if (IS_PF(bp) && !CHIP_IS_E1(bp))
                bnx2x_read_mf_cfg(bp);
@@ -1905,7 +1910,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 }
 
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-                      void *accel_priv, select_queue_fallback_t fallback)
+                      struct net_device *sb_dev,
+                      select_queue_fallback_t fallback)
 {
        struct bnx2x *bp = netdev_priv(dev);
 
@@ -1927,7 +1933,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
        }
 
        /* select a non-FCoE queue */
-       return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+       return fallback(dev, skb, NULL) %
+              (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -2817,6 +2824,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                bp->pending_max = 0;
        }
 
+       bp->force_link_down = false;
        if (bp->port.pmf) {
                rc = bnx2x_initial_phy_init(bp, load_mode);
                if (rc)
index a8ce5c55bbb0ca29b5cca28171cd94733e7260eb..0e508e5defce315f2e5254ca238afe26b523054a 100644 (file)
@@ -497,7 +497,8 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
 
 /* select_queue callback */
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-                      void *accel_priv, select_queue_fallback_t fallback);
+                      struct net_device *sb_dev,
+                      select_queue_fallback_t fallback);
 
 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
index 22243c480a05341238850e71b9bd196bc705a064..98d4c5a3ff21171257765c0eda4333f0a8484143 100644 (file)
@@ -6339,6 +6339,7 @@ int bnx2x_set_led(struct link_params *params,
                 */
                if (!vars->link_up)
                        break;
+               /* else: fall through */
        case LED_MODE_ON:
                if (((params->phy[EXT_PHY1].type ==
                          PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
@@ -12521,11 +12522,13 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
        switch (link_config  & PORT_FEATURE_LINK_SPEED_MASK) {
        case PORT_FEATURE_LINK_SPEED_10M_HALF:
                phy->req_duplex = DUPLEX_HALF;
+               /* fall through */
        case PORT_FEATURE_LINK_SPEED_10M_FULL:
                phy->req_line_speed = SPEED_10;
                break;
        case PORT_FEATURE_LINK_SPEED_100M_HALF:
                phy->req_duplex = DUPLEX_HALF;
+               /* fall through */
        case PORT_FEATURE_LINK_SPEED_100M_FULL:
                phy->req_line_speed = SPEED_100;
                break;
index 5b1ed240bf18be0963cc580ab4256b6adc924046..71362b7f60402545c3b4aa2bc391a1e2d3cd7f7f 100644 (file)
@@ -8561,11 +8561,11 @@ int bnx2x_set_int_mode(struct bnx2x *bp)
                               bp->num_queues,
                               1 + bp->num_cnic_queues);
 
-               /* falling through... */
+               /* fall through */
        case BNX2X_INT_MODE_MSI:
                bnx2x_enable_msi(bp);
 
-               /* falling through... */
+               /* fall through */
        case BNX2X_INT_MODE_INTX:
                bp->num_ethernet_queues = 1;
                bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
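
The comment edits in this and the surrounding hunks normalize fall-through annotations so that gcc's -Wimplicit-fallthrough and static checkers can distinguish a deliberate fall-through from a missing break. The convention, in miniature (hypothetical helpers):

    switch (mode) {
    case MODE_MSIX:
            setup_msix();
            /* fall through - MSI is the fallback for MSI-X */
    case MODE_MSI:
            setup_msi();
            break;
    }
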
@@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
                bp->sp_rtnl_state = 0;
                smp_mb();
 
+               /* Immediately indicate link as down */
+               bp->link_vars.link_up = 0;
+               bp->force_link_down = true;
+               netif_carrier_off(bp->dev);
+               BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
+
                bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
                /* When the ret value shows an allocation failure,
                 * the nic is rebooted again. If open still fails, an error
index 8baf9d3eb4b1c1a1dd84abe031b7c19c8dbb7370..3f4d2c8da21a3a848b4149758883333522b6f77a 100644 (file)
@@ -3258,7 +3258,7 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
        /* DEL command deletes all currently configured MACs */
        case BNX2X_MCAST_CMD_DEL:
                o->set_registry_size(o, 0);
-               /* Don't break */
+               /* fall through */
 
        /* RESTORE command will restore the entire multicast configuration */
        case BNX2X_MCAST_CMD_RESTORE:
@@ -3592,7 +3592,7 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
        /* DEL command deletes all currently configured MACs */
        case BNX2X_MCAST_CMD_DEL:
                o->set_registry_size(o, 0);
-               /* Don't break */
+               /* fall through */
 
        /* RESTORE command will restore the entire multicast configuration */
        case BNX2X_MCAST_CMD_RESTORE:
index dc77bfded8652d7d200a123838cf8e07b5201ff8..62da465377340249af3e7e0671f7802272ba410a 100644 (file)
@@ -1827,6 +1827,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
                DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
                   vf->abs_vfid, qidx);
                bnx2x_vf_handle_rss_update_eqe(bp, vf);
+               /* fall through */
        case EVENT_RING_OPCODE_VF_FLR:
                /* Do nothing for now */
                return 0;
index 176fc9f4d7defe6a9d5b513902c97f56d732b323..2cf726e31461b72e684a07cf053027ff1eea6cc2 100644 (file)
@@ -1727,7 +1727,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
                                            speed);
                }
                set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
-               /* fall thru */
+               /* fall through */
        }
        case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
                set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
@@ -3012,13 +3012,6 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
                          bp->hwrm_cmd_resp_dma_addr);
 
        bp->hwrm_cmd_resp_addr = NULL;
-       if (bp->hwrm_dbg_resp_addr) {
-               dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
-                                 bp->hwrm_dbg_resp_addr,
-                                 bp->hwrm_dbg_resp_dma_addr);
-
-               bp->hwrm_dbg_resp_addr = NULL;
-       }
 }
 
 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -3030,12 +3023,6 @@ static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
                                                   GFP_KERNEL);
        if (!bp->hwrm_cmd_resp_addr)
                return -ENOMEM;
-       bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
-                                                   HWRM_DBG_REG_BUF_SIZE,
-                                                   &bp->hwrm_dbg_resp_dma_addr,
-                                                   GFP_KERNEL);
-       if (!bp->hwrm_dbg_resp_addr)
-               netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
 
        return 0;
 }
@@ -7984,7 +7971,7 @@ static int bnxt_setup_tc_block(struct net_device *dev,
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
-                                            bp, bp);
+                                            bp, bp, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
                return 0;
index 9b14eb610b9f653b61092d74b3ab9257a84383d9..709ba86d3a02ab12196fe3629d41a8679a9b5a1f 100644 (file)
@@ -1287,9 +1287,6 @@ struct bnxt {
        dma_addr_t              hwrm_short_cmd_req_dma_addr;
        void                    *hwrm_cmd_resp_addr;
        dma_addr_t              hwrm_cmd_resp_dma_addr;
-       void                    *hwrm_dbg_resp_addr;
-       dma_addr_t              hwrm_dbg_resp_dma_addr;
-#define HWRM_DBG_REG_BUF_SIZE  128
 
        struct rx_port_stats    *hw_rx_port_stats;
        struct tx_port_stats    *hw_tx_port_stats;
index 402fa32f7a8802140513f7cf12501f51e7ca9d0f..7bd96ab4f7c5793e763b7cc9ba135e7242c10827 100644 (file)
@@ -21,16 +21,99 @@ static const struct devlink_ops bnxt_dl_ops = {
 #endif /* CONFIG_BNXT_SRIOV */
 };
 
+static const struct bnxt_dl_nvm_param nvm_params[] = {
+       {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
+        BNXT_NVM_SHARED_CFG, 1},
+};
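
Supporting another firmware-backed option would presumably be one more row in this table; a purely hypothetical sketch (the ID and offset below are invented, not taken from the bnxt NVM map):

    /* Hypothetical second entry: another 1-bit shared-config option. */
    {DEVLINK_PARAM_GENERIC_ID_EXAMPLE, NVM_OFF_EXAMPLE,
     BNXT_NVM_SHARED_CFG, 1},
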
+
+static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+                            int msg_len, union devlink_param_value *val)
+{
+       struct hwrm_nvm_variable_input *req = msg;
+       void *data_addr = NULL, *buf = NULL;
+       struct bnxt_dl_nvm_param nvm_param;
+       int bytesize, idx = 0, rc, i;
+       dma_addr_t data_dma_addr;
+
+       /* Get/Set NVM CFG parameter is supported only on PFs */
+       if (BNXT_VF(bp))
+               return -EPERM;
+
+       for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
+               if (nvm_params[i].id == param_id) {
+                       nvm_param = nvm_params[i];
+                       break;
+               }
+       }
+
+       if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
+               idx = bp->pf.port_id;
+       else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
+               idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
+
+       bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
+       if (nvm_param.num_bits == 1)
+               buf = &val->vbool;
+
+       data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize,
+                                       &data_dma_addr, GFP_KERNEL);
+       if (!data_addr)
+               return -ENOMEM;
+
+       req->data_addr = cpu_to_le64(data_dma_addr);
+       req->data_len = cpu_to_le16(nvm_param.num_bits);
+       req->option_num = cpu_to_le16(nvm_param.offset);
+       req->index_0 = cpu_to_le16(idx);
+       if (idx)
+               req->dimensions = cpu_to_le16(1);
+
+       if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE))
+               memcpy(data_addr, buf, bytesize);
+
+       rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+       if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
+               memcpy(buf, data_addr, bytesize);
+
+       dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
+       if (rc)
+               return -EIO;
+       return 0;
+}
+
+static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
+                                struct devlink_param_gset_ctx *ctx)
+{
+       struct hwrm_nvm_get_variable_input req = {0};
+       struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
+       return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+}
+
+static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
+                                struct devlink_param_gset_ctx *ctx)
+{
+       struct hwrm_nvm_set_variable_input req = {0};
+       struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);
+       return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+}
+
+static const struct devlink_param bnxt_dl_params[] = {
+       DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
+                             BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+                             bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+                             NULL),
+};
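
With the parameter registered under DEVLINK_PARAM_CMODE_PERMANENT, userspace reaches it through the devlink param interface; assuming an iproute2 build with param support, the interaction would look something like:

    devlink dev param set pci/0000:3b:00.0 name enable_sriov \
            value true cmode permanent
    devlink dev param show pci/0000:3b:00.0 name enable_sriov

(the PCI address here is, of course, a placeholder).
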
+
 int bnxt_dl_register(struct bnxt *bp)
 {
        struct devlink *dl;
        int rc;
 
-       if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
-               return 0;
-
-       if (bp->hwrm_spec_code < 0x10803) {
-               netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n");
+       if (bp->hwrm_spec_code < 0x10600) {
+               netdev_warn(bp->dev, "Firmware does not support NVM params");
                return -ENOTSUPP;
        }
 
@@ -41,16 +124,34 @@ int bnxt_dl_register(struct bnxt *bp)
        }
 
        bnxt_link_bp_to_dl(bp, dl);
-       bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+
+       /* Add switchdev eswitch mode setting, if SRIOV supported */
+       if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
+           bp->hwrm_spec_code > 0x10803)
+               bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+
        rc = devlink_register(dl, &bp->pdev->dev);
        if (rc) {
-               bnxt_link_bp_to_dl(bp, NULL);
-               devlink_free(dl);
                netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc);
-               return rc;
+               goto err_dl_free;
+       }
+
+       rc = devlink_params_register(dl, bnxt_dl_params,
+                                    ARRAY_SIZE(bnxt_dl_params));
+       if (rc) {
+               netdev_warn(bp->dev, "devlink_params_register failed. rc=%d",
+                           rc);
+               goto err_dl_unreg;
        }
 
        return 0;
+
+err_dl_unreg:
+       devlink_unregister(dl);
+err_dl_free:
+       bnxt_link_bp_to_dl(bp, NULL);
+       devlink_free(dl);
+       return rc;
 }
 
 void bnxt_dl_unregister(struct bnxt *bp)
@@ -60,6 +161,8 @@ void bnxt_dl_unregister(struct bnxt *bp)
        if (!dl)
                return;
 
+       devlink_params_unregister(dl, bnxt_dl_params,
+                                 ARRAY_SIZE(bnxt_dl_params));
        devlink_unregister(dl);
        devlink_free(dl);
 }
index e92a35d8b64204da64e240047a0cb914cc5cb354..2f68dc048390b84300cefd73a974fe3826bb3fdb 100644 (file)
@@ -33,6 +33,21 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
        }
 }
 
+#define NVM_OFF_ENABLE_SRIOV           401
+
+enum bnxt_nvm_dir_type {
+       BNXT_NVM_SHARED_CFG = 40,
+       BNXT_NVM_PORT_CFG,
+       BNXT_NVM_FUNC_CFG,
+};
+
+struct bnxt_dl_nvm_param {
+       u16 id;
+       u16 offset;
+       u16 dir_type;
+       u16 num_bits;
+};
+
 int bnxt_dl_register(struct bnxt *bp);
 void bnxt_dl_unregister(struct bnxt *bp);
 
index 0fe0ea8dce6c7bd3fd64c9dea98daeb3529c9147..c75d7fa6dab66262783a6e79755c85ddaeb76645 100644 (file)
@@ -6201,6 +6201,19 @@ struct hwrm_nvm_install_update_cmd_err {
        u8      unused_0[7];
 };
 
+struct hwrm_nvm_variable_input {
+       __le16  req_type;
+       __le16  cmpl_ring;
+       __le16  seq_id;
+       __le16  target_id;
+       __le64  resp_addr;
+       __le64  data_addr;
+       __le16  data_len;
+       __le16  option_num;
+       __le16  dimensions;
+       __le16  index_0;
+};
+
 /* hwrm_nvm_get_variable_input (size:320b/40B) */
 struct hwrm_nvm_get_variable_input {
        __le16  req_type;
index 795f45024c209e65591a3e9fe60814315ebb3cb0..d0699f39ba346442c1d067e273bf688680bfd103 100644 (file)
@@ -1544,22 +1544,16 @@ void bnxt_tc_flow_stats_work(struct bnxt *bp)
 int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
                         struct tc_cls_flower_offload *cls_flower)
 {
-       int rc = 0;
-
        switch (cls_flower->command) {
        case TC_CLSFLOWER_REPLACE:
-               rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
-               break;
-
+               return bnxt_tc_add_flow(bp, src_fid, cls_flower);
        case TC_CLSFLOWER_DESTROY:
-               rc = bnxt_tc_del_flow(bp, cls_flower);
-               break;
-
+               return bnxt_tc_del_flow(bp, cls_flower);
        case TC_CLSFLOWER_STATS:
-               rc = bnxt_tc_get_flow_stats(bp, cls_flower);
-               break;
+               return bnxt_tc_get_flow_stats(bp, cls_flower);
+       default:
+               return -EOPNOTSUPP;
        }
-       return rc;
 }
 
 static const struct rhashtable_params bnxt_tc_flow_ht_params = {
index 05d4059059062463ee8fa38d16a9a708f96cb411..e31f5d803c1305f71159ca6e2c8bc5ed5f94afa2 100644 (file)
@@ -173,7 +173,7 @@ static int bnxt_vf_rep_setup_tc_block(struct net_device *dev,
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block,
                                             bnxt_vf_rep_setup_tc_block_cb,
-                                            vf_rep, vf_rep);
+                                            vf_rep, vf_rep, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block,
                                        bnxt_vf_rep_setup_tc_block_cb, vf_rep);
@@ -543,9 +543,14 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
                break;
 
        case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+               if (bp->hwrm_spec_code < 0x10803) {
+                       netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n");
+                       rc = -ENOTSUPP;
+                       goto done;
+               }
+
                if (pci_num_vf(bp->pdev) == 0) {
-                       netdev_info(bp->dev,
-                                   "Enable VFs before setting switchdev mode");
+                       netdev_info(bp->dev, "Enable VFs before setting switchdev mode");
                        rc = -EPERM;
                        goto done;
                }
index 1f0e872d06675878c059ab54980c177937630ef6..0584d07c8c33c53a972a64d37e79984e7eb95499 100644 (file)
@@ -219,7 +219,6 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
                rc = bnxt_xdp_set(bp, xdp->prog);
                break;
        case XDP_QUERY_PROG:
-               xdp->prog_attached = !!bp->xdp_prog;
                xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0;
                rc = 0;
                break;
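
With the prog_attached field dropped from struct netdev_bpf, callers now derive attachment state from the program ID instead; a one-line sketch:

    bool attached = xdp->prog_id != 0;  /* replaces the old prog_attached flag */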
index 30273a7717e2df797890da57e229ce31e9d957e2..d83233ae4a15f318ea89acb5da927e71501c2a5e 100644 (file)
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
        id_tbl->max = size;
        id_tbl->next = next;
        spin_lock_init(&id_tbl->lock);
-       id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL);
+       id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
        if (!id_tbl->table)
                return -ENOMEM;
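
The old sizing hard-coded 32-bit words (4-byte units), while the bitmap helpers that later operate on id_tbl->table work on whole unsigned longs. A worked example of the difference on a 64-bit kernel, assuming size = 32:

    /* old: kcalloc(DIV_ROUND_UP(32, 32), 4, ...)          ->  4 bytes
     * new: kcalloc(BITS_TO_LONGS(32), sizeof(long), ...)  ->  8 bytes
     *
     * test_bit()/set_bit() read and write whole unsigned longs, so the
     * 4-byte allocation could be accessed past its end on 64-bit hosts;
     * BITS_TO_LONGS() sizes the array in the units the helpers use.
     */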
 
@@ -2562,7 +2562,6 @@ static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
 
 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
 {
-       struct fcoe_kwqe_destroy *req;
        union l5cm_specific_data l5_data;
        struct cnic_local *cp = dev->cnic_priv;
        struct bnx2x *bp = netdev_priv(dev->netdev);
@@ -2571,7 +2570,6 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
 
        cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
 
-       req = (struct fcoe_kwqe_destroy *) kwqe;
        cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
 
        memset(&l5_data, 0, sizeof(l5_data));
@@ -4090,7 +4088,7 @@ static void cnic_cm_free_mem(struct cnic_dev *dev)
 {
        struct cnic_local *cp = dev->cnic_priv;
 
-       kfree(cp->csk_tbl);
+       kvfree(cp->csk_tbl);
        cp->csk_tbl = NULL;
        cnic_free_id_tbl(&cp->csk_port_tbl);
 }
@@ -4100,8 +4098,8 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
        struct cnic_local *cp = dev->cnic_priv;
        u32 port_id;
 
-       cp->csk_tbl = kcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
-                             GFP_KERNEL);
+       cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
+                              GFP_KERNEL);
        if (!cp->csk_tbl)
                return -ENOMEM;
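
The switch to kvcalloc() pairs with the kvfree() change in the previous hunk: kv* allocations may fall back to vmalloc memory, which kfree() cannot release. A minimal sketch of the required pairing (variable names assumed for illustration):

    struct cnic_sock *tbl;

    tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(*tbl), GFP_KERNEL);
    if (!tbl)
            return -ENOMEM;
    /* ... use tbl ... */
    kvfree(tbl);            /* not kfree(): the memory may be vmalloc'ed */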
 
@@ -5091,13 +5089,12 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
        struct cnic_local *cp = dev->cnic_priv;
        struct bnx2x *bp = netdev_priv(dev->netdev);
        struct cnic_eth_dev *ethdev = cp->ethdev;
-       int func, ret;
+       int ret;
        u32 pfid;
 
        dev->stats_addr = ethdev->addr_drv_info_to_mcp;
        cp->func = bp->pf_num;
 
-       func = CNIC_FUNC(cp);
        pfid = bp->pfid;
 
        ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
index 3be87efdc93d6347da8417ddcd101ed90cc12d8c..0a796d5ec8931d1f532fe38fa5810a756cbc149b 100644 (file)
@@ -721,6 +721,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
+               /* else: fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
@@ -781,6 +782,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
+               /* else: fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
@@ -10706,28 +10708,40 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
        switch (limit) {
        case 16:
                tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
+               /* fall through */
        case 15:
                tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
+               /* fall through */
        case 14:
                tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
+               /* fall through */
        case 13:
                tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
+               /* fall through */
        case 12:
                tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
+               /* fall through */
        case 11:
                tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
+               /* fall through */
        case 10:
                tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
+               /* fall through */
        case 9:
                tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
+               /* fall through */
        case 8:
                tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
+               /* fall through */
        case 7:
                tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
+               /* fall through */
        case 6:
                tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
+               /* fall through */
        case 5:
                tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
+               /* fall through */
        case 4:
                /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
        case 3:
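
These comments are not cosmetic: with -Wimplicit-fallthrough enabled, the compiler accepts a fall-through comment placed directly before the next case label as an explicit annotation and warns otherwise. A minimal sketch of the convention (function names are placeholders):

    switch (limit) {
    case 2:
            clear_rule_two();
            /* fall through */
    case 1:
            clear_rule_one();
            break;
    default:
            break;
    }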
index 427d65a1a1261095a402b833660c83da0113e4c7..b9984015ca8c1a680d6576dad3864aa2c7f99854 100644 (file)
@@ -2,7 +2,7 @@
 # Atmel device configuration
 #
 
-config NET_CADENCE
+config NET_VENDOR_CADENCE
        bool "Cadence devices"
        depends on HAS_IOMEM
        default y
@@ -16,7 +16,7 @@ config NET_CADENCE
          the remaining Atmel network card questions. If you say Y, you will be
          asked for your specific card in the following questions.
 
-if NET_CADENCE
+if NET_VENDOR_CADENCE
 
 config MACB
        tristate "Cadence MACB/GEM support"
@@ -48,4 +48,4 @@ config MACB_PCI
          To compile this driver as a module, choose M here: the module
          will be called macb_pci.
 
-endif # NET_CADENCE
+endif # NET_VENDOR_CADENCE
index 3e93df5d4e3b2573f88cc427e7eefc6d1930e3ff..96cc03a6d9420f52c7c871f52f32c8e9b8973bbb 100644 (file)
@@ -3726,6 +3726,8 @@ static int at91ether_init(struct platform_device *pdev)
        int err;
        u32 reg;
 
+       bp->queues[0].bp = bp;
+
        dev->netdev_ops = &at91ether_netdev_ops;
        dev->ethtool_ops = &macb_ethtool_ops;
 
index 2220c771092b46e8fb583d46ea99d5829e1793d0..678835136bf8069326067feaa46f8465db4e38d4 100644 (file)
@@ -170,10 +170,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 
        if (delta > TSU_NSEC_MAX_VAL) {
                gem_tsu_get_time(&bp->ptp_clock_info, &now);
-               if (sign)
-                       now = timespec64_sub(now, then);
-               else
-                       now = timespec64_add(now, then);
+               now = timespec64_add(now, then);
 
                gem_tsu_set_time(&bp->ptp_clock_info,
                                 (const struct timespec64 *)&now);
index 07d2201530d26c85e26cf0987553451acad936a6..9fdd496b90ff47cb0f1147777ae7b9ca0071076d 100644 (file)
@@ -1,6 +1,6 @@
 config NET_CALXEDA_XGMAC
        tristate "Calxeda 1G/10G XGMAC Ethernet driver"
-       depends on HAS_IOMEM && HAS_DMA
+       depends on HAS_IOMEM
        depends on ARCH_HIGHBANK || COMPILE_TEST
        select CRC32
        help
index 043e3c11c42bd407d47561bec2a2e0acd525f12b..4c3a5c354497555c3b7bff8213ba2357143e036a 100644 (file)
@@ -4,7 +4,6 @@
 
 config NET_VENDOR_CAVIUM
        bool "Cavium ethernet drivers"
-       depends on PCI
        default y
        ---help---
          Select this option if you want to enable Cavium network support.
index 929d485a3a2fea6b7f13f389c9e90cda183da737..e088dedc17478353f229ca084151bedb01465c40 100644 (file)
@@ -493,6 +493,9 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
        for (q_no = srn; q_no < ern; q_no++) {
                reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
 
+               /* clear IPTR */
+               reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;
+
                /* set DPTR */
                reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
 
index 9338a00083788059736edefd472710c00b30f9af..1f8b7f65125401ef250f9cf57c266fbb55da3f38 100644 (file)
@@ -165,6 +165,9 @@ static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct)
                reg_val =
                    octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
 
+               /* clear IPTR */
+               reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;
+
                /* set DPTR */
                reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
 
index 8a815bb5717732331293e9fba5b00d3ca23aaf88..4edb1584b32fcba13dcda91ed6a4dc8e0d5345fc 100644 (file)
@@ -684,7 +684,7 @@ static void lio_sync_octeon_time(struct work_struct *work)
        lt = (struct lio_time *)sc->virtdptr;
 
        /* Get time of the day */
-       getnstimeofday64(&ts);
+       ktime_get_real_ts64(&ts);
        lt->sec = ts.tv_sec;
        lt->nsec = ts.tv_nsec;
        octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
@@ -2628,7 +2628,7 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
 
        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
        if (ret < 0) {
-               dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+               dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
                        ret);
        }
        return ret;
@@ -3299,7 +3299,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 {
        struct lio *lio = NULL;
        struct net_device *netdev;
-       u8 mac[6], i, j, *fw_ver;
+       u8 mac[6], i, j, *fw_ver, *micro_ver;
+       unsigned long micro;
+       u32 cur_ver;
        struct octeon_soft_command *sc;
        struct liquidio_if_cfg_context *ctx;
        struct liquidio_if_cfg_resp *resp;
@@ -3429,6 +3431,14 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
                                 fw_ver);
                }
 
+               /* extract micro version field; point past '<maj>.<min>.' */
+               micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
+               if (kstrtoul(micro_ver, 10, &micro) != 0)
+                       micro = 0;
+               octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
+               octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
+               octeon_dev->fw_info.ver.rev = micro;
+
                octeon_swap_8B_data((u64 *)(&resp->cfg_info),
                                    (sizeof(struct liquidio_if_cfg_info)) >> 3);
 
@@ -3569,9 +3579,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
                for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
                        u8 vfmac[ETH_ALEN];
 
-                       random_ether_addr(&vfmac[0]);
-                       if (__liquidio_set_vf_mac(netdev, j,
-                                                 &vfmac[0], false)) {
+                       eth_random_addr(vfmac);
+                       if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
                                dev_err(&octeon_dev->pci_dev->dev,
                                        "Error setting VF%d MAC address\n",
                                        j);
@@ -3672,7 +3681,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
                        OCTEON_CN2350_25GB_SUBSYS_ID ||
                    octeon_dev->subsystem_id ==
                        OCTEON_CN2360_25GB_SUBSYS_ID) {
-                       liquidio_get_speed(lio);
+                       cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
+                                            octeon_dev->fw_info.ver.min,
+                                            octeon_dev->fw_info.ver.rev);
+
+                       /* speed control unsupported in f/w older than 1.7.2 */
+                       if (cur_ver < OCT_FW_VER(1, 7, 2)) {
+                               dev_info(&octeon_dev->pci_dev->dev,
+                                        "speed setting not supported by f/w.");
+                               octeon_dev->speed_setting = 25;
+                               octeon_dev->no_speed_setting = 1;
+                       } else {
+                               liquidio_get_speed(lio);
+                       }
 
                        if (octeon_dev->speed_setting == 0) {
                                octeon_dev->speed_setting = 25;
index 7fa0212873aceaf876dd144a2d5e06dd199659b6..b77835724dc84d037c88bcb9ef7153db8f1f6e48 100644 (file)
@@ -1693,7 +1693,7 @@ liquidio_vlan_rx_kill_vid(struct net_device *netdev,
 
        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
        if (ret < 0) {
-               dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+               dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
                        ret);
        }
        return ret;
index 7f97ae48efed74a306ec4ec0851761435ae4344d..0cc2338d8d2a81216d7a91e0d48ad704b7921fbd 100644 (file)
@@ -902,7 +902,7 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
         *
         * Octeon always uses UTC time. so timezone information is not sent.
         */
-       getnstimeofday64(&ts);
+       ktime_get_real_ts64(&ts);
        ret = snprintf(boottime, MAX_BOOTTIME_SIZE,
                       " time_sec=%lld time_nsec=%ld",
                       (s64)ts.tv_sec, ts.tv_nsec);
index 94a4ed88d6188ca4ed44ab5ebd2673832d33c254..d99ca6ba23a4f4e9f182e7e491fec6066a2f9a04 100644 (file)
@@ -288,8 +288,17 @@ struct oct_fw_info {
         */
        u32 app_mode;
        char   liquidio_firmware_version[32];
+       /* Fields extracted from legacy string 'liquidio_firmware_version' */
+       struct {
+               u8  maj;
+               u8  min;
+               u8  rev;
+       } ver;
 };
 
+#define OCT_FW_VER(maj, min, rev) \
+       (((u32)(maj) << 16) | ((u32)(min) << 8) | ((u32)(rev)))
+
 /* wrappers around work structs */
 struct cavium_wk {
        struct delayed_work work;
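
A worked example of the packing, which is what makes the cur_ver < OCT_FW_VER(1, 7, 2) comparison in the LiquidIO PF driver above behave like a lexicographic version check:

    /* OCT_FW_VER(1, 7, 2) = (1 << 16) | (7 << 8) | 2 = 0x010702
     * OCT_FW_VER(1, 6, 9) = (1 << 16) | (6 << 8) | 9 = 0x010609
     * 0x010609 < 0x010702, so firmware 1.6.9 correctly compares as older.
     */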
index 1f2e75da28f833c2f6973670fa5d113184b606fc..d5d9e47daa4bedfde9e79b13cea25453d2a67a29 100644 (file)
@@ -110,8 +110,8 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 
        memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);
 
-       dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
-               iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);
+       dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n",
+               iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count);
 
        iq->txpciq.u64 = txpciq.u64;
        iq->fill_threshold = (u32)conf->db_min;
index 135766c4296b737c7ffbf34026d61e3f0cf9d13d..768f584f8392732b19d6e889ed456a3c6de8e809 100644 (file)
@@ -1848,7 +1848,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
        case XDP_SETUP_PROG:
                return nicvf_xdp_setup(nic, xdp->prog);
        case XDP_QUERY_PROG:
-               xdp->prog_attached = !!nic->xdp_prog;
                xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0;
                return 0;
        default:
index 3c5057868ab3a94758529375b4125da02a47fb8f..aaf7985aef4ccddf6120fd24eaa33cba119fd652 100644 (file)
@@ -281,12 +281,18 @@ struct cudbg_tid_data {
 
 #define CUDBG_NUM_ULPTX 11
 #define CUDBG_NUM_ULPTX_READ 512
+#define CUDBG_NUM_ULPTX_ASIC 6
+#define CUDBG_NUM_ULPTX_ASIC_READ 128
+
+#define CUDBG_ULPTX_LA_REV 1
 
 struct cudbg_ulptx_la {
        u32 rdptr[CUDBG_NUM_ULPTX];
        u32 wrptr[CUDBG_NUM_ULPTX];
        u32 rddata[CUDBG_NUM_ULPTX];
        u32 rd_data[CUDBG_NUM_ULPTX][CUDBG_NUM_ULPTX_READ];
+       u32 rdptr_asic[CUDBG_NUM_ULPTX_ASIC_READ];
+       u32 rddata_asic[CUDBG_NUM_ULPTX_ASIC_READ][CUDBG_NUM_ULPTX_ASIC];
 };
 
 #define CUDBG_CHAC_PBT_ADDR 0x2800
index 0afcfe99bff304acaf2a701d2e61db40e6629074..b1eb843035ee52069954b76238225fc2961f823f 100644 (file)
@@ -2586,15 +2586,24 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_ulptx_la *ulptx_la_buff;
+       struct cudbg_ver_hdr *ver_hdr;
        u32 i, j;
        int rc;
 
-       rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulptx_la),
+       rc = cudbg_get_buff(pdbg_init, dbg_buff,
+                           sizeof(struct cudbg_ver_hdr) +
+                           sizeof(struct cudbg_ulptx_la),
                            &temp_buff);
        if (rc)
                return rc;
 
-       ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
+       ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
+       ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
+       ver_hdr->revision = CUDBG_ULPTX_LA_REV;
+       ver_hdr->size = sizeof(struct cudbg_ulptx_la);
+
+       ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data +
+                                                 sizeof(*ver_hdr));
        for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
                ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
                                                      ULP_TX_LA_RDPTR_0_A +
@@ -2610,6 +2619,25 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
                                t4_read_reg(padap,
                                            ULP_TX_LA_RDDATA_0_A + 0x10 * i);
        }
+
+       for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) {
+               t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1);
+               ulptx_la_buff->rdptr_asic[i] =
+                               t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A);
+               ulptx_la_buff->rddata_asic[i][0] =
+                               t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A);
+               ulptx_la_buff->rddata_asic[i][1] =
+                               t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A);
+               ulptx_la_buff->rddata_asic[i][2] =
+                               t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A);
+               ulptx_la_buff->rddata_asic[i][3] =
+                               t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A);
+               ulptx_la_buff->rddata_asic[i][4] =
+                               t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A);
+               ulptx_la_buff->rddata_asic[i][5] =
+                               t4_read_reg(padap, PM_RX_BASE_ADDR);
+       }
+
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
 
index 0dbe2d9e22d60d12a2e04770210d2c56fe4279ce..3da9299cd786e399b31852bbbf9544a9fc57c8fb 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/spinlock.h>
 #include <linux/timer.h>
 #include <linux/vmalloc.h>
+#include <linux/rhashtable.h>
 #include <linux/etherdevice.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
@@ -319,6 +320,21 @@ struct vpd_params {
        u8 na[MACADDR_LEN + 1];
 };
 
+/* Maximum resources provisioned for a PCI PF.
+ */
+struct pf_resources {
+       unsigned int nvi;               /* N virtual interfaces */
+       unsigned int neq;               /* N egress Qs */
+       unsigned int nethctrl;          /* N egress ETH or CTRL Qs */
+       unsigned int niqflint;          /* N ingress Qs/w free list(s) & intr */
+       unsigned int niq;               /* N ingress Qs */
+       unsigned int tc;                /* PCI-E traffic class */
+       unsigned int pmask;             /* port access rights mask */
+       unsigned int nexactf;           /* N exact MPS filters */
+       unsigned int r_caps;            /* read capabilities */
+       unsigned int wx_caps;           /* write/execute capabilities */
+};
+
 struct pci_params {
        unsigned int vpd_cap_addr;
        unsigned char speed;
@@ -346,6 +362,7 @@ struct adapter_params {
        struct sge_params sge;
        struct tp_params  tp;
        struct vpd_params vpd;
+       struct pf_resources pfres;
        struct pci_params pci;
        struct devlog_params devlog;
        enum pcie_memwin drv_memwin;
@@ -521,6 +538,15 @@ enum {
        MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
 };
 
+enum {
+       PRIV_FLAG_PORT_TX_VM_BIT,
+};
+
+#define PRIV_FLAG_PORT_TX_VM           BIT(PRIV_FLAG_PORT_TX_VM_BIT)
+
+#define PRIV_FLAGS_ADAP                        0
+#define PRIV_FLAGS_PORT                        PRIV_FLAG_PORT_TX_VM
+
 struct adapter;
 struct sge_rspq;
 
@@ -557,6 +583,7 @@ struct port_info {
        struct hwtstamp_config tstamp_config;
        bool ptp_enable;
        struct sched_table *sched_tbl;
+       u32 eth_flags;
 };
 
 struct dentry;
@@ -867,6 +894,7 @@ struct adapter {
        unsigned int flags;
        unsigned int adap_idx;
        enum chip_type chip;
+       u32 eth_flags;
 
        int msg_enable;
        __be16 vxlan_port;
@@ -956,6 +984,7 @@ struct adapter {
        struct chcr_stats_debug chcr_stats;
 
        /* TC flower offload */
+       bool tc_flower_initialized;
        struct rhashtable flower_tbl;
        struct rhashtable_params flower_ht_params;
        struct timer_list flower_stats_timer;
@@ -1333,7 +1362,7 @@ void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
 void t4_free_sge_resources(struct adapter *adap);
 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
 irq_handler_t t4_intr_handler(struct adapter *adap);
-netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev);
 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                     const struct pkt_gl *gl);
 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
@@ -1555,6 +1584,7 @@ int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz);
 int t4_seeprom_wp(struct adapter *adapter, bool enable);
 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_pfres(struct adapter *adapter);
 int t4_read_flash(struct adapter *adapter, unsigned int addr,
                  unsigned int nwords, u32 *data, int byte_oriented);
 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
index 8d751efcb90e58b5161a491921d204ce197870c2..55b46592af282b9beca434540862deaacccc9ea1 100644 (file)
@@ -273,7 +273,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
                }
                break;
        case CUDBG_ULPTX_LA:
-               len = sizeof(struct cudbg_ulptx_la);
+               len = sizeof(struct cudbg_ver_hdr) +
+                     sizeof(struct cudbg_ulptx_la);
                break;
        case CUDBG_UP_CIM_INDIRECT:
                n = 0;
index c301aaf79d647d2c0f2a1b08ebc1823d6079377f..511606fd1b20f4ea189426a53eb108888a6295f1 100644 (file)
@@ -2414,6 +2414,44 @@ static const struct file_operations rss_vf_config_debugfs_fops = {
        .release = seq_release_private
 };
 
+static int resources_show(struct seq_file *seq, void *v)
+{
+       struct adapter *adapter = seq->private;
+       struct pf_resources *pfres = &adapter->params.pfres;
+
+       #define S(desc, fmt, var) \
+               seq_printf(seq, "%-60s " fmt "\n", \
+                          desc " (" #var "):", pfres->var)
+
+       S("Virtual Interfaces", "%d", nvi);
+       S("Egress Queues", "%d", neq);
+       S("Ethernet Control", "%d", nethctrl);
+       S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
+       S("Ingress Queues", "%d", niq);
+       S("Traffic Class", "%d", tc);
+       S("Port Access Rights Mask", "%#x", pmask);
+       S("MAC Address Filters", "%d", nexactf);
+       S("Firmware Command Read Capabilities", "%#x", r_caps);
+       S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
+
+       #undef S
+
+       return 0;
+}
+
+static int resources_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, resources_show, inode->i_private);
+}
+
+static const struct file_operations resources_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = resources_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release,
+};
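
For reference, the first S() invocation above expands (after string pasting) to the following seq_printf() call, so each line of the debugfs file pairs a description with the matching pf_resources field name:

    seq_printf(seq, "%-60s %d\n", "Virtual Interfaces (nvi):", pfres->nvi);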
+
 /**
  * ethqset2pinfo - return port_info of an Ethernet Queue Set
  * @adap: the adapter
@@ -2924,6 +2962,169 @@ static const struct file_operations chcr_stats_debugfs_fops = {
         .llseek  = seq_lseek,
         .release = single_release,
 };
+
+#define PRINT_ADAP_STATS(string, value) \
+       seq_printf(seq, "%-25s %-20llu\n", (string), \
+                  (unsigned long long)(value))
+
+#define PRINT_CH_STATS(string, value) \
+do { \
+       seq_printf(seq, "%-25s ", (string)); \
+       for (i = 0; i < adap->params.arch.nchan; i++) \
+               seq_printf(seq, "%-20llu ", \
+                          (unsigned long long)stats.value[i]); \
+       seq_printf(seq, "\n"); \
+} while (0)
+
+#define PRINT_CH_STATS2(string, value) \
+do { \
+       seq_printf(seq, "%-25s ", (string)); \
+       for (i = 0; i < adap->params.arch.nchan; i++) \
+               seq_printf(seq, "%-20llu ", \
+                          (unsigned long long)stats[i].value); \
+       seq_printf(seq, "\n"); \
+} while (0)
+
+static void show_tcp_stats(struct seq_file *seq)
+{
+       struct adapter *adap = seq->private;
+       struct tp_tcp_stats v4, v6;
+
+       spin_lock(&adap->stats_lock);
+       t4_tp_get_tcp_stats(adap, &v4, &v6, false);
+       spin_unlock(&adap->stats_lock);
+
+       PRINT_ADAP_STATS("tcp_ipv4_out_rsts:", v4.tcp_out_rsts);
+       PRINT_ADAP_STATS("tcp_ipv4_in_segs:", v4.tcp_in_segs);
+       PRINT_ADAP_STATS("tcp_ipv4_out_segs:", v4.tcp_out_segs);
+       PRINT_ADAP_STATS("tcp_ipv4_retrans_segs:", v4.tcp_retrans_segs);
+       PRINT_ADAP_STATS("tcp_ipv6_out_rsts:", v6.tcp_out_rsts);
+       PRINT_ADAP_STATS("tcp_ipv6_in_segs:", v6.tcp_in_segs);
+       PRINT_ADAP_STATS("tcp_ipv6_out_segs:", v6.tcp_out_segs);
+       PRINT_ADAP_STATS("tcp_ipv6_retrans_segs:", v6.tcp_retrans_segs);
+}
+
+static void show_ddp_stats(struct seq_file *seq)
+{
+       struct adapter *adap = seq->private;
+       struct tp_usm_stats stats;
+
+       spin_lock(&adap->stats_lock);
+       t4_get_usm_stats(adap, &stats, false);
+       spin_unlock(&adap->stats_lock);
+
+       PRINT_ADAP_STATS("usm_ddp_frames:", stats.frames);
+       PRINT_ADAP_STATS("usm_ddp_octets:", stats.octets);
+       PRINT_ADAP_STATS("usm_ddp_drops:", stats.drops);
+}
+
+static void show_rdma_stats(struct seq_file *seq)
+{
+       struct adapter *adap = seq->private;
+       struct tp_rdma_stats stats;
+
+       spin_lock(&adap->stats_lock);
+       t4_tp_get_rdma_stats(adap, &stats, false);
+       spin_unlock(&adap->stats_lock);
+
+       PRINT_ADAP_STATS("rdma_no_rqe_mod_defer:", stats.rqe_dfr_mod);
+       PRINT_ADAP_STATS("rdma_no_rqe_pkt_defer:", stats.rqe_dfr_pkt);
+}
+
+static void show_tp_err_adapter_stats(struct seq_file *seq)
+{
+       struct adapter *adap = seq->private;
+       struct tp_err_stats stats;
+
+       spin_lock(&adap->stats_lock);
+       t4_tp_get_err_stats(adap, &stats, false);
+       spin_unlock(&adap->stats_lock);
+
+       PRINT_ADAP_STATS("tp_err_ofld_no_neigh:", stats.ofld_no_neigh);
+       PRINT_ADAP_STATS("tp_err_ofld_cong_defer:", stats.ofld_cong_defer);
+}
+
+static void show_cpl_stats(struct seq_file *seq)
+{
+       struct adapter *adap = seq->private;
+       struct tp_cpl_stats stats;
+       u8 i;
+
+       spin_lock(&adap->stats_lock);
+       t4_tp_get_cpl_stats(adap, &stats, false);
+       spin_unlock(&adap->stats_lock);
+
+       PRINT_CH_STATS("tp_cpl_requests:", req);
+       PRINT_CH_STATS("tp_cpl_responses:", rsp);
+}
+
+static void show_tp_err_channel_stats(struct seq_file *seq)
+{
+       struct adapter *adap = seq->private;
+       struct tp_err_stats stats;
+       u8 i;
+
+       spin_lock(&adap->stats_lock);
+       t4_tp_get_err_stats(adap, &stats, false);
+       spin_unlock(&adap->stats_lock);
+
+       PRINT_CH_STATS("tp_mac_in_errs:", mac_in_errs);
+       PRINT_CH_STATS("tp_hdr_in_errs:", hdr_in_errs);
+       PRINT_CH_STATS("tp_tcp_in_errs:", tcp_in_errs);
+       PRINT_CH_STATS("tp_tcp6_in_errs:", tcp6_in_errs);
+       PRINT_CH_STATS("tp_tnl_cong_drops:", tnl_cong_drops);
+       PRINT_CH_STATS("tp_tnl_tx_drops:", tnl_tx_drops);
+       PRINT_CH_STATS("tp_ofld_vlan_drops:", ofld_vlan_drops);
+       PRINT_CH_STATS("tp_ofld_chan_drops:", ofld_chan_drops);
+}
+
+static void show_fcoe_stats(struct seq_file *seq)
+{
+       struct adapter *adap = seq->private;
+       struct tp_fcoe_stats stats[NCHAN];
+       u8 i;
+
+       spin_lock(&adap->stats_lock);
+       for (i = 0; i < adap->params.arch.nchan; i++)
+               t4_get_fcoe_stats(adap, i, &stats[i], false);
+       spin_unlock(&adap->stats_lock);
+
+       PRINT_CH_STATS2("fcoe_octets_ddp", octets_ddp);
+       PRINT_CH_STATS2("fcoe_frames_ddp", frames_ddp);
+       PRINT_CH_STATS2("fcoe_frames_drop", frames_drop);
+}
+
+#undef PRINT_CH_STATS2
+#undef PRINT_CH_STATS
+#undef PRINT_ADAP_STATS
+
+static int tp_stats_show(struct seq_file *seq, void *v)
+{
+       struct adapter *adap = seq->private;
+
+       seq_puts(seq, "\n--------Adapter Stats--------\n");
+       show_tcp_stats(seq);
+       show_ddp_stats(seq);
+       show_rdma_stats(seq);
+       show_tp_err_adapter_stats(seq);
+
+       seq_puts(seq, "\n-------- Channel Stats --------\n");
+       if (adap->params.arch.nchan == NCHAN)
+               seq_printf(seq, "%-25s %-20s %-20s %-20s %-20s\n",
+                          " ", "channel 0", "channel 1",
+                          "channel 2", "channel 3");
+       else
+               seq_printf(seq, "%-25s %-20s %-20s\n",
+                          " ", "channel 0", "channel 1");
+       show_cpl_stats(seq);
+       show_tp_err_channel_stats(seq);
+       show_fcoe_stats(seq);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tp_stats);
+
 /* Add an array of Debug FS files.
  */
 void add_debugfs_files(struct adapter *adap,
@@ -2973,6 +3174,7 @@ int t4_setup_debugfs(struct adapter *adap)
                { "rss_key", &rss_key_debugfs_fops, 0400, 0 },
                { "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 },
                { "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 },
+               { "resources", &resources_debugfs_fops, 0400, 0 },
                { "sge_qinfo", &sge_qinfo_debugfs_fops, 0400, 0 },
                { "ibq_tp0",  &cim_ibq_fops, 0400, 0 },
                { "ibq_tp1",  &cim_ibq_fops, 0400, 1 },
@@ -2999,6 +3201,7 @@ int t4_setup_debugfs(struct adapter *adap)
                { "blocked_fl", &blocked_fl_fops, 0600, 0 },
                { "meminfo", &meminfo_fops, 0400, 0 },
                { "crypto", &chcr_stats_debugfs_fops, 0400, 0 },
+               { "tp_stats", &tp_stats_debugfs_fops, 0400, 0 },
        };
 
        /* Debug FS nodes common to all T5 and later adapters.
index f7eef93ffc87d440fcf067d83e1f082a256f38b1..d07230c892a546d6e2a31f7cddb2f51116cd9035 100644 (file)
@@ -115,42 +115,10 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
        "db_drop                ",
        "db_full                ",
        "db_empty               ",
-       "tcp_ipv4_out_rsts      ",
-       "tcp_ipv4_in_segs       ",
-       "tcp_ipv4_out_segs      ",
-       "tcp_ipv4_retrans_segs  ",
-       "tcp_ipv6_out_rsts      ",
-       "tcp_ipv6_in_segs       ",
-       "tcp_ipv6_out_segs      ",
-       "tcp_ipv6_retrans_segs  ",
-       "usm_ddp_frames         ",
-       "usm_ddp_octets         ",
-       "usm_ddp_drops          ",
-       "rdma_no_rqe_mod_defer  ",
-       "rdma_no_rqe_pkt_defer  ",
-       "tp_err_ofld_no_neigh   ",
-       "tp_err_ofld_cong_defer ",
        "write_coal_success     ",
        "write_coal_fail        ",
 };
 
-static char channel_stats_strings[][ETH_GSTRING_LEN] = {
-       "--------Channel--------- ",
-       "tp_cpl_requests        ",
-       "tp_cpl_responses       ",
-       "tp_mac_in_errs         ",
-       "tp_hdr_in_errs         ",
-       "tp_tcp_in_errs         ",
-       "tp_tcp6_in_errs        ",
-       "tp_tnl_cong_drops      ",
-       "tp_tnl_tx_drops        ",
-       "tp_ofld_vlan_drops     ",
-       "tp_ofld_chan_drops     ",
-       "fcoe_octets_ddp        ",
-       "fcoe_frames_ddp        ",
-       "fcoe_frames_drop       ",
-};
-
 static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
        "-------Loopback----------- ",
        "octets_ok              ",
@@ -177,14 +145,19 @@ static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
        "bg3_frames_trunc       ",
 };
 
+static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = {
+       [PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr",
+};
+
 static int get_sset_count(struct net_device *dev, int sset)
 {
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings) +
                       ARRAY_SIZE(adapter_stats_strings) +
-                      ARRAY_SIZE(channel_stats_strings) +
                       ARRAY_SIZE(loopback_stats_strings);
+       case ETH_SS_PRIV_FLAGS:
+               return ARRAY_SIZE(cxgb4_priv_flags_strings);
        default:
                return -EOPNOTSUPP;
        }
@@ -235,6 +208,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
                         FW_HDR_FW_VER_MINOR_G(exprom_vers),
                         FW_HDR_FW_VER_MICRO_G(exprom_vers),
                         FW_HDR_FW_VER_BUILD_G(exprom_vers));
+       info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings);
 }
 
 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -245,11 +219,11 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
                memcpy(data, adapter_stats_strings,
                       sizeof(adapter_stats_strings));
                data += sizeof(adapter_stats_strings);
-               memcpy(data, channel_stats_strings,
-                      sizeof(channel_stats_strings));
-               data += sizeof(channel_stats_strings);
                memcpy(data, loopback_stats_strings,
                       sizeof(loopback_stats_strings));
+       } else if (stringset == ETH_SS_PRIV_FLAGS) {
+               memcpy(data, cxgb4_priv_flags_strings,
+                      sizeof(cxgb4_priv_flags_strings));
        }
 }
 
@@ -270,41 +244,10 @@ struct adapter_stats {
        u64 db_drop;
        u64 db_full;
        u64 db_empty;
-       u64 tcp_v4_out_rsts;
-       u64 tcp_v4_in_segs;
-       u64 tcp_v4_out_segs;
-       u64 tcp_v4_retrans_segs;
-       u64 tcp_v6_out_rsts;
-       u64 tcp_v6_in_segs;
-       u64 tcp_v6_out_segs;
-       u64 tcp_v6_retrans_segs;
-       u64 frames;
-       u64 octets;
-       u64 drops;
-       u64 rqe_dfr_mod;
-       u64 rqe_dfr_pkt;
-       u64 ofld_no_neigh;
-       u64 ofld_cong_defer;
        u64 wc_success;
        u64 wc_fail;
 };
 
-struct channel_stats {
-       u64 cpl_req;
-       u64 cpl_rsp;
-       u64 mac_in_errs;
-       u64 hdr_in_errs;
-       u64 tcp_in_errs;
-       u64 tcp6_in_errs;
-       u64 tnl_cong_drops;
-       u64 tnl_tx_drops;
-       u64 ofld_vlan_drops;
-       u64 ofld_chan_drops;
-       u64 octets_ddp;
-       u64 frames_ddp;
-       u64 frames_drop;
-};
-
 static void collect_sge_port_stats(const struct adapter *adap,
                                   const struct port_info *p,
                                   struct queue_port_stats *s)
@@ -327,45 +270,14 @@ static void collect_sge_port_stats(const struct adapter *adap,
 
 static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
 {
-       struct tp_tcp_stats v4, v6;
-       struct tp_rdma_stats rdma_stats;
-       struct tp_err_stats err_stats;
-       struct tp_usm_stats usm_stats;
        u64 val1, val2;
 
        memset(s, 0, sizeof(*s));
 
-       spin_lock(&adap->stats_lock);
-       t4_tp_get_tcp_stats(adap, &v4, &v6, false);
-       t4_tp_get_rdma_stats(adap, &rdma_stats, false);
-       t4_get_usm_stats(adap, &usm_stats, false);
-       t4_tp_get_err_stats(adap, &err_stats, false);
-       spin_unlock(&adap->stats_lock);
-
        s->db_drop = adap->db_stats.db_drop;
        s->db_full = adap->db_stats.db_full;
        s->db_empty = adap->db_stats.db_empty;
 
-       s->tcp_v4_out_rsts = v4.tcp_out_rsts;
-       s->tcp_v4_in_segs = v4.tcp_in_segs;
-       s->tcp_v4_out_segs = v4.tcp_out_segs;
-       s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
-       s->tcp_v6_out_rsts = v6.tcp_out_rsts;
-       s->tcp_v6_in_segs = v6.tcp_in_segs;
-       s->tcp_v6_out_segs = v6.tcp_out_segs;
-       s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;
-
-       if (is_offload(adap)) {
-               s->frames = usm_stats.frames;
-               s->octets = usm_stats.octets;
-               s->drops = usm_stats.drops;
-               s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
-               s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
-       }
-
-       s->ofld_no_neigh = err_stats.ofld_no_neigh;
-       s->ofld_cong_defer = err_stats.ofld_cong_defer;
-
        if (!is_t4(adap->params.chip)) {
                int v;
 
@@ -379,36 +291,6 @@ static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
        }
 }
 
-static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
-                                 u8 i)
-{
-       struct tp_cpl_stats cpl_stats;
-       struct tp_err_stats err_stats;
-       struct tp_fcoe_stats fcoe_stats;
-
-       memset(s, 0, sizeof(*s));
-
-       spin_lock(&adap->stats_lock);
-       t4_tp_get_cpl_stats(adap, &cpl_stats, false);
-       t4_tp_get_err_stats(adap, &err_stats, false);
-       t4_get_fcoe_stats(adap, i, &fcoe_stats, false);
-       spin_unlock(&adap->stats_lock);
-
-       s->cpl_req = cpl_stats.req[i];
-       s->cpl_rsp = cpl_stats.rsp[i];
-       s->mac_in_errs = err_stats.mac_in_errs[i];
-       s->hdr_in_errs = err_stats.hdr_in_errs[i];
-       s->tcp_in_errs = err_stats.tcp_in_errs[i];
-       s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
-       s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
-       s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
-       s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
-       s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
-       s->octets_ddp = fcoe_stats.octets_ddp;
-       s->frames_ddp = fcoe_stats.frames_ddp;
-       s->frames_drop = fcoe_stats.frames_drop;
-}
-
 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
 {
@@ -428,11 +310,6 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
        collect_adapter_stats(adapter, (struct adapter_stats *)data);
        data += sizeof(struct adapter_stats) / sizeof(u64);
 
-       *data++ = (u64)pi->port_id;
-       collect_channel_stats(adapter, (struct channel_stats *)data,
-                             pi->port_id);
-       data += sizeof(struct channel_stats) / sizeof(u64);
-
        *data++ = (u64)pi->port_id;
        memset(&s, 0, sizeof(s));
        t4_get_lb_stats(adapter, pi->port_id, &s);
@@ -751,13 +628,10 @@ static int get_link_ksettings(struct net_device *dev,
        fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
                       link_ksettings->link_modes.lp_advertising);
 
-       if (netif_carrier_ok(dev)) {
-               base->speed = pi->link_cfg.speed;
-               base->duplex = DUPLEX_FULL;
-       } else {
-               base->speed = SPEED_UNKNOWN;
-               base->duplex = DUPLEX_UNKNOWN;
-       }
+       base->speed = (netif_carrier_ok(dev)
+                      ? pi->link_cfg.speed
+                      : SPEED_UNKNOWN);
+       base->duplex = DUPLEX_FULL;
 
        if (pi->link_cfg.fc & PAUSE_RX) {
                if (pi->link_cfg.fc & PAUSE_TX) {
@@ -1499,6 +1373,36 @@ static int cxgb4_get_module_eeprom(struct net_device *dev,
                         offset, len, &data[eprom->len - len]);
 }
 
+static u32 cxgb4_get_priv_flags(struct net_device *netdev)
+{
+       struct port_info *pi = netdev_priv(netdev);
+       struct adapter *adapter = pi->adapter;
+
+       return (adapter->eth_flags | pi->eth_flags);
+}
+
+/**
+ *     set_flags - set/unset specified flags if passed in new_flags
+ *     @cur_flags: pointer to current flags
+ *     @new_flags: new incoming flags
+ *     @flags: set of flags to set/unset
+ */
+static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags)
+{
+       *cur_flags = (*cur_flags & ~flags) | (new_flags & flags);
+}
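
A worked example of the masking, with values assumed for illustration (*cur_flags = 0x5, new_flags = 0x3, flags = 0x6):

    /* (0x5 & ~0x6) | (0x3 & 0x6) = 0x1 | 0x2 = 0x3
     * bit 0 (outside 'flags') keeps its old value, while bits 1-2 are
     * rewritten from new_flags.
     */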
+
+static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+       struct port_info *pi = netdev_priv(netdev);
+       struct adapter *adapter = pi->adapter;
+
+       set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP);
+       set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT);
+
+       return 0;
+}
+
 static const struct ethtool_ops cxgb_ethtool_ops = {
        .get_link_ksettings = get_link_ksettings,
        .set_link_ksettings = set_link_ksettings,
@@ -1535,6 +1439,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
        .get_dump_data     = get_dump_data,
        .get_module_info   = cxgb4_get_module_info,
        .get_module_eeprom = cxgb4_get_module_eeprom,
+       .get_priv_flags    = cxgb4_get_priv_flags,
+       .set_priv_flags    = cxgb4_set_priv_flags,
 };
 
 void cxgb4_set_ethtool_ops(struct net_device *netdev)
index dd04a2f89ce62db6ea9bca433023d9aac4b10e23..40cf8dc9f16324330788f6bc8a593b441d47d44f 100644 (file)
@@ -263,7 +263,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
                                "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
                                enable ? "set" : "unset", pi->port_id, i, -err);
                else
-                       txq->dcb_prio = value;
+                       txq->dcb_prio = enable ? value : 0;
        }
 }
 
@@ -924,12 +924,14 @@ static int setup_sge_queues(struct adapter *adap)
                     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
        return 0;
 freeout:
+       dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
        t4_free_sge_resources(adap);
        return err;
 }
 
 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            void *accel_priv, select_queue_fallback_t fallback)
+                            struct net_device *sb_dev,
+                            select_queue_fallback_t fallback)
 {
        int txq;
 
@@ -971,7 +973,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
                return txq;
        }
 
-       return fallback(dev, skb) % dev->real_num_tx_queues;
+       return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
 }
 
 static int closest_timer(const struct sge *s, int time)
@@ -3016,7 +3018,7 @@ static int cxgb_setup_tc_block(struct net_device *dev,
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
-                                            pi, dev);
+                                            pi, dev, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
                return 0;
@@ -3217,7 +3219,7 @@ static netdev_features_t cxgb_fix_features(struct net_device *dev,
 static const struct net_device_ops cxgb4_netdev_ops = {
        .ndo_open             = cxgb_open,
        .ndo_stop             = cxgb_close,
-       .ndo_start_xmit       = t4_eth_xmit,
+       .ndo_start_xmit       = t4_start_xmit,
        .ndo_select_queue     = cxgb_select_queue,
        .ndo_get_stats64      = cxgb_get_stats,
        .ndo_set_rx_mode      = cxgb_set_rxmode,
@@ -3536,6 +3538,16 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
        u32 v;
        int ret;
 
+       /* Now that we've successfully configured and initialized the adapter,
+        * we can ask the Firmware what resources it has provisioned for us.
+        */
+       ret = t4_get_pfres(adap);
+       if (ret) {
+               dev_err(adap->pdev_dev,
+                       "Unable to retrieve resource provisioning information\n");
+               return ret;
+       }
+
        /* get device capabilities */
        memset(c, 0, sizeof(*c));
        c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
@@ -4170,32 +4182,6 @@ static int adap_init0(struct adapter *adap)
                        goto bye;
        }
 
-       /*
-        * Grab VPD parameters.  This should be done after we establish a
-        * connection to the firmware since some of the VPD parameters
-        * (notably the Core Clock frequency) are retrieved via requests to
-        * the firmware.  On the other hand, we need these fairly early on
-        * so we do this right after getting ahold of the firmware.
-        */
-       ret = t4_get_vpd_params(adap, &adap->params.vpd);
-       if (ret < 0)
-               goto bye;
-
-       /*
-        * Find out what ports are available to us.  Note that we need to do
-        * this before calling adap_init0_no_config() since it needs nports
-        * and portvec ...
-        */
-       v =
-           FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
-           FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
-       ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
-       if (ret < 0)
-               goto bye;
-
-       adap->params.nports = hweight32(port_vec);
-       adap->params.portvec = port_vec;
-
        /* If the firmware is initialized already, emit a simply note to that
         * effect. Otherwise, it's time to try initializing the adapter.
         */
@@ -4246,6 +4232,45 @@ static int adap_init0(struct adapter *adap)
                }
        }
 
+       /* Now that we've successfully configured and initialized the adapter
+        * (or found it already initialized), we can ask the Firmware what
+        * resources it has provisioned for us.
+        */
+       ret = t4_get_pfres(adap);
+       if (ret) {
+               dev_err(adap->pdev_dev,
+                       "Unable to retrieve resource provisioning information\n");
+               goto bye;
+       }
+
+       /* Grab VPD parameters.  This should be done after we establish a
+        * connection to the firmware since some of the VPD parameters
+        * (notably the Core Clock frequency) are retrieved via requests to
+        * the firmware.  On the other hand, we need these fairly early on
+        * so we do this right after getting ahold of the firmware.
+        *
+        * We need to do this after initializing the adapter because someone
+        * could have FLASHed a new VPD which won't be read by the firmware
+        * until we do the RESET ...
+        */
+       ret = t4_get_vpd_params(adap, &adap->params.vpd);
+       if (ret < 0)
+               goto bye;
+
+       /* Find out what ports are available to us.  Note that we need to do
+        * this before calling adap_init0_no_config() since it needs nports
+        * and portvec ...
+        */
+       v =
+           FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+           FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
+       ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
+       if (ret < 0)
+               goto bye;
+
+       adap->params.nports = hweight32(port_vec);
+       adap->params.portvec = port_vec;
+
        /* Give the SGE code a chance to pull in anything that it needs ...
         * Note that this must be called after we retrieve our VPD parameters
         * in order to know how to convert core ticks to seconds, etc.
@@ -4797,10 +4822,12 @@ static inline bool is_x_10g_port(const struct link_config *lc)
  * of ports we found and the number of available CPUs.  Most settings can be
  * modified by the admin prior to actual use.
  */
-static void cfg_queues(struct adapter *adap)
+static int cfg_queues(struct adapter *adap)
 {
        struct sge *s = &adap->sge;
-       int i = 0, n10g = 0, qidx = 0;
+       int i, n10g = 0, qidx = 0;
+       int niqflint, neq, avail_eth_qsets;
+       int max_eth_qsets = 32;
 #ifndef CONFIG_CHELSIO_T4_DCB
        int q10g = 0;
 #endif
@@ -4812,16 +4839,46 @@ static void cfg_queues(struct adapter *adap)
                adap->params.crypto = 0;
        }
 
-       n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+       /* Calculate the number of Ethernet Queue Sets available based on
+        * resources provisioned for us.  We always have an Asynchronous
+        * Firmware Event Ingress Queue.  If we're operating in MSI or Legacy
+        * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
+        * Ingress Queue.  Meanwhile, we need two Egress Queues for each
+        * Queue Set: one for the Free List and one for the Ethernet TX Queue.
+        *
+        * Note that we should also take into account all of the various
+        * Offload Queues.  But, in any situation where we're operating in
+        * a Resource Constrained Provisioning environment, doing any Offload
+        * at all is problematic ...
+        */
+       niqflint = adap->params.pfres.niqflint - 1;
+       if (!(adap->flags & USING_MSIX))
+               niqflint--;
+       neq = adap->params.pfres.neq / 2;
+       avail_eth_qsets = min(niqflint, neq);
+
+       if (avail_eth_qsets > max_eth_qsets)
+               avail_eth_qsets = max_eth_qsets;
+
+       if (avail_eth_qsets < adap->params.nports) {
+               dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
+                       avail_eth_qsets, adap->params.nports);
+               return -ENOMEM;
+       }
+
+       /* Count the number of 10Gb/s or better ports */
+       for_each_port(adap, i)
+               n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+
 #ifdef CONFIG_CHELSIO_T4_DCB
        /* For Data Center Bridging support we need to be able to support up
         * to 8 Traffic Priorities; each of which will be assigned to its
         * own TX Queue in order to prevent Head-Of-Line Blocking.
         */
-       if (adap->params.nports * 8 > MAX_ETH_QSETS) {
-               dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
-                       MAX_ETH_QSETS, adap->params.nports * 8);
-               BUG_ON(1);
+       if (adap->params.nports * 8 > avail_eth_qsets) {
+               dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
+                       avail_eth_qsets, adap->params.nports * 8);
+               return -ENOMEM;
        }
 
        for_each_port(adap, i) {
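
A worked example of the queue-set budget computed above (the provisioning numbers are assumed):

    /* With pfres.niqflint = 66 and pfres.neq = 130 on an MSI-X adapter:
     *   niqflint        = 66 - 1  = 65  (one IQ reserved for FW events)
     *   neq             = 130 / 2 = 65  (each QSet needs FL + ETH TX EQs)
     *   avail_eth_qsets = min(65, 65) = 65, clamped to max_eth_qsets = 32
     * Without MSI-X, niqflint loses one more for the forwarded-interrupt IQ.
     */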
@@ -4837,7 +4894,7 @@ static void cfg_queues(struct adapter *adap)
         * per 10G port.
         */
        if (n10g)
-               q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
+               q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
        if (q10g > netif_get_num_default_rss_queues())
                q10g = netif_get_num_default_rss_queues();
 
@@ -4888,6 +4945,8 @@ static void cfg_queues(struct adapter *adap)
 
        init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
        init_rspq(adap, &s->intrq, 0, 1, 512, 64);
+
+       return 0;
 }
 
 /*
@@ -5628,10 +5687,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        }
 
+       if (!(adapter->flags & FW_OK))
+               goto fw_attach_fail;
+
        /* Configure queues and allocate tables now, they can be needed as
         * soon as the first register_netdev completes.
         */
-       cfg_queues(adapter);
+       err = cfg_queues(adapter);
+       if (err)
+               goto out_free_dev;
 
        adapter->smt = t4_init_smt();
        if (!adapter->smt) {
@@ -5703,7 +5767,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
                        u32 hash_base, hash_reg;
 
-                       if (chip <= CHELSIO_T5) {
+                       if (chip_ver <= CHELSIO_T5) {
                                hash_reg = LE_DB_TID_HASHBASE_A;
                                hash_base = t4_read_reg(adapter, hash_reg);
                                adapter->tids.hash_base = hash_base / 4;
@@ -5738,6 +5802,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_free_dev;
        }
 
+fw_attach_fail:
        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
index 3ddd2c4acf6846e38697fde2f09f1b3aa300dbe6..623f73dd7738dbbb01b8f49649a4507d641b1d9a 100644 (file)
@@ -874,6 +874,9 @@ int cxgb4_init_tc_flower(struct adapter *adap)
 {
        int ret;
 
+       if (adap->tc_flower_initialized)
+               return -EEXIST;
+
        adap->flower_ht_params = cxgb4_tc_flower_ht_params;
        ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
        if (ret)
@@ -882,13 +885,18 @@ int cxgb4_init_tc_flower(struct adapter *adap)
        INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
        timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
        mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
+       adap->tc_flower_initialized = true;
        return 0;
 }
 
 void cxgb4_cleanup_tc_flower(struct adapter *adap)
 {
+       if (!adap->tc_flower_initialized)
+               return;
+
        if (adap->flower_stats_timer.function)
                del_timer_sync(&adap->flower_stats_timer);
        cancel_work_sync(&adap->flower_stats_work);
        rhashtable_destroy(&adap->flower_tbl);
+       adap->tc_flower_initialized = false;
 }
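[Note: the tc_flower hunk guards init/cleanup with the new tc_flower_initialized flag so a second init reports -EEXIST and cleanup is safe to call in any order. A standalone sketch of the idiom; the struct and function names are illustrative.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct adapter_sketch {
	bool tc_flower_initialized;
};

static int init_tc_flower(struct adapter_sketch *adap)
{
	if (adap->tc_flower_initialized)
		return -EEXIST;
	/* ... hashtable/timer setup would go here ... */
	adap->tc_flower_initialized = true;
	return 0;
}

static void cleanup_tc_flower(struct adapter_sketch *adap)
{
	if (!adap->tc_flower_initialized)
		return;
	/* ... teardown would go here ... */
	adap->tc_flower_initialized = false;
}

int main(void)
{
	struct adapter_sketch adap = { .tc_flower_initialized = false };

	printf("%d\n", init_tc_flower(&adap));	/* 0 */
	printf("%d\n", init_tc_flower(&adap));	/* -17: -EEXIST */
	cleanup_tc_flower(&adap);		/* tears down */
	cleanup_tc_flower(&adap);		/* safe no-op */
	return 0;
}]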
index 9148abb7994c8d9d91a75c01c286daa36ce9cd1b..7fc656680299703439d2e3bb590eaed762cd2afd 100644 (file)
@@ -539,6 +539,9 @@ void t4_cleanup_sched(struct adapter *adap)
                struct port_info *pi = netdev2pinfo(adap->port[j]);
 
                s = pi->sched_tbl;
+               if (!s)
+                       continue;
+
                for (i = 0; i < s->sched_size; i++) {
                        struct sched_class *e;
 
index 395e2a0e8d7f6235a36ae9ec73ebd5141b2f24ce..6807bc3a44fb7fad1fd6c229bafef6b5a8c063a1 100644 (file)
@@ -1288,13 +1288,13 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
 }
 
 /**
- *     t4_eth_xmit - add a packet to an Ethernet Tx queue
+ *     cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
  *     @skb: the packet
  *     @dev: the egress net device
  *
  *     Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
  */
-netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        u32 wr_mid, ctrl0, op;
        u64 cntrl, *end, *sgl;
@@ -1547,6 +1547,374 @@ out_free:       dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
 }
 
+/* Constants ... */
+enum {
+       /* Egress Queue sizes, producer and consumer indices are all in units
+        * of Egress Context Units (bytes).  Note that as far as the hardware is
+        * concerned, the free list is an Egress Queue (the host produces free
+        * buffers which the hardware consumes) and free list entries are
+        * 64-bit PCI DMA addresses.
+        */
+       EQ_UNIT = SGE_EQ_IDXSIZE,
+       FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+       TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
+
+       T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+                              sizeof(struct cpl_tx_pkt_lso_core) +
+                              sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
+};
+
+/**
+ *     t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
+ *     @skb: the packet
+ *
+ *     Returns whether an Ethernet packet is small enough to fit completely as
+ *     immediate data.
+ */
+static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
+{
+       /* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
+        * which does not accommodate immediate data.  We could dike out all
+        * of the support code for immediate data but that would tie our hands
+        * too much if we ever want to enhance the firmware.  It would also
+        * create more differences between the PF and VF Drivers.
+        */
+       return false;
+}
+
+/**
+ *     t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
+ *     @skb: the packet
+ *
+ *     Returns the number of flits needed for a TX Work Request for the
+ *     given Ethernet packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
+{
+       unsigned int flits;
+
+       /* If the skb is small enough, we can pump it out as a work request
+        * with only immediate data.  In that case we just have to have the
+        * TX Packet header plus the skb data in the Work Request.
+        */
+       if (t4vf_is_eth_imm(skb))
+               return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
+                                   sizeof(__be64));
+
+       /* Otherwise, we're going to have to construct a Scatter/Gather List
+        * of the skb body and fragments.  We also include the flits necessary
+        * for the TX Packet Work Request and CPL.  We always have a firmware
+        * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+        * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+        * message or, if we're doing a Large Send Offload, an LSO CPL message
+        * with an embedded TX Packet Write CPL message.
+        */
+       flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+       if (skb_shinfo(skb)->gso_size)
+               flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+                         sizeof(struct cpl_tx_pkt_lso_core) +
+                         sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+       else
+               flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
+                         sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+       return flits;
+}
+
+/**
+ *     cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
+ *     @skb: the packet
+ *     @dev: the egress net device
+ *
+ *     Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
+ */
+static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
+                                    struct net_device *dev)
+{
+       dma_addr_t addr[MAX_SKB_FRAGS + 1];
+       const struct skb_shared_info *ssi;
+       struct fw_eth_tx_pkt_vm_wr *wr;
+       int qidx, credits, max_pkt_len;
+       struct cpl_tx_pkt_core *cpl;
+       const struct port_info *pi;
+       unsigned int flits, ndesc;
+       struct sge_eth_txq *txq;
+       struct adapter *adapter;
+       u64 cntrl, *end;
+       u32 wr_mid;
+       const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) +
+                                      sizeof(wr->ethmacsrc) +
+                                      sizeof(wr->ethtype) +
+                                      sizeof(wr->vlantci);
+
+       /* The chip minimum packet length is 10 octets but the firmware
+        * command that we are using requires that we copy the Ethernet header
+        * (including the VLAN tag) into the Work Request header, so we reject
+        * anything smaller than that ...
+        */
+       if (unlikely(skb->len < fw_hdr_copy_len))
+               goto out_free;
+
+       /* Discard the packet if the length is greater than mtu */
+       max_pkt_len = ETH_HLEN + dev->mtu;
+       if (skb_vlan_tag_present(skb))
+               max_pkt_len += VLAN_HLEN;
+       if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+               goto out_free;
+
+       /* Figure out which TX Queue we're going to use. */
+       pi = netdev_priv(dev);
+       adapter = pi->adapter;
+       qidx = skb_get_queue_mapping(skb);
+       WARN_ON(qidx >= pi->nqsets);
+       txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
+
+       /* Take this opportunity to reclaim any TX Descriptors whose DMA
+        * transfers have completed.
+        */
+       cxgb4_reclaim_completed_tx(adapter, &txq->q, true);
+
+       /* Calculate the number of flits and TX Descriptors we're going to
+        * need along with how many TX Descriptors will be left over after
+        * we inject our Work Request.
+        */
+       flits = t4vf_calc_tx_flits(skb);
+       ndesc = flits_to_desc(flits);
+       credits = txq_avail(&txq->q) - ndesc;
+
+       if (unlikely(credits < 0)) {
+               /* Not enough room for this packet's Work Request.  Stop the
+                * TX Queue and return a "busy" condition.  The queue will get
+                * started later on when the firmware informs us that space
+                * has opened up.
+                */
+               eth_txq_stop(txq);
+               dev_err(adapter->pdev_dev,
+                       "%s: TX ring %u full while queue awake!\n",
+                       dev->name, qidx);
+               return NETDEV_TX_BUSY;
+       }
+
+       if (!t4vf_is_eth_imm(skb) &&
+           unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) {
+               /* We need to map the skb into PCI DMA space (because it can't
+                * be in-lined directly into the Work Request) and the mapping
+                * operation failed.  Record the error and drop the packet.
+                */
+               txq->mapping_err++;
+               goto out_free;
+       }
+
+       wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+       if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+               /* After we're done injecting the Work Request for this
+                * packet, we'll be below our "stop threshold" so stop the TX
+                * Queue now and schedule a request for an SGE Egress Queue
+                * Update message.  The queue will get started later on when
+                * the firmware processes this Work Request and sends us an
+                * Egress Queue Status Update message indicating that space
+                * has opened up.
+                */
+               eth_txq_stop(txq);
+               wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+       }
+
+       /* Start filling in our Work Request.  Note that we do _not_ handle
+        * the WR Header wrapping around the TX Descriptor Ring.  If our
+        * maximum header size ever exceeds one TX Descriptor, we'll need to
+        * do something else here.
+        */
+       WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
+       wr = (void *)&txq->q.desc[txq->q.pidx];
+       wr->equiq_to_len16 = cpu_to_be32(wr_mid);
+       wr->r3[0] = cpu_to_be32(0);
+       wr->r3[1] = cpu_to_be32(0);
+       skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+       end = (u64 *)wr + flits;
+
+       /* If this is a Large Send Offload packet we'll put in an LSO CPL
+        * message with an encapsulated TX Packet CPL message.  Otherwise we
+        * just use a TX Packet CPL message.
+        */
+       ssi = skb_shinfo(skb);
+       if (ssi->gso_size) {
+               struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+               bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
+               int l3hdr_len = skb_network_header_len(skb);
+               int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+
+               wr->op_immdlen =
+                       cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
+                                   FW_WR_IMMDLEN_V(sizeof(*lso) +
+                                                   sizeof(*cpl)));
+               /* Fill in the LSO CPL message. */
+               lso->lso_ctrl =
+                       cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+                                   LSO_FIRST_SLICE_F |
+                                   LSO_LAST_SLICE_F |
+                                   LSO_IPV6_V(v6) |
+                                   LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+                                   LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+                                   LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+               lso->ipid_ofst = cpu_to_be16(0);
+               lso->mss = cpu_to_be16(ssi->gso_size);
+               lso->seqno_offset = cpu_to_be32(0);
+               if (is_t4(adapter->params.chip))
+                       lso->len = cpu_to_be32(skb->len);
+               else
+                       lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
+
+               /* Set up TX Packet CPL pointer, control word and perform
+                * accounting.
+                */
+               cpl = (void *)(lso + 1);
+
+               if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+                       cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+               else
+                       cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+               cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+                                          TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+                        TXPKT_IPHDR_LEN_V(l3hdr_len);
+               txq->tso++;
+               txq->tx_cso += ssi->gso_segs;
+       } else {
+               int len;
+
+               len = (t4vf_is_eth_imm(skb)
+                      ? skb->len + sizeof(*cpl)
+                      : sizeof(*cpl));
+               wr->op_immdlen =
+                       cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
+                                   FW_WR_IMMDLEN_V(len));
+
+               /* Set up TX Packet CPL pointer, control word and perform
+                * accounting.
+                */
+               cpl = (void *)(wr + 1);
+               if (skb->ip_summed == CHECKSUM_PARTIAL) {
+                       cntrl = hwcsum(adapter->params.chip, skb) |
+                               TXPKT_IPCSUM_DIS_F;
+                       txq->tx_cso++;
+               } else {
+                       cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+               }
+       }
+
+       /* If there's a VLAN tag present, add that to the list of things to
+        * do in this Work Request.
+        */
+       if (skb_vlan_tag_present(skb)) {
+               txq->vlan_ins++;
+               cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+       }
+
+       /* Fill in the TX Packet CPL message header. */
+       cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+                                TXPKT_INTF_V(pi->port_id) |
+                                TXPKT_PF_V(0));
+       cpl->pack = cpu_to_be16(0);
+       cpl->len = cpu_to_be16(skb->len);
+       cpl->ctrl1 = cpu_to_be64(cntrl);
+
+       /* Fill in the body of the TX Packet CPL message with either in-lined
+        * data or a Scatter/Gather List.
+        */
+       if (t4vf_is_eth_imm(skb)) {
+               /* In-line the packet's data and free the skb since we don't
+                * need it any longer.
+                */
+               cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
+               dev_consume_skb_any(skb);
+       } else {
+               /* Write the skb's Scatter/Gather list into the TX Packet CPL
+                * message and retain a pointer to the skb so we can free it
+                * later when its DMA completes.  (We store the skb pointer
+                * in the Software Descriptor corresponding to the last TX
+                * Descriptor used by the Work Request.)
+                *
+                * The retained skb will be freed when the corresponding TX
+                * Descriptors are reclaimed after their DMAs complete.
+                * However, this could take quite a while since, in general,
+                * the hardware is set up to be lazy about sending DMA
+                * completion notifications to us and we mostly perform TX
+                * reclaims in the transmit routine.
+                *
+                * This is good for performance but means that we rely on new
+                * TX packets arriving to run the destructors of completed
+                * packets, which open up space in their sockets' send queues.
+                * Sometimes we do not get such new packets, causing TX to
+                * stall.  A single UDP transmitter is a good example of this
+                * situation.  We have a clean up timer that periodically
+                * reclaims completed packets but it doesn't run often enough
+                * (nor do we want it to) to prevent lengthy stalls.  A
+                * solution to this problem is to run the destructor early,
+                * after the packet is queued but before it's DMAd.  A con is
+                * that we lie to socket memory accounting, but the amount of
+                * extra memory is reasonable (limited by the number of TX
+                * descriptors), the packets do actually get freed quickly by
+                * new packets almost always, and for protocols like TCP that
+                * wait for ACKs to really free up the data, the extra memory
+                * is even less.  On the positive side we run the destructors
+                * on the sending CPU rather than on a potentially different
+                * completing CPU, usually a good thing.
+                *
+                * Run the destructor before telling the DMA engine about the
+                * packet to make sure it doesn't complete and get freed
+                * prematurely.
+                */
+               struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
+               struct sge_txq *tq = &txq->q;
+               int last_desc;
+
+               /* If the Work Request header was an exact multiple of our TX
+                * Descriptor length, then it's possible that the starting SGL
+                * pointer lines up exactly with the end of our TX Descriptor
+                * ring.  If that's the case, wrap around to the beginning
+                * here ...
+                */
+               if (unlikely((void *)sgl == (void *)tq->stat)) {
+                       sgl = (void *)tq->desc;
+                       end = (void *)((void *)tq->desc +
+                                      ((void *)end - (void *)tq->stat));
+               }
+
+               cxgb4_write_sgl(skb, tq, sgl, end, 0, addr);
+               skb_orphan(skb);
+
+               last_desc = tq->pidx + ndesc - 1;
+               if (last_desc >= tq->size)
+                       last_desc -= tq->size;
+               tq->sdesc[last_desc].skb = skb;
+               tq->sdesc[last_desc].sgl = sgl;
+       }
+
+       /* Advance our internal TX Queue state, tell the hardware about
+        * the new TX descriptors and return success.
+        */
+       txq_advance(&txq->q, ndesc);
+
+       cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
+       return NETDEV_TX_OK;
+
+out_free:
+       /* An error of some sort happened.  Free the TX skb and tell the
+        * OS that we've "dealt" with the packet ...
+        */
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+}
+
+netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct port_info *pi = netdev_priv(dev);
+
+       if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
+               return cxgb4_vf_eth_xmit(skb, dev);
+
+       return cxgb4_eth_xmit(skb, dev);
+}
+
 /**
  *     reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
  *     @q: the SGE control Tx queue
@@ -3044,7 +3412,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
        c.iqsize = htons(iq->size);
        c.iqaddr = cpu_to_be64(iq->phys_addr);
        if (cong >= 0)
-               c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
+               c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F |
+                               FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC
+                                                       :  FW_IQ_IQTYPE_OFLD));
 
        if (fl) {
                enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
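[Note: the VF TX path added above sizes each Work Request in flits (8-byte units) and converts flits to 64-byte TX descriptors. A userspace sketch of that arithmetic, assuming cxgb4's ULPTX SGL layout (2 flits for the command plus first segment, then 1.5 flits per additional address) and an assumed 6-flit WR+CPL header; HDR_FLITS is illustrative, not a driver constant.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define HDR_FLITS 6	/* assumed: VM WR (4 flits) + cpl_tx_pkt_core (2 flits) */

/* flits needed for a scatter/gather list with n DMA addresses */
static unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

static unsigned int flits_to_desc(unsigned int flits)
{
	return DIV_ROUND_UP(flits, 8);	/* 8 flits per 64-byte descriptor */
}

int main(void)
{
	unsigned int nfrags;

	/* +1 below: the skb linear area is one more DMA segment */
	for (nfrags = 0; nfrags <= 4; nfrags++) {
		unsigned int flits = sgl_len(nfrags + 1) + HDR_FLITS;

		printf("frags=%u flits=%u ndesc=%u\n",
		       nfrags, flits, flits_to_desc(flits));
	}
	return 0;
}

This is the same shape of computation t4vf_calc_tx_flits() feeds into flits_to_desc() before comparing against txq_avail().]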
index 974a868a4824b78dc8cb7225f37b5d2cf8b24b32..d266177aeef5952b299341b398d592d02bbd5678 100644 (file)
@@ -2882,6 +2882,57 @@ int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
        return 0;
 }
 
+/**
+ *     t4_get_pfres - retrieve PF resource limits
+ *     @adapter: the adapter
+ *
+ *     Retrieves configured resource limits and capabilities for a physical
+ *     function.  The results are stored in @adapter->params.pfres.
+ */
+int t4_get_pfres(struct adapter *adapter)
+{
+       struct pf_resources *pfres = &adapter->params.pfres;
+       struct fw_pfvf_cmd cmd, rpl;
+       int v;
+       u32 word;
+
+       /* Execute PFVF Read command to get PF resource limits; bail out early
+        * with error on command failure.
+        */
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
+                                   FW_CMD_REQUEST_F |
+                                   FW_CMD_READ_F |
+                                   FW_PFVF_CMD_PFN_V(adapter->pf) |
+                                   FW_PFVF_CMD_VFN_V(0));
+       cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+       v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
+       if (v != FW_SUCCESS)
+               return v;
+
+       /* Extract PF resource limits and return success.
+        */
+       word = be32_to_cpu(rpl.niqflint_niq);
+       pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
+       pfres->niq = FW_PFVF_CMD_NIQ_G(word);
+
+       word = be32_to_cpu(rpl.type_to_neq);
+       pfres->neq = FW_PFVF_CMD_NEQ_G(word);
+       pfres->pmask = FW_PFVF_CMD_PMASK_G(word);
+
+       word = be32_to_cpu(rpl.tc_to_nexactf);
+       pfres->tc = FW_PFVF_CMD_TC_G(word);
+       pfres->nvi = FW_PFVF_CMD_NVI_G(word);
+       pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
+
+       word = be32_to_cpu(rpl.r_caps_to_nethctrl);
+       pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
+       pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
+       pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
+
+       return 0;
+}
+
 /* serial flash and firmware constants */
 enum {
        SF_ATTEMPTS = 10,             /* max retries for SF operations */
index c7f8d0441278fb19b1283d49c54265075bf89cf2..e3adf435913ed60646416770449d3aa589ddd1f2 100644 (file)
@@ -188,6 +188,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
        CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
        CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */
+       CH_PCI_ID_TABLE_FENTRY(0x50ae), /* Custom T540-XL-SO */
 
        /* T6 adapters:
         */
index 6b55aa2eb2a5a8be6e93bd82ca6c535086eb307a..446aaff15bae5237829073cdbb71756ac01a364f 100644 (file)
 #define ULP_TX_LA_RDPTR_0_A 0x8ec0
 #define ULP_TX_LA_RDDATA_0_A 0x8ec4
 #define ULP_TX_LA_WRPTR_0_A 0x8ec8
+#define ULP_TX_ASIC_DEBUG_CTRL_A 0x8f70
+
+#define ULP_TX_ASIC_DEBUG_0_A 0x8f74
+#define ULP_TX_ASIC_DEBUG_1_A 0x8f78
+#define ULP_TX_ASIC_DEBUG_2_A 0x8f7c
+#define ULP_TX_ASIC_DEBUG_3_A 0x8f80
+#define ULP_TX_ASIC_DEBUG_4_A 0x8f84
+
+/* registers for module PM_RX */
+#define PM_RX_BASE_ADDR 0x8fc0
 
 #define PMRX_E_PCMD_PAR_ERROR_S    0
 #define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S)
index f1967cf6d43c4b614aba152b7e6e53d0e8b5e644..5dc6c4154af8a6e5961290f84e31e9a10d551598 100644 (file)
@@ -1472,6 +1472,12 @@ enum fw_iq_type {
        FW_IQ_TYPE_NO_FL_INT_CAP
 };
 
+enum fw_iq_iqtype {
+       FW_IQ_IQTYPE_OTHER,
+       FW_IQ_IQTYPE_NIC,
+       FW_IQ_IQTYPE_OFLD,
+};
+
 struct fw_iq_cmd {
        __be32 op_to_vfn;
        __be32 alloc_to_len16;
@@ -1586,6 +1592,12 @@ struct fw_iq_cmd {
 #define FW_IQ_CMD_IQFLINTISCSIC_S      26
 #define FW_IQ_CMD_IQFLINTISCSIC_V(x)   ((x) << FW_IQ_CMD_IQFLINTISCSIC_S)
 
+#define FW_IQ_CMD_IQTYPE_S             24
+#define FW_IQ_CMD_IQTYPE_M             0x3
+#define FW_IQ_CMD_IQTYPE_V(x)          ((x) << FW_IQ_CMD_IQTYPE_S)
+#define FW_IQ_CMD_IQTYPE_G(x)          \
+       (((x) >> FW_IQ_CMD_IQTYPE_S) & FW_IQ_CMD_IQTYPE_M)
+
 #define FW_IQ_CMD_FL0CNGCHMAP_S                20
 #define FW_IQ_CMD_FL0CNGCHMAP_V(x)     ((x) << FW_IQ_CMD_FL0CNGCHMAP_S)
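[Note: the FW_IQ_CMD_IQTYPE_S/_M/_V/_G defines added above follow the driver's bitfield convention: _S is the shift, _M the mask, _V packs a value into a word, _G extracts it back out. A compile-and-run sketch of the round trip, using local macro names of the same shape.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define IQTYPE_S 24
#define IQTYPE_M 0x3U
#define IQTYPE_V(x) ((x) << IQTYPE_S)
#define IQTYPE_G(x) (((x) >> IQTYPE_S) & IQTYPE_M)

enum { IQTYPE_OTHER, IQTYPE_NIC, IQTYPE_OFLD };

int main(void)
{
	uint32_t word = 0;

	word |= IQTYPE_V(IQTYPE_OFLD);		/* pack */
	assert(IQTYPE_G(word) == IQTYPE_OFLD);	/* unpack round-trips */
	printf("word=0x%08x iqtype=%u\n",
	       (unsigned int)word, (unsigned int)IQTYPE_G(word));
	return 0;
}]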
 
index 973c1fb70d09929f92fc47db0e3d60e3146eaff0..99038dfc7fbe52bea5932691133e2bdeced48844 100644 (file)
@@ -79,7 +79,6 @@ void enic_rfs_flw_tbl_init(struct enic *enic)
        enic->rfs_h.max = enic->config.num_arfs;
        enic->rfs_h.free = enic->rfs_h.max;
        enic->rfs_h.toclean = 0;
-       enic_rfs_timer_start(enic);
 }
 
 void enic_rfs_flw_tbl_free(struct enic *enic)
@@ -88,7 +87,6 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
 
        enic_rfs_timer_stop(enic);
        spin_lock_bh(&enic->rfs_h.lock);
-       enic->rfs_h.free = 0;
        for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
                struct hlist_head *hhead;
                struct hlist_node *tmp;
@@ -99,6 +97,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
                        enic_delfltr(enic, n->fltr_id);
                        hlist_del(&n->node);
                        kfree(n);
+                       enic->rfs_h.free++;
                }
        }
        spin_unlock_bh(&enic->rfs_h.lock);
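[Note: the enic hunks replace the up-front "rfs_h.free = 0" with a per-entry increment, so the free counter stays consistent with the table contents throughout teardown. A sketch of the accounting; the types are illustrative.

#include <stdio.h>

struct flw_tbl_sketch {
	int max;
	int used;	/* entries currently in the table */
	int free;	/* credits available for new filters */
};

static void tbl_free_all(struct flw_tbl_sketch *t)
{
	while (t->used > 0) {
		/* ... delete one hardware filter, unlink and free it ... */
		t->used--;
		t->free++;	/* one credit back per removed entry */
	}
}

int main(void)
{
	struct flw_tbl_sketch t = { .max = 8, .used = 3, .free = 5 };

	tbl_free_all(&t);
	printf("free=%d (== max=%d)\n", t.free, t.max);
	return 0;
}]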
index 30d2eaa18c0479adcd75315db194d3785b8007bc..90c645b8538e0f7ae8c77d625ded6cd6b0e0ca0f 100644 (file)
@@ -1920,7 +1920,7 @@ static int enic_open(struct net_device *netdev)
 {
        struct enic *enic = netdev_priv(netdev);
        unsigned int i;
-       int err;
+       int err, ret;
 
        err = enic_request_intr(enic);
        if (err) {
@@ -1971,16 +1971,15 @@ static int enic_open(struct net_device *netdev)
                vnic_intr_unmask(&enic->intr[i]);
 
        enic_notify_timer_start(enic);
-       enic_rfs_flw_tbl_init(enic);
+       enic_rfs_timer_start(enic);
 
        return 0;
 
 err_out_free_rq:
        for (i = 0; i < enic->rq_count; i++) {
-               err = vnic_rq_disable(&enic->rq[i]);
-               if (err)
-                       return err;
-               vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+               ret = vnic_rq_disable(&enic->rq[i]);
+               if (!ret)
+                       vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
        }
        enic_dev_notify_unset(enic);
 err_out_free_intr:
@@ -2904,6 +2903,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        timer_setup(&enic->notify_timer, enic_notify_timer, 0);
 
+       enic_rfs_flw_tbl_init(enic);
        enic_set_rx_coal_setting(enic);
        INIT_WORK(&enic->reset, enic_reset);
        INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
index 6d7404f66f84af7322c6b58def8cbff94958ca12..1c9ad3630c7754b692079c19ffafec7c07922c80 100644 (file)
 #define DRV_NAME               "gmac-gemini"
 #define DRV_VERSION            "1.0"
 
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
 #define HSIZE_8                        0x00
 #define HSIZE_16               0x01
 #define HSIZE_32               0x02
@@ -146,6 +151,7 @@ struct gemini_ethernet {
        void __iomem *base;
        struct gemini_ethernet_port *port0;
        struct gemini_ethernet_port *port1;
+       bool initialized;
 
        spinlock_t      irq_lock; /* Locks IRQ-related registers */
        unsigned int    freeq_order;
@@ -300,23 +306,26 @@ static void gmac_speed_set(struct net_device *netdev)
                status.bits.speed = GMAC_SPEED_1000;
                if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
                        status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
-               netdev_info(netdev, "connect to RGMII @ 1Gbit\n");
+               netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n",
+                          phydev_name(phydev));
                break;
        case 100:
                status.bits.speed = GMAC_SPEED_100;
                if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
                        status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
-               netdev_info(netdev, "connect to RGMII @ 100 Mbit\n");
+               netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n",
+                          phydev_name(phydev));
                break;
        case 10:
                status.bits.speed = GMAC_SPEED_10;
                if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
                        status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
-               netdev_info(netdev, "connect to RGMII @ 10 Mbit\n");
+               netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n",
+                          phydev_name(phydev));
                break;
        default:
-               netdev_warn(netdev, "Not supported PHY speed (%d)\n",
-                           phydev->speed);
+               netdev_warn(netdev, "Unsupported PHY speed (%d) on %s\n",
+                           phydev->speed, phydev_name(phydev));
        }
 
        if (phydev->duplex == DUPLEX_FULL) {
@@ -363,12 +372,6 @@ static int gmac_setup_phy(struct net_device *netdev)
                return -ENODEV;
        netdev->phydev = phy;
 
-       netdev_info(netdev, "connected to PHY \"%s\"\n",
-                   phydev_name(phy));
-       phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
-                          (unsigned long)phy->phy_id,
-                          phy_modes(phy->interface));
-
        phy->supported &= PHY_GBIT_FEATURES;
        phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
        phy->advertising = phy->supported;
@@ -376,19 +379,19 @@ static int gmac_setup_phy(struct net_device *netdev)
        /* set PHY interface type */
        switch (phy->interface) {
        case PHY_INTERFACE_MODE_MII:
-               netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+               netdev_dbg(netdev,
+                          "MII: set GMAC0 to GMII mode, GMAC1 disabled\n");
                status.bits.mii_rmii = GMAC_PHY_MII;
-               netdev_info(netdev, "connect to MII\n");
                break;
        case PHY_INTERFACE_MODE_GMII:
-               netdev_info(netdev, "set GMAC0 to GMII mode, GMAC1 disabled\n");
+               netdev_dbg(netdev,
+                          "GMII: set GMAC0 to GMII mode, GMAC1 disabled\n");
                status.bits.mii_rmii = GMAC_PHY_GMII;
-               netdev_info(netdev, "connect to GMII\n");
                break;
        case PHY_INTERFACE_MODE_RGMII:
-               dev_info(dev, "set GMAC0 and GMAC1 to MII/RGMII mode\n");
+               netdev_dbg(netdev,
+                          "RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n");
                status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
-               netdev_info(netdev, "connect to RGMII\n");
                break;
        default:
                netdev_err(netdev, "Unsupported MII interface\n");
@@ -398,29 +401,63 @@ static int gmac_setup_phy(struct net_device *netdev)
        }
        writel(status.bits32, port->gmac_base + GMAC_STATUS);
 
+       if (netif_msg_link(port))
+               phy_attached_info(phy);
+
        return 0;
 }
 
-static int gmac_pick_rx_max_len(int max_l3_len)
-{
-       /* index = CONFIG_MAXLEN_XXX values */
-       static const int max_len[8] = {
-               1536, 1518, 1522, 1542,
-               9212, 10236, 1518, 1518
-       };
-       int i, n = 5;
+/* The maximum frame length is not logically enumerated in the
+ * hardware, so we do a table lookup to find the applicable max
+ * frame length.
+ */
+struct gmac_max_framelen {
+       unsigned int max_l3_len;
+       u8 val;
+};
+
+static const struct gmac_max_framelen gmac_maxlens[] = {
+       {
+               .max_l3_len = 1518,
+               .val = CONFIG0_MAXLEN_1518,
+       },
+       {
+               .max_l3_len = 1522,
+               .val = CONFIG0_MAXLEN_1522,
+       },
+       {
+               .max_l3_len = 1536,
+               .val = CONFIG0_MAXLEN_1536,
+       },
+       {
+               .max_l3_len = 1542,
+               .val = CONFIG0_MAXLEN_1542,
+       },
+       {
+               .max_l3_len = 9212,
+               .val = CONFIG0_MAXLEN_9k,
+       },
+       {
+               .max_l3_len = 10236,
+               .val = CONFIG0_MAXLEN_10k,
+       },
+};
 
-       max_l3_len += ETH_HLEN + VLAN_HLEN;
+static int gmac_pick_rx_max_len(unsigned int max_l3_len)
+{
+       const struct gmac_max_framelen *maxlen;
+       int maxtot;
+       int i;
 
-       if (max_l3_len > max_len[n])
-               return -1;
+       maxtot = max_l3_len + ETH_HLEN + VLAN_HLEN;
 
-       for (i = 0; i < 5; i++) {
-               if (max_len[i] >= max_l3_len && max_len[i] < max_len[n])
-                       n = i;
+       for (i = 0; i < ARRAY_SIZE(gmac_maxlens); i++) {
+               maxlen = &gmac_maxlens[i];
+               if (maxtot <= maxlen->max_l3_len)
+                       return maxlen->val;
        }
 
-       return n;
+       return -1;
 }
 
 static int gmac_init(struct net_device *netdev)
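[Note: gmac_pick_rx_max_len() above now walks an ascending table and returns the register encoding of the smallest hardware bucket that fits the frame. A standalone model of the lookup; the .val numbers are placeholders, not the real CONFIG0_MAXLEN_* encodings.

#include <stdio.h>

#define ETH_HLEN 14
#define VLAN_HLEN 4

struct maxlen_sketch {
	unsigned int max_frame_len;
	int val;
};

/* sorted ascending so the first fitting bucket is the smallest */
static const struct maxlen_sketch maxlens[] = {
	{ 1518, 1 }, { 1522, 2 }, { 1536, 0 },
	{ 1542, 3 }, { 9212, 4 }, { 10236, 5 },
};

static int pick_rx_max_len(unsigned int max_l3_len)
{
	unsigned int maxtot = max_l3_len + ETH_HLEN + VLAN_HLEN;
	unsigned int i;

	for (i = 0; i < sizeof(maxlens) / sizeof(maxlens[0]); i++)
		if (maxtot <= maxlens[i].max_frame_len)
			return maxlens[i].val;
	return -1;	/* nothing fits */
}

int main(void)
{
	printf("%d\n", pick_rx_max_len(1500));	/* fits the 1518 bucket */
	printf("%d\n", pick_rx_max_len(9000));	/* needs the 9212 bucket */
	printf("%d\n", pick_rx_max_len(20000));	/* -1: too large */
	return 0;
}]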
@@ -1276,8 +1313,8 @@ static void gmac_enable_irq(struct net_device *netdev, int enable)
        unsigned long flags;
        u32 val, mask;
 
-       netdev_info(netdev, "%s device %d %s\n", __func__,
-                   netdev->dev_id, enable ? "enable" : "disable");
+       netdev_dbg(netdev, "%s device %d %s\n", __func__,
+                  netdev->dev_id, enable ? "enable" : "disable");
        spin_lock_irqsave(&geth->irq_lock, flags);
 
        mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2);
@@ -1753,7 +1790,10 @@ static int gmac_open(struct net_device *netdev)
        phy_start(netdev->phydev);
 
        err = geth_resize_freeq(port);
-       if (err) {
+       /* It's fine if it's just busy; the other port has set up
+        * the freeq in that case.
+        */
+       if (err && (err != -EBUSY)) {
                netdev_err(netdev, "could not resize freeq\n");
                goto err_stop_phy;
        }
@@ -1782,7 +1822,7 @@ static int gmac_open(struct net_device *netdev)
                     HRTIMER_MODE_REL);
        port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
 
-       netdev_info(netdev, "opened\n");
+       netdev_dbg(netdev, "opened\n");
 
        return 0;
 
@@ -2264,6 +2304,14 @@ static void gemini_port_remove(struct gemini_ethernet_port *port)
 
 static void gemini_ethernet_init(struct gemini_ethernet *geth)
 {
+       /* Only do this once both ports are online */
+       if (geth->initialized)
+               return;
+       if (geth->port0 && geth->port1)
+               geth->initialized = true;
+       else
+               return;
+
        writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
        writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG);
        writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG);
@@ -2354,6 +2402,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
        port->id = id;
        port->geth = geth;
        port->dev = dev;
+       port->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
        /* DMA memory */
        dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2410,6 +2459,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
                geth->port0 = port;
        else
                geth->port1 = port;
+
+       /* This will just be done once both ports are up and reset */
+       gemini_ethernet_init(geth);
+
        platform_set_drvdata(pdev, port);
 
        /* Set up and register the netdev */
@@ -2423,6 +2476,11 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
 
        netdev->hw_features = GMAC_OFFLOAD_FEATURES;
        netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
+       /* We can handle jumbo frames up to 10236 bytes, so let's accept
+        * payloads of 10236 bytes minus the VLAN and Ethernet header
+        */
+       netdev->min_mtu = ETH_MIN_MTU;
+       netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
 
        port->freeq_refill = 0;
        netif_napi_add(netdev, &port->napi, gmac_napi_poll,
@@ -2435,7 +2493,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
                        port->mac_addr[0], port->mac_addr[1],
                        port->mac_addr[2]);
                dev_info(dev, "using a random ethernet address\n");
-               random_ether_addr(netdev->dev_addr);
+               eth_random_addr(netdev->dev_addr);
        }
        gmac_write_mac_address(netdev);
 
@@ -2527,7 +2585,6 @@ static int gemini_ethernet_probe(struct platform_device *pdev)
 
        spin_lock_init(&geth->irq_lock);
        spin_lock_init(&geth->freeq_lock);
-       gemini_ethernet_init(geth);
 
        /* The children will use this */
        platform_set_drvdata(pdev, geth);
@@ -2540,8 +2597,8 @@ static int gemini_ethernet_remove(struct platform_device *pdev)
 {
        struct gemini_ethernet *geth = platform_get_drvdata(pdev);
 
-       gemini_ethernet_init(geth);
        geth_cleanup_freeq(geth);
+       geth->initialized = false;
 
        return 0;
 }
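[Note: the gemini probe-ordering change above defers gemini_ethernet_init() until both port drivers have probed, using the new "initialized" flag so the global reset runs exactly once. A sketch of the pattern; the types are illustrative.

#include <stdbool.h>
#include <stdio.h>

struct geth_sketch {
	bool initialized;
	bool port0, port1;	/* stand-ins for the port pointers */
};

static void geth_global_init(struct geth_sketch *g)
{
	if (g->initialized)
		return;			/* already done */
	if (!(g->port0 && g->port1))
		return;			/* wait for the other port */
	g->initialized = true;
	printf("global init runs exactly once\n");
}

int main(void)
{
	struct geth_sketch g = { 0 };

	g.port0 = true;
	geth_global_init(&g);	/* no-op: port1 not probed yet */
	g.port1 = true;
	geth_global_init(&g);	/* runs */
	geth_global_init(&g);	/* no-op: already initialized */
	return 0;
}]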
index 382891f81e0932a273ca3de26f95c1a685bea64e..7005949dc17bb70a99c8b3b6ff1dcc064afd8242 100644 (file)
@@ -185,34 +185,13 @@ static inline void queue_tail_inc(struct be_queue_info *q)
 
 struct be_eq_obj {
        struct be_queue_info q;
-       char desc[32];
-
-       /* Adaptive interrupt coalescing (AIC) info */
-       bool enable_aic;
-       u32 min_eqd;            /* in usecs */
-       u32 max_eqd;            /* in usecs */
-       u32 eqd;                /* configured val when aic is off */
-       u32 cur_eqd;            /* in usecs */
 
+       struct be_adapter *adapter;
+       struct napi_struct napi;
        u8 idx;                 /* array index */
        u8 msix_idx;
        u16 spurious_intr;
-       struct napi_struct napi;
-       struct be_adapter *adapter;
        cpumask_var_t  affinity_mask;
-
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#define BE_EQ_IDLE             0
-#define BE_EQ_NAPI             1       /* napi owns this EQ */
-#define BE_EQ_POLL             2       /* poll owns this EQ */
-#define BE_EQ_LOCKED           (BE_EQ_NAPI | BE_EQ_POLL)
-#define BE_EQ_NAPI_YIELD       4       /* napi yielded this EQ */
-#define BE_EQ_POLL_YIELD       8       /* poll yielded this EQ */
-#define BE_EQ_YIELD            (BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD)
-#define BE_EQ_USER_PEND                (BE_EQ_POLL | BE_EQ_POLL_YIELD)
-       unsigned int state;
-       spinlock_t lock;        /* lock to serialize napi and busy-poll */
-#endif  /* CONFIG_NET_RX_BUSY_POLL */
 } ____cacheline_aligned_in_smp;
 
 struct be_aic_obj {            /* Adaptive interrupt coalescing (AIC) info */
@@ -238,7 +217,6 @@ struct be_tx_stats {
        u64 tx_vxlan_offload_pkts;
        u64 tx_reqs;
        u64 tx_compl;
-       ulong tx_jiffies;
        u32 tx_stops;
        u32 tx_drv_drops;       /* pkts dropped by driver */
        /* the error counters are described in be_ethtool.c */
@@ -261,9 +239,9 @@ struct be_tx_compl_info {
 
 struct be_tx_obj {
        u32 db_offset;
+       struct be_tx_compl_info txcp;
        struct be_queue_info q;
        struct be_queue_info cq;
-       struct be_tx_compl_info txcp;
        /* Remember the skbs that were transmitted */
        struct sk_buff *sent_skb_list[TX_Q_LEN];
        struct be_tx_stats stats;
@@ -458,10 +436,10 @@ struct be_port_resources {
 #define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
 
 struct rss_info {
-       u64 rss_flags;
        u8 rsstable[RSS_INDIR_TABLE_LEN];
        u8 rss_queue[RSS_INDIR_TABLE_LEN];
        u8 rss_hkey[RSS_HASH_KEY_LEN];
+       u64 rss_flags;
 };
 
 #define BE_INVALID_DIE_TEMP    0xFF
@@ -544,11 +522,13 @@ enum {
 };
 
 struct be_error_recovery {
-       /* Lancer error recovery variables */
-       u8 recovery_retries;
+       union {
+               u8 recovery_retries;    /* used for Lancer              */
+               u8 recovery_state;      /* used for BEx and Skyhawk     */
+       };
 
        /* BEx/Skyhawk error recovery variables */
-       u8 recovery_state;
+       bool recovery_supported;
        u16 ue_to_reset_time;           /* Time after UE, to soft reset
                                         * the chip - PF0 only
                                         */
@@ -556,7 +536,6 @@ struct be_error_recovery {
                                         * of SLIPORT_SEMAPHORE reg
                                         */
        u16 last_err_code;
-       bool recovery_supported;
        unsigned long probe_time;
        unsigned long last_recovery_time;
 
index 8f755009ff3820e34fa1501970054513cf5198ce..05e4c0bb25f44c96038bc5b268adf2d921c22d0a 100644 (file)
@@ -3403,9 +3403,11 @@ static int be_msix_register(struct be_adapter *adapter)
        int status, i, vec;
 
        for_all_evt_queues(adapter, eqo, i) {
-               sprintf(eqo->desc, "%s-q%d", netdev->name, i);
+               char irq_name[IFNAMSIZ + 4];
+
+               snprintf(irq_name, sizeof(irq_name), "%s-q%d", netdev->name, i);
                vec = be_msix_vec_get(adapter, eqo);
-               status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
+               status = request_irq(vec, be_msix, 0, irq_name, eqo);
                if (status)
                        goto err_msix;
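[Note: the be2net hunk drops the 32-byte desc[] field from struct be_eq_obj and builds the "<netdev>-q<N>" IRQ name with a bounded snprintf() instead of an unchecked sprintf() into the struct. The formatting in isolation, with IFNAMSIZ fixed at 16 as in the kernel ABI:

#include <stdio.h>

#define IFNAMSIZ 16

int main(void)
{
	char irq_name[IFNAMSIZ + 4];	/* room for "-q" plus queue digits */
	const char *netdev_name = "eth0";
	int i = 3;

	/* snprintf() truncates safely instead of overrunning the buffer */
	snprintf(irq_name, sizeof(irq_name), "%s-q%d", netdev_name, i);
	printf("%s\n", irq_name);	/* eth0-q3 */
	return 0;
}]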
 
index 78db8e62a83f17c05d615cb674703efa4e926bd0..ed6c76d20b45b2a38ccf87e63487e77a756812a3 100644 (file)
@@ -1735,8 +1735,8 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
        if (unlikely(nd->state != ncsi_dev_state_functional))
                return;
 
-       netdev_info(nd->dev, "NCSI interface %s\n",
-                   nd->link_up ? "up" : "down");
+       netdev_dbg(nd->dev, "NCSI interface %s\n",
+                  nd->link_up ? "up" : "down");
 }
 
 static void ftgmac100_setup_clk(struct ftgmac100 *priv)
index 5f4e1ffa7b95fe4f8d2bb6447764951c51fffc67..65a22cd9aef26197f79877862756133b4109b895 100644 (file)
@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 /* Default alignment for start of data in an Rx FD */
 #define DPAA_FD_DATA_ALIGNMENT  16
 
+/* The DPAA requires 256 bytes reserved and mapped for the SGT */
+#define DPAA_SGT_SIZE 256
+
 /* Values for the L3R field of the FM Parse Results
  */
 /* L3 Type field: First IP Present IPv4 */
@@ -1168,7 +1171,7 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
        buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
        buf_prefix_content.pass_prs_result = true;
        buf_prefix_content.pass_hash_result = true;
-       buf_prefix_content.pass_time_stamp = false;
+       buf_prefix_content.pass_time_stamp = true;
        buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
 
        params.specific_params.non_rx_params.err_fqid = errq->fqid;
@@ -1210,7 +1213,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
        buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
        buf_prefix_content.pass_prs_result = true;
        buf_prefix_content.pass_hash_result = true;
-       buf_prefix_content.pass_time_stamp = false;
+       buf_prefix_content.pass_time_stamp = true;
        buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
 
        rx_p = &params.specific_params.rx_params;
@@ -1607,18 +1610,32 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 {
        const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
        struct device *dev = priv->net_dev->dev.parent;
+       struct skb_shared_hwtstamps shhwtstamps;
        dma_addr_t addr = qm_fd_addr(fd);
        const struct qm_sg_entry *sgt;
        struct sk_buff **skbh, *skb;
        int nr_frags, i;
+       u64 ns;
 
        skbh = (struct sk_buff **)phys_to_virt(addr);
        skb = *skbh;
 
+       if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+               memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+               if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
+                                         &ns)) {
+                       shhwtstamps.hwtstamp = ns_to_ktime(ns);
+                       skb_tstamp_tx(skb, &shhwtstamps);
+               } else {
+                       dev_warn(dev, "fman_port_get_tstamp failed!\n");
+               }
+       }
+
        if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
                nr_frags = skb_shinfo(skb)->nr_frags;
-               dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
-                                sizeof(struct qm_sg_entry) * (1 + nr_frags),
+               dma_unmap_single(dev, addr,
+                                qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
                                 dma_dir);
 
                /* The sgt buffer has been allocated with netdev_alloc_frag(),
@@ -1903,8 +1920,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
        void *sgt_buf;
 
        /* get a page frag to store the SGTable */
-       sz = SKB_DATA_ALIGN(priv->tx_headroom +
-               sizeof(struct qm_sg_entry) * (1 + nr_frags));
+       sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
        sgt_buf = netdev_alloc_frag(sz);
        if (unlikely(!sgt_buf)) {
                netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
@@ -1972,9 +1988,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
        skbh = (struct sk_buff **)buffer_start;
        *skbh = skb;
 
-       addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
-                             sizeof(struct qm_sg_entry) * (1 + nr_frags),
-                             dma_dir);
+       addr = dma_map_single(dev, buffer_start,
+                             priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
        if (unlikely(dma_mapping_error(dev, addr))) {
                dev_err(dev, "DMA mapping failed");
                err = -EINVAL;
@@ -2086,6 +2101,11 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
        if (unlikely(err < 0))
                goto skb_to_fd_failed;
 
+       if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+               fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
+               skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+       }
+
        if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
                return NETDEV_TX_OK;
 
@@ -2227,6 +2247,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
                                                struct qman_fq *fq,
                                                const struct qm_dqrr_entry *dq)
 {
+       struct skb_shared_hwtstamps *shhwtstamps;
        struct rtnl_link_stats64 *percpu_stats;
        struct dpaa_percpu_priv *percpu_priv;
        const struct qm_fd *fd = &dq->fd;
@@ -2240,6 +2261,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
        struct sk_buff *skb;
        int *count_ptr;
        void *vaddr;
+       u64 ns;
 
        fd_status = be32_to_cpu(fd->status);
        fd_format = qm_fd_get_format(fd);
@@ -2304,6 +2326,16 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
        if (!skb)
                return qman_cb_dqrr_consume;
 
+       if (priv->rx_tstamp) {
+               shhwtstamps = skb_hwtstamps(skb);
+               memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+               if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
+                       shhwtstamps->hwtstamp = ns_to_ktime(ns);
+               else
+                       dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n");
+       }
+
        skb->protocol = eth_type_trans(skb, net_dev);
 
        if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
@@ -2523,11 +2555,58 @@ static int dpaa_eth_stop(struct net_device *net_dev)
        return err;
 }
 
+static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+       struct dpaa_priv *priv = netdev_priv(dev);
+       struct hwtstamp_config config;
+
+       if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       switch (config.tx_type) {
+       case HWTSTAMP_TX_OFF:
+               /* Rx/Tx timestamping can't be disabled separately,
+                * so do nothing here.
+                */
+               priv->tx_tstamp = false;
+               break;
+       case HWTSTAMP_TX_ON:
+               priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
+               priv->tx_tstamp = true;
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+               /* Rx/Tx timestamping can't be disabled separately,
+                * so do nothing here.
+                */
+               priv->rx_tstamp = false;
+       } else {
+               priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
+               priv->rx_tstamp = true;
+               /* TS is set for all frame types, not only those requested */
+               config.rx_filter = HWTSTAMP_FILTER_ALL;
+       }
+
+       return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
+                       -EFAULT : 0;
+}
+
 static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
 {
-       if (!net_dev->phydev)
-               return -EINVAL;
-       return phy_mii_ioctl(net_dev->phydev, rq, cmd);
+       int ret = -EINVAL;
+
+       if (cmd == SIOCGMIIREG) {
+               if (net_dev->phydev)
+                       return phy_mii_ioctl(net_dev->phydev, rq, cmd);
+       }
+
+       if (cmd == SIOCSHWTSTAMP)
+               return dpaa_ts_ioctl(net_dev, rq, cmd);
+
+       return ret;
 }
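[Note: dpaa_ts_ioctl() above implements the usual SIOCSHWTSTAMP contract: validate tx_type, coerce rx_filter to what the hardware can actually do, and report the coerced value back to userspace. A userspace model with the copy_{from,to}_user() plumbing stripped out; the enum values mirror linux/net_tstamp.h, and the structs are simplified stand-ins.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum { HWTSTAMP_TX_OFF, HWTSTAMP_TX_ON };
enum { HWTSTAMP_FILTER_NONE, HWTSTAMP_FILTER_ALL };

struct ts_config { int tx_type; int rx_filter; };
struct priv_sketch { bool tx_tstamp, rx_tstamp; };

static int ts_ioctl_sketch(struct priv_sketch *p, struct ts_config *cfg)
{
	switch (cfg->tx_type) {
	case HWTSTAMP_TX_OFF:
		p->tx_tstamp = false;
		break;
	case HWTSTAMP_TX_ON:
		p->tx_tstamp = true;
		break;
	default:
		return -ERANGE;
	}

	if (cfg->rx_filter == HWTSTAMP_FILTER_NONE) {
		p->rx_tstamp = false;
	} else {
		p->rx_tstamp = true;
		/* hardware stamps every frame, so report FILTER_ALL back */
		cfg->rx_filter = HWTSTAMP_FILTER_ALL;
	}
	return 0;
}

int main(void)
{
	struct priv_sketch p = { 0 };
	struct ts_config cfg = { HWTSTAMP_TX_ON, 42 /* some narrower filter */ };

	printf("ret=%d tx=%d rx=%d filter=%d\n",
	       ts_ioctl_sketch(&p, &cfg), p.tx_tstamp, p.rx_tstamp,
	       cfg.rx_filter);	/* ret=0 tx=1 rx=1 filter=1 */
	return 0;
}]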
 
 static const struct net_device_ops dpaa_ops = {
index bd9422082f83c8cffd9e12116a155ac185a190f1..af320f83c742a0c894c1fe4e784fae74f0abdbe4 100644 (file)
@@ -182,6 +182,9 @@ struct dpaa_priv {
 
        struct dpaa_buffer_layout buf_layout[2];
        u16 rx_headroom;
+
+       bool tx_tstamp; /* Tx timestamping enabled */
+       bool rx_tstamp; /* Rx timestamping enabled */
 };
 
 /* from dpaa_ethtool.c */
index 2f933b6b2f4e79b9e1359d8a9b57ba6af4acbfb9..3184c8f7cdd05a3114f0b6c4216fa0c9d8377795 100644 (file)
@@ -32,6 +32,9 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/string.h>
+#include <linux/of_platform.h>
+#include <linux/net_tstamp.h>
+#include <linux/fsl/ptp_qoriq.h>
 
 #include "dpaa_eth.h"
 #include "mac.h"
@@ -515,6 +518,41 @@ static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
        return ret;
 }
 
+static int dpaa_get_ts_info(struct net_device *net_dev,
+                           struct ethtool_ts_info *info)
+{
+       struct device *dev = net_dev->dev.parent;
+       struct device_node *mac_node = dev->of_node;
+       struct device_node *fman_node = NULL, *ptp_node = NULL;
+       struct platform_device *ptp_dev = NULL;
+       struct qoriq_ptp *ptp = NULL;
+
+       info->phc_index = -1;
+
+       fman_node = of_get_parent(mac_node);
+       if (fman_node)
+               ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+
+       if (ptp_node)
+               ptp_dev = of_find_device_by_node(ptp_node);
+
+       if (ptp_dev)
+               ptp = platform_get_drvdata(ptp_dev);
+
+       if (ptp)
+               info->phc_index = ptp->phc_index;
+
+       info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+                               SOF_TIMESTAMPING_RX_HARDWARE |
+                               SOF_TIMESTAMPING_RAW_HARDWARE;
+       info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+                        (1 << HWTSTAMP_TX_ON);
+       info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+                          (1 << HWTSTAMP_FILTER_ALL);
+
+       return 0;
+}
+
 const struct ethtool_ops dpaa_ethtool_ops = {
        .get_drvinfo = dpaa_get_drvinfo,
        .get_msglevel = dpaa_get_msglevel,
@@ -530,4 +568,5 @@ const struct ethtool_ops dpaa_ethtool_ops = {
        .set_link_ksettings = dpaa_set_link_ksettings,
        .get_rxnfc = dpaa_get_rxnfc,
        .set_rxnfc = dpaa_set_rxnfc,
+       .get_ts_info = dpaa_get_ts_info,
 };
index 36c2d7d6ee1b0bb0cd14aa0710eee1ebfea6b942..7e892b1cbd3de951a1721108c4976daa1686edae 100644 (file)
@@ -99,7 +99,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
 {
        unsigned long flags;
        u32 val, tempval;
-       int inc;
        struct timespec64 ts;
        u64 ns;
        val = 0;
@@ -114,7 +113,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
 
        fep->pps_channel = DEFAULT_PPS_CHANNEL;
        fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
-       inc = fep->ptp_inc;
 
        spin_lock_irqsave(&fep->tmreg_lock, flags);
 
index 9530405030a70974c92c1ddc2263b00610aab32c..c415ac67cb7bef218d476fc59f7302b83660513b 100644 (file)
@@ -2801,7 +2801,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
        of_node_put(muram_node);
        of_node_put(fm_node);
 
-       err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
+       err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED,
+                              "fman", fman);
        if (err < 0) {
                dev_err(&of_dev->dev, "%s: irq %d allocation failed (error = %d)\n",
                        __func__, irq, err);
index bfa02e0014ae01f4a08a600e0ba27a856c0371f6..935c317fa69642c9707fc583d1f34a12061b390e 100644 (file)
@@ -41,6 +41,7 @@
 /* Frame queue Context Override */
 #define FM_FD_CMD_FCO                   0x80000000
 #define FM_FD_CMD_RPD                   0x40000000  /* Read Prepended Data */
+#define FM_FD_CMD_UPD                  0x20000000  /* Update Prepended Data */
 #define FM_FD_CMD_DTC                   0x10000000  /* Do L4 Checksum */
 
 /* TX-Port: Unsupported Format */
index 57b1e2b47c0a9c68a8bfeb18e166804c3fe0cacd..1ca543ac8f2cd606e5b6e5e727a4f21b96e8550e 100644 (file)
 #define DTSEC_ECNTRL_R100M             0x00000008
 #define DTSEC_ECNTRL_QSGMIIM           0x00000001
 
+#define TCTRL_TTSE                     0x00000040
 #define TCTRL_GTS                      0x00000020
 
 #define RCTRL_PAL_MASK                 0x001f0000
 #define RCTRL_PAL_SHIFT                        16
 #define RCTRL_GHTX                     0x00000400
+#define RCTRL_RTSE                     0x00000040
 #define RCTRL_GRS                      0x00000020
 #define RCTRL_MPROM                    0x00000008
 #define RCTRL_RSF                      0x00000004
@@ -1136,6 +1138,31 @@ int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
        return 0;
 }
 
+int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
+{
+       struct dtsec_regs __iomem *regs = dtsec->regs;
+       u32 rctrl, tctrl;
+
+       if (!is_init_done(dtsec->dtsec_drv_param))
+               return -EINVAL;
+
+       rctrl = ioread32be(&regs->rctrl);
+       tctrl = ioread32be(&regs->tctrl);
+
+       if (enable) {
+               rctrl |= RCTRL_RTSE;
+               tctrl |= TCTRL_TTSE;
+       } else {
+               rctrl &= ~RCTRL_RTSE;
+               tctrl &= ~TCTRL_TTSE;
+       }
+
+       iowrite32be(rctrl, &regs->rctrl);
+       iowrite32be(tctrl, &regs->tctrl);
+
+       return 0;
+}
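[Note: dtsec_set_tstamp() above is a paired read-modify-write on RCTRL and TCTRL. The set/clear-under-flag idiom in isolation; rmw_flag() is an illustrative name, and the flag values match the defines added earlier in this file.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RCTRL_RTSE 0x00000040u

static uint32_t rmw_flag(uint32_t reg, uint32_t flag, bool enable)
{
	return enable ? (reg | flag) : (reg & ~flag);
}

int main(void)
{
	uint32_t rctrl = 0x00000004;	/* pretend RSF was already set */

	rctrl = rmw_flag(rctrl, RCTRL_RTSE, true);
	printf("0x%08x\n", (unsigned int)rctrl);	/* 0x00000044 */
	rctrl = rmw_flag(rctrl, RCTRL_RTSE, false);
	printf("0x%08x\n", (unsigned int)rctrl);	/* 0x00000004 */
	return 0;
}

The kernel version bails out with -EINVAL first when init hasn't completed, then applies the same transform through ioread32be()/iowrite32be().]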
+
 int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
 {
        struct dtsec_regs __iomem *regs = dtsec->regs;
index 1a689adf5a22744a12d81f8ef1f535f1f9d420f4..5149d96ec2c15e80648700a18d6d56cb522751c6 100644 (file)
@@ -56,5 +56,6 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
 int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
 int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
 int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable);
+int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable);
 
 #endif /* __DTSEC_H */
index 446a97b792e3dea467f96c4fed3dfee1cd7840f4..bc6eb30aa20f1736cc49a78531a945740a76a62f 100644 (file)
@@ -964,6 +964,11 @@ int memac_set_allmulti(struct fman_mac *memac, bool enable)
        return 0;
 }
 
+int memac_set_tstamp(struct fman_mac *memac, bool enable)
+{
+       return 0; /* Always enabled. */
+}
+
 int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
 {
        struct memac_regs __iomem *regs = memac->regs;
index b5a50338ed9ae21dd824129295ec868454c1da1a..b2c671ec0ce7909dc4ae0f79e40487430ed8f9e1 100644 (file)
@@ -58,5 +58,6 @@ int memac_set_exception(struct fman_mac *memac,
 int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
 int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
 int memac_set_allmulti(struct fman_mac *memac, bool enable);
+int memac_set_tstamp(struct fman_mac *memac, bool enable);
 
 #endif /* __MEMAC_H */
index ce6e24c74978a22a1d22383f0a5b4f38ffec7c00..ee82ee1384eb3160651ce4764f382c3873edaa91 100644 (file)
@@ -324,6 +324,10 @@ struct fman_port_qmi_regs {
 #define HWP_HXS_PHE_REPORT 0x00000800
 #define HWP_HXS_PCAC_PSTAT 0x00000100
 #define HWP_HXS_PCAC_PSTOP 0x00000001
+#define HWP_HXS_TCP_OFFSET 0xA
+#define HWP_HXS_UDP_OFFSET 0xB
+#define HWP_HXS_SH_PAD_REM 0x80000000
+
 struct fman_port_hwp_regs {
        struct {
                u32 ssa; /* Soft Sequence Attachment */
@@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port)
                iowrite32be(0xffffffff, &regs->pmda[i].lcv);
        }
 
+       /* Short packet padding removal from checksum calculation */
+       iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa);
+       iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa);
+
        start_port_hwp(port);
 }
 
@@ -1731,6 +1739,18 @@ int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset)
 }
 EXPORT_SYMBOL(fman_port_get_hash_result_offset);
 
+int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp)
+{
+       if (port->buffer_offsets.time_stamp_offset == ILLEGAL_BASE)
+               return -EINVAL;
+
+       *tstamp = be64_to_cpu(*(__be64 *)(data +
+                       port->buffer_offsets.time_stamp_offset));
+
+       return 0;
+}
+EXPORT_SYMBOL(fman_port_get_tstamp);
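[Note: fman_port_get_tstamp() above reads a big-endian 64-bit nanosecond count at the port's configured timestamp offset inside the prepended data area. A userspace model of the extraction; the buffer contents and offset are made up.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t get_be64(const void *data, unsigned int offset)
{
	const unsigned char *p = (const unsigned char *)data + offset;
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];	/* big-endian to host order */
	return v;
}

int main(void)
{
	unsigned char buf[16] = { 0 };

	/* place 0x0102030405060708 ns at offset 8, big-endian */
	memcpy(buf + 8, "\x01\x02\x03\x04\x05\x06\x07\x08", 8);
	printf("ts=%llu ns\n", (unsigned long long)get_be64(buf, 8));
	return 0;
}

The driver feeds the extracted value to ns_to_ktime() for skb_hwtstamps(), as the dpaa_eth hunks earlier in this series show.]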
+
 static int fman_port_probe(struct platform_device *of_dev)
 {
        struct fman_port *port;
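/*
 * A minimal sketch of consuming fman_port_get_tstamp() on the receive
 * path. The skb plumbing, and the assumption that the FMan timestamp
 * counter already runs in nanoseconds, belong to the caller (the
 * dpaa_eth driver in-tree), not to this diff:
 */
static void example_rx_hwtstamp(struct fman_port *port, const void *vaddr,
				struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
	u64 ns;

	if (!fman_port_get_tstamp(port, vaddr, &ns)) {
		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(ns);
	}
}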
index e86ca6a34e4e296051258df2b4d268bf4ade09a6..9dbb69f4012160fb744372351a87c0d3ff595a10 100644 (file)
@@ -153,6 +153,8 @@ u32 fman_port_get_qman_channel_id(struct fman_port *port);
 
 int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset);
 
+int fman_port_get_tstamp(struct fman_port *port, const void *data, u64 *tstamp);
+
 struct fman_port *fman_port_bind(struct device *dev);
 
 #endif /* __FMAN_PORT_H */
index 284735d4ebe9bbd452fd902c6723a6deb99cbd69..40705938eeccfb4e532d9d2732774934172bac6c 100644 (file)
@@ -44,6 +44,7 @@
 #define TGEC_TX_IPG_LENGTH_MASK        0x000003ff
 
 /* Command and Configuration Register (COMMAND_CONFIG) */
+#define CMD_CFG_EN_TIMESTAMP           0x00100000
 #define CMD_CFG_NO_LEN_CHK             0x00020000
 #define CMD_CFG_PAUSE_IGNORE           0x00000100
 #define CMF_CFG_CRC_FWD                        0x00000040
@@ -588,6 +589,26 @@ int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
        return 0;
 }
 
+int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
+{
+       struct tgec_regs __iomem *regs = tgec->regs;
+       u32 tmp;
+
+       if (!is_init_done(tgec->cfg))
+               return -EINVAL;
+
+       tmp = ioread32be(&regs->command_config);
+
+       if (enable)
+               tmp |= CMD_CFG_EN_TIMESTAMP;
+       else
+               tmp &= ~CMD_CFG_EN_TIMESTAMP;
+
+       iowrite32be(tmp, &regs->command_config);
+
+       return 0;
+}
+
 int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
 {
        struct tgec_regs __iomem *regs = tgec->regs;
index cbbd3b422a98b43a50a0c5a1e33705051f9b02dd..3bfd1062b386dea6ecc2ea3cfa58463a29166cd1 100644 (file)
@@ -52,5 +52,6 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
 int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
 int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
 int tgec_set_allmulti(struct fman_mac *tgec, bool enable);
+int tgec_set_tstamp(struct fman_mac *tgec, bool enable);
 
 #endif /* __TGEC_H */
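/*
 * Note how the enable knob differs per MAC flavour while the new API is
 * uniform: dTSEC has separate RX/TX bits (RCTRL_RTSE, TCTRL_TTSE), tGEC
 * has a single CMD_CFG_EN_TIMESTAMP bit in COMMAND_CONFIG, and mEMAC
 * timestamps unconditionally, so memac_set_tstamp() simply returns 0.
 */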
index 7b5b95f52c098535942f625001847a2ea26dbeb3..a847b9c3b31a9a4e5c9c6f9734973c89d596e4c6 100644 (file)
@@ -471,6 +471,7 @@ static void setup_dtsec(struct mac_device *mac_dev)
        mac_dev->set_rx_pause           = dtsec_accept_rx_pause_frames;
        mac_dev->set_exception          = dtsec_set_exception;
        mac_dev->set_allmulti           = dtsec_set_allmulti;
+       mac_dev->set_tstamp             = dtsec_set_tstamp;
        mac_dev->set_multi              = set_multi;
        mac_dev->start                  = start;
        mac_dev->stop                   = stop;
@@ -490,6 +491,7 @@ static void setup_tgec(struct mac_device *mac_dev)
        mac_dev->set_rx_pause           = tgec_accept_rx_pause_frames;
        mac_dev->set_exception          = tgec_set_exception;
        mac_dev->set_allmulti           = tgec_set_allmulti;
+       mac_dev->set_tstamp             = tgec_set_tstamp;
        mac_dev->set_multi              = set_multi;
        mac_dev->start                  = start;
        mac_dev->stop                   = stop;
@@ -509,6 +511,7 @@ static void setup_memac(struct mac_device *mac_dev)
        mac_dev->set_rx_pause           = memac_accept_rx_pause_frames;
        mac_dev->set_exception          = memac_set_exception;
        mac_dev->set_allmulti           = memac_set_allmulti;
+       mac_dev->set_tstamp             = memac_set_tstamp;
        mac_dev->set_multi              = set_multi;
        mac_dev->start                  = start;
        mac_dev->stop                   = stop;
index b520cec120ee0af8fc571f8820079cffad8e4371..824a81a9f35072cdd39a0a8ba5ba041f20da1301 100644 (file)
@@ -68,6 +68,7 @@ struct mac_device {
        int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
        int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
        int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
+       int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
        int (*set_multi)(struct net_device *net_dev,
                         struct mac_device *mac_dev);
        int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
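/*
 * With set_tstamp wired into the ops table for all three MAC flavours,
 * a caller can toggle hardware timestamping without caring which MAC
 * backs the port. Minimal sketch (the fman_mac member name is assumed
 * to follow the driver's mac.h):
 */
static int example_toggle_tstamp(struct mac_device *mac_dev, bool on)
{
	return mac_dev->set_tstamp(mac_dev->fman_mac, on);
}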
index 8cb98cae0a6f506aa7804a5676e7a43570640017..395a5266ea30ad6186b78afd5f1b76f8a138cb84 100644 (file)
@@ -740,7 +740,6 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
 static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
                                       u64 class)
 {
-       unsigned int last_rule_idx = priv->cur_filer_idx;
        unsigned int cmp_rqfpr;
        unsigned int *local_rqfpr;
        unsigned int *local_rqfcr;
@@ -819,7 +818,6 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
        }
 
        priv->cur_filer_idx = l - 1;
-       last_rule_idx = l;
 
        /* hash rules */
        ethflow_to_filer_rules(priv, ethflow);
index 42fca3208c0bac2e7a72f3f82d3350b85b6d9398..22a817da861e3f62684123b26a81ae59944c22f3 100644 (file)
@@ -3096,6 +3096,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        ugeth_vdbg("%s: IN", __func__);
 
+       netdev_sent_queue(dev, skb->len);
        spin_lock_irqsave(&ugeth->lock, flags);
 
        dev->stats.tx_bytes += skb->len;
@@ -3240,6 +3241,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
 {
        /* Start from the next BD that should be filled */
        struct ucc_geth_private *ugeth = netdev_priv(dev);
+       unsigned int bytes_sent = 0;
+       int howmany = 0;
        u8 __iomem *bd;         /* BD pointer */
        u32 bd_status;
 
@@ -3257,7 +3260,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
                skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
                if (!skb)
                        break;
-
+               howmany++;
+               bytes_sent += skb->len;
                dev->stats.tx_packets++;
 
                dev_consume_skb_any(skb);
@@ -3279,6 +3283,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
                bd_status = in_be32((u32 __iomem *)bd);
        }
        ugeth->confBd[txQ] = bd;
+       netdev_completed_queue(dev, howmany, bytes_sent);
        return 0;
 }
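/*
 * The hunks in this file wire ucc_geth up to Byte Queue Limits (BQL).
 * The contract is small: report bytes handed to the hardware with
 * netdev_sent_queue() at xmit time, report what completed with
 * netdev_completed_queue(), and call netdev_reset_queue() whenever the
 * queue is (re)started or torn down so the accounting restarts from
 * zero. The pattern in isolation (sketch, driver details elided):
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_sent_queue(dev, skb->len);	/* before posting to HW */
	/* ... place skb on the hardware ring and kick it ... */
	return NETDEV_TX_OK;
}

static void example_tx_complete(struct net_device *dev,
				unsigned int pkts, unsigned int bytes)
{
	/* lets the stack adapt its in-flight byte budget */
	netdev_completed_queue(dev, pkts, bytes);
}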
 
@@ -3479,6 +3484,7 @@ static int ucc_geth_open(struct net_device *dev)
 
        phy_start(ugeth->phydev);
        napi_enable(&ugeth->napi);
+       netdev_reset_queue(dev);
        netif_start_queue(dev);
 
        device_set_wakeup_capable(&dev->dev,
@@ -3509,6 +3515,7 @@ static int ucc_geth_close(struct net_device *dev)
        free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
 
        netif_stop_queue(dev);
+       netdev_reset_queue(dev);
 
        return 0;
 }
index 8bcf470ff5f38a4e62842a5f31d5c0b45141ab85..25152715396bc04eda5e9d793a608ff7ae546071 100644 (file)
@@ -5,7 +5,7 @@
 config NET_VENDOR_HISILICON
        bool "Hisilicon devices"
        default y
-       depends on (OF || ACPI) && HAS_DMA
+       depends on OF || ACPI
        depends on ARM || ARM64 || COMPILE_TEST
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y.
@@ -85,10 +85,12 @@ config HNS3
          drivers (like ODP) to register with HNAE devices and their associated
          operations.
 
+if HNS3
+
 config HNS3_HCLGE
        tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
+       default m
        depends on PCI_MSI
-       depends on HNS3
        ---help---
          This selects the HNS3_HCLGE network acceleration engine & its hardware
          compatibility layer. The engine is used in the Hisilicon hip08 family of
@@ -97,16 +99,15 @@ config HNS3_HCLGE
 config HNS3_DCB
        bool "Hisilicon HNS3 Data Center Bridge Support"
        default n
-       depends on HNS3 && HNS3_HCLGE && DCB
+       depends on HNS3_HCLGE && DCB
        ---help---
          Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver.
 
          If unsure, say N.
 
 config HNS3_HCLGEVF
-    tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
-    depends on PCI_MSI
-    depends on HNS3
+       tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
+       depends on PCI_MSI
        depends on HNS3_HCLGE
     ---help---
          This selects the HNS3 VF driver's network acceleration engine & its hardware
@@ -115,11 +116,13 @@ config HNS3_HCLGEVF
 
 config HNS3_ENET
        tristate "Hisilicon HNS3 Ethernet Device Support"
+       default m
        depends on 64BIT && PCI
-       depends on HNS3
        ---help---
          This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
          family of SoCs. This module depends upon the HNAE3 driver to access the HNAE3
          devices and their associated operations.
 
+endif #HNS3
+
 endif # NET_VENDOR_HISILICON
index 340e28211135a266b5a955ef432f6e8786d4f23b..14374a856d3091a9489e92669c7d93e3e797a115 100644 (file)
@@ -904,7 +904,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
                hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
 
        hip04_config_fifo(priv);
-       random_ether_addr(ndev->dev_addr);
+       eth_random_addr(ndev->dev_addr);
        hip04_update_mac_address(ndev);
 
        ret = hip04_alloc_ring(ndev, d);
index ef9ef703d13a0e0efff11404afb41532f571283f..948b3e0d18f4d89d9eeee371cacd1753eb734d91 100644 (file)
@@ -2022,7 +2022,8 @@ static void hns_nic_get_stats64(struct net_device *ndev,
 
 static u16
 hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
-                    void *accel_priv, select_queue_fallback_t fallback)
+                    struct net_device *sb_dev,
+                    select_queue_fallback_t fallback)
 {
        struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
        struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -2032,7 +2033,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
            is_multicast_ether_addr(eth_hdr->h_dest))
                return 0;
        else
-               return fallback(ndev, skb);
+               return fallback(ndev, skb, NULL);
 }
 
 static const struct net_device_ops hns_nic_netdev_ops = {
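/*
 * The two hunks above track this cycle's ndo_select_queue prototype
 * change: the opaque accel_priv cookie became an explicit subordinate
 * device pointer, and the fallback helper now takes it as a third
 * argument. A minimal conforming implementation (sketch):
 */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				struct net_device *sb_dev,
				select_queue_fallback_t fallback)
{
	/* defer to the stack's default queue selection */
	return fallback(dev, skb, sb_dev);
}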
index 9d79dad2c6aae0f9bdbddeac1659ae4f08751d3d..0762ad18fdcc95af6d9a4926ddcbd2430c70477a 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/list.h>
-#include <linux/slab.h>
 #include <linux/spinlock.h>
 
 #include "hnae3.h"
@@ -41,13 +40,13 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client,
 {
        switch (client->type) {
        case HNAE3_CLIENT_KNIC:
-               hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
+               hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
                break;
        case HNAE3_CLIENT_UNIC:
-               hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
+               hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
                break;
        case HNAE3_CLIENT_ROCE:
-               hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
+               hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
                break;
        default:
                break;
@@ -61,16 +60,16 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
 
        switch (client->type) {
        case HNAE3_CLIENT_KNIC:
-               inited = hnae_get_bit(ae_dev->flag,
+               inited = hnae3_get_bit(ae_dev->flag,
                                       HNAE3_KNIC_CLIENT_INITED_B);
                break;
        case HNAE3_CLIENT_UNIC:
-               inited = hnae_get_bit(ae_dev->flag,
+               inited = hnae3_get_bit(ae_dev->flag,
                                       HNAE3_UNIC_CLIENT_INITED_B);
                break;
        case HNAE3_CLIENT_ROCE:
-               inited = hnae_get_bit(ae_dev->flag,
-                                     HNAE3_ROCE_CLIENT_INITED_B);
+               inited = hnae3_get_bit(ae_dev->flag,
+                                      HNAE3_ROCE_CLIENT_INITED_B);
                break;
        default:
                break;
@@ -86,7 +85,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
 
        /* check if this client matches the type of ae_dev */
        if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
-             hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
+             hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
                return 0;
        }
 
@@ -95,7 +94,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
                ret = ae_dev->ops->init_client_instance(client, ae_dev);
                if (ret) {
                        dev_err(&ae_dev->pdev->dev,
-                               "fail to instantiate client\n");
+                               "fail to instantiate client, ret = %d\n", ret);
                        return ret;
                }
 
@@ -135,7 +134,8 @@ int hnae3_register_client(struct hnae3_client *client)
                ret = hnae3_match_n_instantiate(client, ae_dev, true);
                if (ret)
                        dev_err(&ae_dev->pdev->dev,
-                               "match and instantiation failed for port\n");
+                               "match and instantiation failed for port, ret = %d\n",
+                               ret);
        }
 
 exit:
@@ -185,11 +185,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
                ae_dev->ops = ae_algo->ops;
                ret = ae_algo->ops->init_ae_dev(ae_dev);
                if (ret) {
-                       dev_err(&ae_dev->pdev->dev, "init ae_dev error.\n");
+                       dev_err(&ae_dev->pdev->dev,
+                               "init ae_dev error, ret = %d\n", ret);
                        continue;
                }
 
-               hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+               hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
 
                /* check the client list for the match with this ae_dev type and
                 * initialize the figure out client instance
@@ -198,7 +199,8 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
                        ret = hnae3_match_n_instantiate(client, ae_dev, true);
                        if (ret)
                                dev_err(&ae_dev->pdev->dev,
-                                       "match and instantiation failed\n");
+                                       "match and instantiation failed, ret = %d\n",
+                                       ret);
                }
        }
 
@@ -218,7 +220,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
        mutex_lock(&hnae3_common_lock);
        /* Check if there are matched ae_dev */
        list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
-               if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+               if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
                        continue;
 
                id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
@@ -232,7 +234,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
                        hnae3_match_n_instantiate(client, ae_dev, false);
 
                ae_algo->ops->uninit_ae_dev(ae_dev);
-               hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+               hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
        }
 
        list_del(&ae_algo->node);
@@ -271,11 +273,12 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
                /* ae_dev init should set flag */
                ret = ae_dev->ops->init_ae_dev(ae_dev);
                if (ret) {
-                       dev_err(&ae_dev->pdev->dev, "init ae_dev error\n");
+                       dev_err(&ae_dev->pdev->dev,
+                               "init ae_dev error, ret = %d\n", ret);
                        goto out_err;
                }
 
-               hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+               hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
                break;
        }
 
@@ -286,7 +289,8 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
                ret = hnae3_match_n_instantiate(client, ae_dev, true);
                if (ret)
                        dev_err(&ae_dev->pdev->dev,
-                               "match and instantiation failed\n");
+                               "match and instantiation failed, ret = %d\n",
+                               ret);
        }
 
 out_err:
@@ -306,7 +310,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
        mutex_lock(&hnae3_common_lock);
        /* Check if there are matched ae_algo */
        list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
-               if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+               if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
                        continue;
 
                id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
@@ -317,7 +321,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
                        hnae3_match_n_instantiate(client, ae_dev, false);
 
                ae_algo->ops->uninit_ae_dev(ae_dev);
-               hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+               hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
        }
 
        list_del(&ae_dev->node);
index 8acb1d116a0282c69129c5d7a2b4eb9140f25764..da806fdfbbe6b6fd493762112c721aa2d1755b2b 100644 (file)
                BIT(HNAE3_DEV_SUPPORT_ROCE_B))
 
 #define hnae3_dev_roce_supported(hdev) \
-       hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+       hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
 
 #define hnae3_dev_dcb_supported(hdev) \
-       hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+       hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
 
 #define ring_ptr_move_fw(ring, p) \
        ((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
@@ -167,7 +167,6 @@ struct hnae3_client_ops {
 #define HNAE3_CLIENT_NAME_LENGTH 16
 struct hnae3_client {
        char name[HNAE3_CLIENT_NAME_LENGTH];
-       u16 version;
        unsigned long state;
        enum hnae3_client_type type;
        const struct hnae3_client_ops *ops;
@@ -436,7 +435,6 @@ struct hnae3_dcb_ops {
 struct hnae3_ae_algo {
        const struct hnae3_ae_ops *ops;
        struct list_head node;
-       char name[HNAE3_CLASS_NAME_SIZE];
        const struct pci_device_id *pdev_id_table;
 };
 
@@ -509,17 +507,17 @@ struct hnae3_handle {
        u32 numa_node_mask;     /* for multi-chip support */
 };
 
-#define hnae_set_field(origin, mask, shift, val) \
+#define hnae3_set_field(origin, mask, shift, val) \
        do { \
                (origin) &= (~(mask)); \
                (origin) |= ((val) << (shift)) & (mask); \
        } while (0)
-#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
 
-#define hnae_set_bit(origin, shift, val) \
-       hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
-#define hnae_get_bit(origin, shift) \
-       hnae_get_field((origin), (0x1 << (shift)), (shift))
+#define hnae3_set_bit(origin, shift, val) \
+       hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
+#define hnae3_get_bit(origin, shift) \
+       hnae3_get_field((origin), (0x1 << (shift)), (shift))
 
 void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
 void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
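/*
 * A worked example of the renamed field helpers, with mask = 0x00F0 and
 * shift = 4 (values chosen purely for illustration):
 *
 *	u32 reg = 0x1234;
 *
 *	hnae3_set_field(reg, 0x00F0, 4, 0xA);	// reg is now 0x12A4
 *	hnae3_get_field(reg, 0x00F0, 4);	// yields 0xA
 *	hnae3_set_bit(reg, 0, 1);		// reg is now 0x12A5
 *	hnae3_get_bit(reg, 0);			// yields 1
 */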
index 25a73bb2e642dde42ae59f10369e7d3fefcb53f7..29be96e5cc476f479200690ad7718d5718805ace 100644 (file)
@@ -239,7 +239,28 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
        struct hnae3_handle *h = hns3_get_handle(netdev);
        struct hnae3_knic_private_info *kinfo = &h->kinfo;
        unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
-       int ret;
+       int i, ret;
+
+       if (kinfo->num_tc <= 1) {
+               netdev_reset_tc(netdev);
+       } else {
+               ret = netdev_set_num_tc(netdev, kinfo->num_tc);
+               if (ret) {
+                       netdev_err(netdev,
+                                  "netdev_set_num_tc fail, ret=%d!\n", ret);
+                       return ret;
+               }
+
+               for (i = 0; i < HNAE3_MAX_TC; i++) {
+                       if (!kinfo->tc_info[i].enable)
+                               continue;
+
+                       netdev_set_tc_queue(netdev,
+                                           kinfo->tc_info[i].tc,
+                                           kinfo->tc_info[i].tqp_count,
+                                           kinfo->tc_info[i].tqp_offset);
+               }
+       }
 
        ret = netif_set_real_num_tx_queues(netdev, queue_size);
        if (ret) {
@@ -312,7 +333,9 @@ static int hns3_nic_net_up(struct net_device *netdev)
 static int hns3_nic_net_open(struct net_device *netdev)
 {
        struct hns3_nic_priv *priv = netdev_priv(netdev);
-       int ret;
+       struct hnae3_handle *h = hns3_get_handle(netdev);
+       struct hnae3_knic_private_info *kinfo;
+       int i, ret;
 
        netif_carrier_off(netdev);
 
@@ -327,6 +350,12 @@ static int hns3_nic_net_open(struct net_device *netdev)
                return ret;
        }
 
+       kinfo = &h->kinfo;
+       for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+               netdev_set_prio_tc_map(netdev, i,
+                                      kinfo->prio_tc[i]);
+       }
+
        priv->ae_handle->last_reset_time = jiffies;
        return 0;
 }
@@ -493,8 +522,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 
        /* find the txbd field values */
        *paylen = skb->len - hdr_len;
-       hnae_set_bit(*type_cs_vlan_tso,
-                    HNS3_TXD_TSO_B, 1);
+       hnae3_set_bit(*type_cs_vlan_tso,
+                     HNS3_TXD_TSO_B, 1);
 
        /* get MSS for TSO */
        *mss = skb_shinfo(skb)->gso_size;
@@ -586,21 +615,21 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 
        /* compute L2 header size for normal packet, defined in 2 Bytes */
        l2_len = l3.hdr - skb->data;
-       hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
-                      HNS3_TXD_L2LEN_S, l2_len >> 1);
+       hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
+                       HNS3_TXD_L2LEN_S, l2_len >> 1);
 
        /* tunnel packet */
        if (skb->encapsulation) {
                /* compute OL2 header size, defined in 2 Bytes */
                ol2_len = l2_len;
-               hnae_set_field(*ol_type_vlan_len_msec,
-                              HNS3_TXD_L2LEN_M,
-                              HNS3_TXD_L2LEN_S, ol2_len >> 1);
+               hnae3_set_field(*ol_type_vlan_len_msec,
+                               HNS3_TXD_L2LEN_M,
+                               HNS3_TXD_L2LEN_S, ol2_len >> 1);
 
                /* compute OL3 header size, defined in 4 Bytes */
                ol3_len = l4.hdr - l3.hdr;
-               hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
-                              HNS3_TXD_L3LEN_S, ol3_len >> 2);
+               hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
+                               HNS3_TXD_L3LEN_S, ol3_len >> 2);
 
                /* MAC in UDP, MAC in GRE (0x6558) */
                if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
@@ -609,16 +638,17 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 
                        /* compute OL4 header size, defined in 4 Bytes. */
                        ol4_len = l2_hdr - l4.hdr;
-                       hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
-                                      HNS3_TXD_L4LEN_S, ol4_len >> 2);
+                       hnae3_set_field(*ol_type_vlan_len_msec,
+                                       HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
+                                       ol4_len >> 2);
 
                        /* switch IP header ptr from outer to inner header */
                        l3.hdr = skb_inner_network_header(skb);
 
                        /* compute inner l2 header size, defined in 2 Bytes. */
                        l2_len = l3.hdr - l2_hdr;
-                       hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
-                                      HNS3_TXD_L2LEN_S, l2_len >> 1);
+                       hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
+                                       HNS3_TXD_L2LEN_S, l2_len >> 1);
                } else {
                        /* skb packet types not supported by hardware,
                         * so the txbd len field is not filled.
@@ -634,22 +664,24 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 
        /* compute inner(/normal) L3 header size, defined in 4 Bytes */
        l3_len = l4.hdr - l3.hdr;
-       hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
-                      HNS3_TXD_L3LEN_S, l3_len >> 2);
+       hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
+                       HNS3_TXD_L3LEN_S, l3_len >> 2);
 
        /* compute inner(/normal) L4 header size, defined in 4 Bytes */
        switch (l4_proto) {
        case IPPROTO_TCP:
-               hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
-                              HNS3_TXD_L4LEN_S, l4.tcp->doff);
+               hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+                               HNS3_TXD_L4LEN_S, l4.tcp->doff);
                break;
        case IPPROTO_SCTP:
-               hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
-                              HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
+               hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+                               HNS3_TXD_L4LEN_S,
+                               (sizeof(struct sctphdr) >> 2));
                break;
        case IPPROTO_UDP:
-               hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
-                              HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
+               hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+                               HNS3_TXD_L4LEN_S,
+                               (sizeof(struct udphdr) >> 2));
                break;
        default:
                /* skb packet types not supported by hardware,
@@ -703,32 +735,34 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
                /* define outer network header type.*/
                if (skb->protocol == htons(ETH_P_IP)) {
                        if (skb_is_gso(skb))
-                               hnae_set_field(*ol_type_vlan_len_msec,
-                                              HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
-                                              HNS3_OL3T_IPV4_CSUM);
+                               hnae3_set_field(*ol_type_vlan_len_msec,
+                                               HNS3_TXD_OL3T_M,
+                                               HNS3_TXD_OL3T_S,
+                                               HNS3_OL3T_IPV4_CSUM);
                        else
-                               hnae_set_field(*ol_type_vlan_len_msec,
-                                              HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
-                                              HNS3_OL3T_IPV4_NO_CSUM);
+                               hnae3_set_field(*ol_type_vlan_len_msec,
+                                               HNS3_TXD_OL3T_M,
+                                               HNS3_TXD_OL3T_S,
+                                               HNS3_OL3T_IPV4_NO_CSUM);
 
                } else if (skb->protocol == htons(ETH_P_IPV6)) {
-                       hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
-                                      HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
+                       hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
+                                       HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
                }
 
                /* define tunnel type(OL4).*/
                switch (l4_proto) {
                case IPPROTO_UDP:
-                       hnae_set_field(*ol_type_vlan_len_msec,
-                                      HNS3_TXD_TUNTYPE_M,
-                                      HNS3_TXD_TUNTYPE_S,
-                                      HNS3_TUN_MAC_IN_UDP);
+                       hnae3_set_field(*ol_type_vlan_len_msec,
+                                       HNS3_TXD_TUNTYPE_M,
+                                       HNS3_TXD_TUNTYPE_S,
+                                       HNS3_TUN_MAC_IN_UDP);
                        break;
                case IPPROTO_GRE:
-                       hnae_set_field(*ol_type_vlan_len_msec,
-                                      HNS3_TXD_TUNTYPE_M,
-                                      HNS3_TXD_TUNTYPE_S,
-                                      HNS3_TUN_NVGRE);
+                       hnae3_set_field(*ol_type_vlan_len_msec,
+                                       HNS3_TXD_TUNTYPE_M,
+                                       HNS3_TXD_TUNTYPE_S,
+                                       HNS3_TUN_NVGRE);
                        break;
                default:
                        /* drop the skb tunnel packet if the hardware doesn't support it,
@@ -749,43 +783,43 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
        }
 
        if (l3.v4->version == 4) {
-               hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
-                              HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
+               hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
+                               HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
 
                /* the stack computes the IP header already, the only time we
                 * need the hardware to recompute it is in the case of TSO.
                 */
                if (skb_is_gso(skb))
-                       hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
-
-               hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+                       hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
        } else if (l3.v6->version == 6) {
-               hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
-                              HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
-               hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+               hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
+                               HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
        }
 
        switch (l4_proto) {
        case IPPROTO_TCP:
-               hnae_set_field(*type_cs_vlan_tso,
-                              HNS3_TXD_L4T_M,
-                              HNS3_TXD_L4T_S,
-                              HNS3_L4T_TCP);
+               hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+               hnae3_set_field(*type_cs_vlan_tso,
+                               HNS3_TXD_L4T_M,
+                               HNS3_TXD_L4T_S,
+                               HNS3_L4T_TCP);
                break;
        case IPPROTO_UDP:
                if (hns3_tunnel_csum_bug(skb))
                        break;
 
-               hnae_set_field(*type_cs_vlan_tso,
-                              HNS3_TXD_L4T_M,
-                              HNS3_TXD_L4T_S,
-                              HNS3_L4T_UDP);
+               hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+               hnae3_set_field(*type_cs_vlan_tso,
+                               HNS3_TXD_L4T_M,
+                               HNS3_TXD_L4T_S,
+                               HNS3_L4T_UDP);
                break;
        case IPPROTO_SCTP:
-               hnae_set_field(*type_cs_vlan_tso,
-                              HNS3_TXD_L4T_M,
-                              HNS3_TXD_L4T_S,
-                              HNS3_L4T_SCTP);
+               hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+               hnae3_set_field(*type_cs_vlan_tso,
+                               HNS3_TXD_L4T_M,
+                               HNS3_TXD_L4T_S,
+                               HNS3_L4T_SCTP);
                break;
        default:
                /* drop the skb tunnel packet if the hardware doesn't support it,
@@ -807,11 +841,11 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
 {
        /* Config bd buffer end */
-       hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
-                      HNS3_TXD_BDTYPE_S, 0);
-       hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
-       hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
-       hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
+       hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
+                       HNS3_TXD_BDTYPE_S, 0);
+       hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
+       hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
+       hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
 }
 
 static int hns3_fill_desc_vtags(struct sk_buff *skb,
@@ -844,10 +878,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
                 * and use inner_vtag in one tag case.
                 */
                if (skb->protocol == htons(ETH_P_8021Q)) {
-                       hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+                       hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
                        *out_vtag = vlan_tag;
                } else {
-                       hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+                       hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
                        *inner_vtag = vlan_tag;
                }
        } else if (skb->protocol == htons(ETH_P_8021Q)) {
@@ -880,7 +914,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
        u16 out_vtag = 0;
        u32 paylen = 0;
        u16 mss = 0;
-       __be16 protocol;
        u8 ol4_proto;
        u8 il4_proto;
        int ret;
@@ -909,7 +942,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        skb_reset_mac_len(skb);
-                       protocol = skb->protocol;
 
                        ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
                        if (ret)
@@ -1135,7 +1167,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        wmb(); /* Commit all data before submit */
 
-       hnae_queue_xmit(ring->tqp, buf_num);
+       hnae3_queue_xmit(ring->tqp, buf_num);
 
        return NETDEV_TX_OK;
 
@@ -1304,7 +1336,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
        u16 mode = mqprio_qopt->mode;
        u8 hw = mqprio_qopt->qopt.hw;
        bool if_running;
-       unsigned int i;
        int ret;
 
        if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
@@ -1328,24 +1359,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
        if (ret)
                goto out;
 
-       if (tc <= 1) {
-               netdev_reset_tc(netdev);
-       } else {
-               ret = netdev_set_num_tc(netdev, tc);
-               if (ret)
-                       goto out;
-
-               for (i = 0; i < HNAE3_MAX_TC; i++) {
-                       if (!kinfo->tc_info[i].enable)
-                               continue;
-
-                       netdev_set_tc_queue(netdev,
-                                           kinfo->tc_info[i].tc,
-                                           kinfo->tc_info[i].tqp_count,
-                                           kinfo->tc_info[i].tqp_offset);
-               }
-       }
-
        ret = hns3_nic_set_real_num_queue(netdev);
 
 out:
@@ -1703,7 +1716,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
                             struct hns3_desc_cb *cb)
 {
-       unsigned int order = hnae_page_order(ring);
+       unsigned int order = hnae3_page_order(ring);
        struct page *p;
 
        p = dev_alloc_pages(order);
@@ -1714,7 +1727,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
        cb->page_offset = 0;
        cb->reuse_flag = 0;
        cb->buf  = page_address(p);
-       cb->length = hnae_page_size(ring);
+       cb->length = hnae3_page_size(ring);
        cb->type = DESC_TYPE_PAGE;
 
        return 0;
@@ -1780,33 +1793,27 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring)
 /* free desc along with its attached buffer */
 static void hns3_free_desc(struct hns3_enet_ring *ring)
 {
+       int size = ring->desc_num * sizeof(ring->desc[0]);
+
        hns3_free_buffers(ring);
 
-       dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
-                        ring->desc_num * sizeof(ring->desc[0]),
-                        DMA_BIDIRECTIONAL);
-       ring->desc_dma_addr = 0;
-       kfree(ring->desc);
-       ring->desc = NULL;
+       if (ring->desc) {
+               dma_free_coherent(ring_to_dev(ring), size,
+                                 ring->desc, ring->desc_dma_addr);
+               ring->desc = NULL;
+       }
 }
 
 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
 {
        int size = ring->desc_num * sizeof(ring->desc[0]);
 
-       ring->desc = kzalloc(size, GFP_KERNEL);
+       ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
+                                        &ring->desc_dma_addr,
+                                        GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;
 
-       ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
-                                            size, DMA_BIDIRECTIONAL);
-       if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
-               ring->desc_dma_addr = 0;
-               kfree(ring->desc);
-               ring->desc = NULL;
-               return -ENOMEM;
-       }
-
        return 0;
 }
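/*
 * Rationale sketch for the hunk above: a descriptor ring is touched by
 * both CPU and device for its entire lifetime, the textbook case for a
 * coherent allocation rather than kzalloc() plus a streaming
 * dma_map_single(); it also deletes the mapping-error unwind path.
 * struct example_ring stands in for the driver's ring type:
 */
struct example_ring {
	void *desc;
	dma_addr_t desc_dma_addr;
	int desc_num;
};

static int example_alloc_ring(struct device *dev, struct example_ring *r,
			      size_t desc_size)
{
	r->desc = dma_zalloc_coherent(dev, r->desc_num * desc_size,
				      &r->desc_dma_addr, GFP_KERNEL);
	return r->desc ? 0 : -ENOMEM;
}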
 
@@ -1887,7 +1894,7 @@ static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
 
        (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
        (*bytes) += desc_cb->length;
-       /* desc_cb will be cleaned, after hnae_free_buffer_detach*/
+       /* desc_cb will be cleaned after hnae3_free_buffer_detach */
        hns3_free_buffer_detach(ring, ring->next_to_clean);
 
        ring_ptr_move_fw(ring, next_to_clean);
@@ -1917,7 +1924,7 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
        if (is_ring_empty(ring) || head == ring->next_to_clean)
                return true; /* no data to poll */
 
-       if (!is_valid_clean_head(ring, head)) {
+       if (unlikely(!is_valid_clean_head(ring, head))) {
                netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
                           ring->next_to_use, ring->next_to_clean);
 
@@ -2016,15 +2023,15 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
        bool twobufs;
 
        twobufs = ((PAGE_SIZE < 8192) &&
-               hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
+               hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
 
        desc = &ring->desc[ring->next_to_clean];
        size = le16_to_cpu(desc->rx.size);
 
-       truesize = hnae_buf_size(ring);
+       truesize = hnae3_buf_size(ring);
 
        if (!twobufs)
-               last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
+               last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
 
        skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
                        size - pull_len, truesize);
@@ -2076,13 +2083,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
                return;
 
        /* check if hardware has done checksum */
-       if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
+       if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
                return;
 
-       if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
-                    hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
-                    hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
-                    hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
+       if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
+                    hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
+                    hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
+                    hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
                netdev_err(netdev, "L3/L4 error pkt\n");
                u64_stats_update_begin(&ring->syncp);
                ring->stats.l3l4_csum_err++;
@@ -2091,23 +2098,24 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
                return;
        }
 
-       l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
-                                HNS3_RXD_L3ID_S);
-       l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
-                                HNS3_RXD_L4ID_S);
+       l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+                                 HNS3_RXD_L3ID_S);
+       l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
+                                 HNS3_RXD_L4ID_S);
 
-       ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
+       ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
+                                  HNS3_RXD_OL4ID_S);
        switch (ol4_type) {
        case HNS3_OL4_TYPE_MAC_IN_UDP:
        case HNS3_OL4_TYPE_NVGRE:
                skb->csum_level = 1;
        case HNS3_OL4_TYPE_NO_TUN:
                /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
-               if (l3_type == HNS3_L3_TYPE_IPV4 ||
-                   (l3_type == HNS3_L3_TYPE_IPV6 &&
-                    (l4_type == HNS3_L4_TYPE_UDP ||
-                     l4_type == HNS3_L4_TYPE_TCP ||
-                     l4_type == HNS3_L4_TYPE_SCTP)))
+               if ((l3_type == HNS3_L3_TYPE_IPV4 ||
+                    l3_type == HNS3_L3_TYPE_IPV6) &&
+                   (l4_type == HNS3_L4_TYPE_UDP ||
+                    l4_type == HNS3_L4_TYPE_TCP ||
+                    l4_type == HNS3_L4_TYPE_SCTP))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                break;
        }
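/*
 * The regrouping above is a real fix, not a cleanup: with the old
 * parenthesization, l3_type == HNS3_L3_TYPE_IPV4 alone was enough to
 * mark a frame CHECKSUM_UNNECESSARY, while the UDP/TCP/SCTP check only
 * constrained IPv6. After the change the predicate reads
 * (IPv4 || IPv6) && (UDP || TCP || SCTP) for both address families.
 */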
@@ -2135,8 +2143,8 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
 #define HNS3_STRP_OUTER_VLAN   0x1
 #define HNS3_STRP_INNER_VLAN   0x2
 
-       switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
-                              HNS3_RXD_STRP_TAGP_S)) {
+       switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
+                               HNS3_RXD_STRP_TAGP_S)) {
        case HNS3_STRP_OUTER_VLAN:
                vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
                break;
@@ -2174,7 +2182,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
        bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
 
        /* Check valid BD */
-       if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+       if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
                return -EFAULT;
 
        va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
@@ -2229,7 +2237,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
                hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
                ring_ptr_move_fw(ring, next_to_clean);
 
-               while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+               while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
                        desc = &ring->desc[ring->next_to_clean];
                        desc_cb = &ring->desc_cb[ring->next_to_clean];
                        bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
@@ -2257,7 +2265,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
                                               vlan_tag);
        }
 
-       if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
+       if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
                netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
                           ((u64 *)desc)[0], ((u64 *)desc)[1]);
                u64_stats_update_begin(&ring->syncp);
@@ -2269,7 +2277,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
        }
 
        if (unlikely((!desc->rx.pkt_len) ||
-                    hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
+                    hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
                netdev_err(netdev, "truncated pkt\n");
                u64_stats_update_begin(&ring->syncp);
                ring->stats.err_pkt_len++;
@@ -2279,7 +2287,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
                return -EFAULT;
        }
 
-       if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
+       if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
                netdev_err(netdev, "L2 error pkt\n");
                u64_stats_update_begin(&ring->syncp);
                ring->stats.l2_err++;
@@ -2532,10 +2540,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
        tx_ring = tqp_vector->tx_group.ring;
        if (tx_ring) {
                cur_chain->tqp_index = tx_ring->tqp->tqp_index;
-               hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
-                            HNAE3_RING_TYPE_TX);
-               hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
-                              HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
+               hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+                             HNAE3_RING_TYPE_TX);
+               hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+                               HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
 
                cur_chain->next = NULL;
 
@@ -2549,12 +2557,12 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 
                        cur_chain->next = chain;
                        chain->tqp_index = tx_ring->tqp->tqp_index;
-                       hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
-                                    HNAE3_RING_TYPE_TX);
-                       hnae_set_field(chain->int_gl_idx,
-                                      HNAE3_RING_GL_IDX_M,
-                                      HNAE3_RING_GL_IDX_S,
-                                      HNAE3_RING_GL_TX);
+                       hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+                                     HNAE3_RING_TYPE_TX);
+                       hnae3_set_field(chain->int_gl_idx,
+                                       HNAE3_RING_GL_IDX_M,
+                                       HNAE3_RING_GL_IDX_S,
+                                       HNAE3_RING_GL_TX);
 
                        cur_chain = chain;
                }
@@ -2564,10 +2572,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
        if (!tx_ring && rx_ring) {
                cur_chain->next = NULL;
                cur_chain->tqp_index = rx_ring->tqp->tqp_index;
-               hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
-                            HNAE3_RING_TYPE_RX);
-               hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
-                              HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+               hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+                             HNAE3_RING_TYPE_RX);
+               hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+                               HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
 
                rx_ring = rx_ring->next;
        }
@@ -2579,10 +2587,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 
                cur_chain->next = chain;
                chain->tqp_index = rx_ring->tqp->tqp_index;
-               hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
-                            HNAE3_RING_TYPE_RX);
-               hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
-                              HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+               hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+                             HNAE3_RING_TYPE_RX);
+               hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+                               HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
 
                cur_chain = chain;
 
@@ -2745,10 +2753,6 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
                if (ret)
                        return ret;
 
-               ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
-               if (ret)
-                       return ret;
-
                hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
 
                if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
@@ -2809,7 +2813,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
                ring->io_base = q->io_base;
        }
 
-       hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
+       hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
 
        ring->tqp = q;
        ring->desc = NULL;
@@ -3081,7 +3085,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
        priv->dev = &pdev->dev;
        priv->netdev = netdev;
        priv->ae_handle = handle;
-       priv->ae_handle->reset_level = HNAE3_NONE_RESET;
        priv->ae_handle->last_reset_time = jiffies;
        priv->tx_timeout_count = 0;
 
@@ -3102,6 +3105,11 @@ static int hns3_client_init(struct hnae3_handle *handle)
        /* Carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);
 
+       if (handle->flags & HNAE3_SUPPORT_VF)
+               handle->reset_level = HNAE3_VF_RESET;
+       else
+               handle->reset_level = HNAE3_FUNC_RESET;
+
        ret = hns3_get_ring_config(priv);
        if (ret) {
                ret = -ENOMEM;
@@ -3208,7 +3216,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
        struct net_device *ndev = kinfo->netdev;
        bool if_running;
        int ret;
-       u8 i;
 
        if (tc > HNAE3_MAX_TC)
                return -EINVAL;
@@ -3218,10 +3225,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
 
        if_running = netif_running(ndev);
 
-       ret = netdev_set_num_tc(ndev, tc);
-       if (ret)
-               return ret;
-
        if (if_running) {
                (void)hns3_nic_net_stop(ndev);
                msleep(100);
@@ -3232,27 +3235,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
        if (ret)
                goto err_out;
 
-       if (tc <= 1) {
-               netdev_reset_tc(ndev);
-               goto out;
-       }
-
-       for (i = 0; i < HNAE3_MAX_TC; i++) {
-               struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
-
-               if (tc_info->enable)
-                       netdev_set_tc_queue(ndev,
-                                           tc_info->tc,
-                                           tc_info->tqp_count,
-                                           tc_info->tqp_offset);
-       }
-
-       for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
-               netdev_set_prio_tc_map(ndev, i,
-                                      kinfo->prio_tc[i]);
-       }
-
-out:
        ret = hns3_nic_set_real_num_queue(ndev);
 
 err_out:
@@ -3418,7 +3400,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
        struct net_device *ndev = kinfo->netdev;
 
        if (!netif_running(ndev))
-               return -EIO;
+               return 0;
 
        return hns3_nic_net_stop(ndev);
 }
@@ -3458,10 +3440,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
        /* Carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);
 
-       ret = hns3_get_ring_config(priv);
-       if (ret)
-               return ret;
-
        ret = hns3_nic_init_vector_data(priv);
        if (ret)
                return ret;
@@ -3493,10 +3471,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
        if (ret)
                netdev_err(netdev, "uninit ring error\n");
 
-       hns3_put_ring_config(priv);
-
-       priv->ring_data = NULL;
-
        hns3_uninit_mac_addr(netdev);
 
        return ret;
index 3b083d5ae9ce25832fdd01ddabd0be999804577f..bf9aa02be9941670772b99421843ed699872804f 100644 (file)
@@ -499,7 +499,6 @@ struct hns3_enet_tqp_vector {
 
        u16 num_tqps;   /* total number of tqps in TQP vector */
 
-       cpumask_t affinity_mask;
        char name[HNAE3_INT_NAME_LEN];
 
        /* when 0 should adjust interrupt coalesce parameter */
@@ -591,7 +590,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
 #define hns3_write_dev(a, reg, value) \
        hns3_write_reg((a)->io_base, (reg), (value))
 
-#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
+#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
                (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
 
 #define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
@@ -601,9 +600,9 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
 
 #define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
 
-#define hnae_buf_size(_ring) ((_ring)->buf_size)
-#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
-#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
+#define hnae3_buf_size(_ring) ((_ring)->buf_size)
+#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
+#define hnae3_page_size(_ring) (PAGE_SIZE << hnae3_page_order(_ring))
 
 /* iterator for handling rings in ring group */
 #define hns3_for_each_ring(pos, head) \
index 40c0425b4023bd98a1ca2d926efc02fe5d378155..11620e003a8ed2bee23a4e977d7edcc04243e250 100644 (file)
@@ -201,7 +201,9 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
                rx_group = &ring->tqp_vector->rx_group;
                pre_rx_pkt = rx_group->total_packets;
 
+               preempt_disable();
                hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data);
+               preempt_enable();
 
                rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt);
                rx_group->total_packets = pre_rx_pkt;
index c36d64710fa69a94d911046a136cb231740f4c41..cf40afca66dbe4919b266712088050926e3c87ee 100644 (file)
@@ -18,8 +18,7 @@
 #include "hclge_main.h"
 
 #define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)
-#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? \
-       DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
 #define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)
 
 static int hclge_ring_space(struct hclge_cmq_ring *ring)
@@ -46,31 +45,24 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
 {
        int size  = ring->desc_num * sizeof(struct hclge_desc);
 
-       ring->desc = kzalloc(size, GFP_KERNEL);
+       ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
+                                        size, &ring->desc_dma_addr,
+                                        GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;
 
-       ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
-                                            size, DMA_BIDIRECTIONAL);
-       if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
-               ring->desc_dma_addr = 0;
-               kfree(ring->desc);
-               ring->desc = NULL;
-               return -ENOMEM;
-       }
-
        return 0;
 }
 
 static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
 {
-       dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
-                        ring->desc_num * sizeof(ring->desc[0]),
-                        DMA_BIDIRECTIONAL);
+       int size  = ring->desc_num * sizeof(struct hclge_desc);
 
-       ring->desc_dma_addr = 0;
-       kfree(ring->desc);
-       ring->desc = NULL;
+       if (ring->desc) {
+               dma_free_coherent(cmq_ring_to_dev(ring), size,
+                                 ring->desc, ring->desc_dma_addr);
+               ring->desc = NULL;
+       }
 }
 
 static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
@@ -111,8 +103,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
 
        if (is_read)
                desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
-       else
-               desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
 }
 
 static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
@@ -123,24 +113,24 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
 
        if (ring->flag == HCLGE_TYPE_CSQ) {
                hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
-                               (u32)dma);
+                               lower_32_bits(dma));
                hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
-                               (u32)((dma >> 31) >> 1));
+                               upper_32_bits(dma));
                hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
                                (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
                                HCLGE_NIC_CMQ_ENABLE);
-               hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
                hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
+               hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
        } else {
                hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
-                               (u32)dma);
+                               lower_32_bits(dma));
                hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
-                               (u32)((dma >> 31) >> 1));
+                               upper_32_bits(dma));
                hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
                                (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
                                HCLGE_NIC_CMQ_ENABLE);
-               hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
                hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
+               hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
        }
 }
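
The (u32)((dma >> 31) >> 1) idiom shifts in two steps so the expression stays defined even when dma_addr_t is only 32 bits wide (a single >> 32 would be undefined behavior there); lower_32_bits()/upper_32_bits() express the same intent by name. A runnable userspace model of what they compute:

#include <stdio.h>
#include <stdint.h>

/* Split a 64-bit bus address into the two 32-bit halves programmed
 * into the BASEADDR_L/H registers.
 */
#define lower_32(x) ((uint32_t)((x) & 0xffffffffULL))
#define upper_32(x) ((uint32_t)((x) >> 32))

int main(void)
{
	uint64_t dma = 0x0000001234abcd00ULL;	/* made-up bus address */

	printf("low=0x%08x high=0x%08x\n", lower_32(dma), upper_32(dma));
	return 0;
}
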
 
@@ -152,33 +142,22 @@ static void hclge_cmd_init_regs(struct hclge_hw *hw)
 
 static int hclge_cmd_csq_clean(struct hclge_hw *hw)
 {
-       struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
+       struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
        struct hclge_cmq_ring *csq = &hw->cmq.csq;
-       u16 ntc = csq->next_to_clean;
-       struct hclge_desc *desc;
-       int clean = 0;
        u32 head;
+       int clean;
 
-       desc = &csq->desc[ntc];
        head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
        rmb(); /* Make sure head is ready before touching any data */
 
        if (!is_valid_csq_clean_head(csq, head)) {
-               dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head,
-                          csq->next_to_use, csq->next_to_clean);
+               dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+                        csq->next_to_use, csq->next_to_clean);
                return 0;
        }
 
-       while (head != ntc) {
-               memset(desc, 0, sizeof(*desc));
-               ntc++;
-               if (ntc == csq->desc_num)
-                       ntc = 0;
-               desc = &csq->desc[ntc];
-               clean++;
-       }
-       csq->next_to_clean = ntc;
-
+       clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+       csq->next_to_clean = head;
        return clean;
 }
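
The rewrite collapses the descriptor-by-descriptor walk (with its memset of each cleaned entry) into one arithmetic step and simply advances next_to_clean to the hardware head. A runnable model of the count, covering the wrap-around case:

#include <stdio.h>

/* Number of descriptors consumed between next_to_clean and the
 * hardware head on a ring of desc_num entries. Adding desc_num
 * before the modulo keeps the result non-negative across wrap.
 */
static int clean_count(int head, int next_to_clean, int desc_num)
{
	return (head - next_to_clean + desc_num) % desc_num;
}

int main(void)
{
	printf("%d\n", clean_count(7, 3, 16));	/* 4, no wrap  */
	printf("%d\n", clean_count(2, 14, 16));	/* 4, wrapped  */
	return 0;
}
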
 
@@ -216,7 +195,7 @@ static bool hclge_is_special_opcode(u16 opcode)
  **/
 int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 {
-       struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
+       struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
        struct hclge_desc *desc_to_use;
        bool complete = false;
        u32 timeout = 0;
@@ -227,7 +206,8 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 
        spin_lock_bh(&hw->cmq.csq.lock);
 
-       if (num > hclge_ring_space(&hw->cmq.csq)) {
+       if (num > hclge_ring_space(&hw->cmq.csq) ||
+           test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
                spin_unlock_bh(&hw->cmq.csq.lock);
                return -EBUSY;
        }
@@ -256,33 +236,34 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
         */
        if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
                do {
-                       if (hclge_cmd_csq_done(hw))
+                       if (hclge_cmd_csq_done(hw)) {
+                               complete = true;
                                break;
+                       }
                        udelay(1);
                        timeout++;
                } while (timeout < hw->cmq.tx_timeout);
        }
 
-       if (hclge_cmd_csq_done(hw)) {
-               complete = true;
+       if (!complete) {
+               retval = -EAGAIN;
+       } else {
                handle = 0;
                while (handle < num) {
                        /* Get the result of hardware write back */
                        desc_to_use = &hw->cmq.csq.desc[ntc];
                        desc[handle] = *desc_to_use;
-                       pr_debug("Get cmd desc:\n");
 
                        if (likely(!hclge_is_special_opcode(opcode)))
                                desc_ret = le16_to_cpu(desc[handle].retval);
                        else
                                desc_ret = le16_to_cpu(desc[0].retval);
 
-                       if ((enum hclge_cmd_return_status)desc_ret ==
-                           HCLGE_CMD_EXEC_SUCCESS)
+                       if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
                                retval = 0;
                        else
                                retval = -EIO;
-                       hw->cmq.last_status = (enum hclge_cmd_status)desc_ret;
+                       hw->cmq.last_status = desc_ret;
                        ntc++;
                        handle++;
                        if (ntc == hw->cmq.csq.desc_num)
@@ -290,9 +271,6 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
                }
        }
 
-       if (!complete)
-               retval = -EAGAIN;
-
        /* Clean the command send queue */
        handle = hclge_cmd_csq_clean(hw);
        if (handle != num) {
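
Structurally, the done flag is now latched inside the poll loop rather than re-read from hardware after it, so the success and timeout paths fall out of a single variable. A compact, runnable model of the shape, with a plain counter standing in for udelay():

#include <stdbool.h>
#include <stdio.h>

static int hw_state;	/* stands in for the CSQ head register */

static bool csq_done(void)
{
	return hw_state != 0;
}

static int send_sync(int tx_timeout)
{
	bool complete = false;
	int timeout = 0;

	do {
		if (csq_done()) {
			complete = true;	/* decided inside the loop */
			break;
		}
		timeout++;			/* udelay(1) in the driver */
	} while (timeout < tx_timeout);

	return complete ? 0 : -1;		/* -EAGAIN in the driver */
}

int main(void)
{
	printf("%d\n", send_sync(100));	/* times out: hw_state is 0 */
	hw_state = 1;
	printf("%d\n", send_sync(100));	/* completes on first poll  */
	return 0;
}
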
@@ -369,6 +347,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
        spin_lock_init(&hdev->hw.cmq.crq.lock);
 
        hclge_cmd_init_regs(&hdev->hw);
+       clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 
        ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
        if (ret) {
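
A hedged cross-reference: together with the later hunks in this series, HCLGE_STATE_CMD_DISABLE appears to be set as soon as a global or core reset is detected (see the vector0 event handling below) and cleared only here, after the command-queue registers are re-initialized, so hclge_cmd_send() rejects new commands with -EBUSY for the whole reset window.
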
index d9aaa76c76eb40cf2e7e1991851f8b2abe299e7d..656c3e622ec8c4c42db779f437f4f0ab2a4835ba 100644 (file)
@@ -571,7 +571,8 @@ struct hclge_config_auto_neg_cmd {
 
 struct hclge_config_max_frm_size_cmd {
        __le16  max_frm_size;
-       u8      rsv[22];
+       u8      min_frm_size;
+       u8      rsv[21];
 };
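
The one-byte min_frm_size field is carved out of the reserved area, so the command payload keeps its original size. A runnable check of that invariant (the 24-byte total is inferred from the 2 + 22 to 2 + 1 + 21 arithmetic in this hunk, not stated elsewhere in the excerpt):

#include <stdint.h>

/* Model of the command payload; field names mirror the diff. */
struct model_max_frm_size_cmd {
	uint16_t max_frm_size;
	uint8_t  min_frm_size;
	uint8_t  rsv[21];
};

int main(void)
{
	_Static_assert(sizeof(struct model_max_frm_size_cmd) == 24,
		       "command payload must stay 24 bytes");
	return 0;
}
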
 
 enum hclge_mac_vlan_tbl_opcode {
index d318d35e598fd61aa66406adb8048113f0252364..266c68607e5370e10386c84b531c01357512fd50 100644 (file)
@@ -939,8 +939,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
 
        if (hnae3_dev_roce_supported(hdev)) {
                hdev->num_roce_msi =
-               hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
-                              HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+               hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+                               HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
 
                /* PF should have NIC vectors and RoCE vectors,
                 * NIC vectors are queued before RoCE vectors.
@@ -948,8 +948,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
                hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
        } else {
                hdev->num_msi =
-               hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
-                              HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+               hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+                               HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
        }
 
        return 0;
@@ -1038,38 +1038,38 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
        req = (struct hclge_cfg_param_cmd *)desc[0].data;
 
        /* get the configuration */
-       cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
-                                            HCLGE_CFG_VMDQ_M,
-                                            HCLGE_CFG_VMDQ_S);
-       cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
-                                    HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
-       cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
-                                          HCLGE_CFG_TQP_DESC_N_M,
-                                          HCLGE_CFG_TQP_DESC_N_S);
-
-       cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
-                                      HCLGE_CFG_PHY_ADDR_M,
-                                      HCLGE_CFG_PHY_ADDR_S);
-       cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
-                                        HCLGE_CFG_MEDIA_TP_M,
-                                        HCLGE_CFG_MEDIA_TP_S);
-       cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
-                                        HCLGE_CFG_RX_BUF_LEN_M,
-                                        HCLGE_CFG_RX_BUF_LEN_S);
+       cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+                                             HCLGE_CFG_VMDQ_M,
+                                             HCLGE_CFG_VMDQ_S);
+       cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+                                     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
+       cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+                                           HCLGE_CFG_TQP_DESC_N_M,
+                                           HCLGE_CFG_TQP_DESC_N_S);
+
+       cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
+                                       HCLGE_CFG_PHY_ADDR_M,
+                                       HCLGE_CFG_PHY_ADDR_S);
+       cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
+                                         HCLGE_CFG_MEDIA_TP_M,
+                                         HCLGE_CFG_MEDIA_TP_S);
+       cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
+                                         HCLGE_CFG_RX_BUF_LEN_M,
+                                         HCLGE_CFG_RX_BUF_LEN_S);
        /* get mac_address */
        mac_addr_tmp = __le32_to_cpu(req->param[2]);
-       mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
-                                          HCLGE_CFG_MAC_ADDR_H_M,
-                                          HCLGE_CFG_MAC_ADDR_H_S);
+       mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
+                                           HCLGE_CFG_MAC_ADDR_H_M,
+                                           HCLGE_CFG_MAC_ADDR_H_S);
 
        mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
 
-       cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
-                                           HCLGE_CFG_DEFAULT_SPEED_M,
-                                           HCLGE_CFG_DEFAULT_SPEED_S);
-       cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
-                                          HCLGE_CFG_RSS_SIZE_M,
-                                          HCLGE_CFG_RSS_SIZE_S);
+       cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
+                                            HCLGE_CFG_DEFAULT_SPEED_M,
+                                            HCLGE_CFG_DEFAULT_SPEED_S);
+       cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
+                                           HCLGE_CFG_RSS_SIZE_M,
+                                           HCLGE_CFG_RSS_SIZE_S);
 
        for (i = 0; i < ETH_ALEN; i++)
                cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
@@ -1077,9 +1077,9 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
        req = (struct hclge_cfg_param_cmd *)desc[1].data;
        cfg->numa_node_map = __le32_to_cpu(req->param[0]);
 
-       cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]),
-                                           HCLGE_CFG_SPEED_ABILITY_M,
-                                           HCLGE_CFG_SPEED_ABILITY_S);
+       cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
+                                            HCLGE_CFG_SPEED_ABILITY_M,
+                                            HCLGE_CFG_SPEED_ABILITY_S);
 }
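
All of these conversions go through the same mask-and-shift accessors; only the hnae_ to hnae3_ prefix changes. A runnable userspace model of what the set/get helpers do (the mask and shift values are invented, not the HCLGE_CFG_* constants):

#include <stdio.h>
#include <stdint.h>

#define FIELD_M 0xff00u	/* illustrative mask  */
#define FIELD_S 8	/* illustrative shift */

static void set_field(uint32_t *origin, uint32_t mask, int shift, uint32_t val)
{
	*origin &= ~mask;
	*origin |= (val << shift) & mask;
}

static uint32_t get_field(uint32_t origin, uint32_t mask, int shift)
{
	return (origin & mask) >> shift;
}

int main(void)
{
	uint32_t reg = 0;

	set_field(&reg, FIELD_M, FIELD_S, 0x2a);
	printf("reg=0x%08x field=%u\n", reg, get_field(reg, FIELD_M, FIELD_S));
	return 0;
}
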
 
 /* hclge_get_cfg: query the static parameter from flash
@@ -1098,11 +1098,11 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
                req = (struct hclge_cfg_param_cmd *)desc[i].data;
                hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
                                           true);
-               hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
-                              HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
+               hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
+                               HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
                /* Len is in units of 4 bytes when sent to hardware */
-               hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
-                              HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
+               hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
+                               HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
                req->offset = cpu_to_le32(offset);
        }
 
@@ -1189,7 +1189,7 @@ static int hclge_configure(struct hclge_dev *hdev)
 
        /* Non-contiguous TCs are not currently supported */
        for (i = 0; i < hdev->tm_info.num_tc; i++)
-               hnae_set_bit(hdev->hw_tc_map, i, 1);
+               hnae3_set_bit(hdev->hw_tc_map, i, 1);
 
        hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
 
@@ -1208,13 +1208,13 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
        req = (struct hclge_cfg_tso_status_cmd *)desc.data;
 
        tso_mss = 0;
-       hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
-                      HCLGE_TSO_MSS_MIN_S, tso_mss_min);
+       hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
+                       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
        req->tso_mss_min = cpu_to_le16(tso_mss);
 
        tso_mss = 0;
-       hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
-                      HCLGE_TSO_MSS_MIN_S, tso_mss_max);
+       hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
+                       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
        req->tso_mss_max = cpu_to_le16(tso_mss);
 
        return hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -1834,8 +1834,6 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
        return 0;
 }
 
-#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
-
 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
                                   struct hclge_pkt_buf_alloc *buf_alloc)
 {
@@ -1863,13 +1861,11 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
                        req->tc_wl[j].high =
                                cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
                        req->tc_wl[j].high |=
-                               cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
-                                           HCLGE_RX_PRIV_EN_B);
+                               cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
                        req->tc_wl[j].low =
                                cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
                        req->tc_wl[j].low |=
-                               cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
-                                           HCLGE_RX_PRIV_EN_B);
+                               cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
                }
        }
 
@@ -1911,13 +1907,11 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
                        req->com_thrd[j].high =
                                cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
                        req->com_thrd[j].high |=
-                               cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
-                                           HCLGE_RX_PRIV_EN_B);
+                               cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
                        req->com_thrd[j].low =
                                cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
                        req->com_thrd[j].low |=
-                               cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
-                                           HCLGE_RX_PRIV_EN_B);
+                               cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
                }
        }
 
@@ -1943,14 +1937,10 @@ static int hclge_common_wl_config(struct hclge_dev *hdev,
 
        req = (struct hclge_rx_com_wl *)desc.data;
        req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
-       req->com_wl.high |=
-               cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
-                           HCLGE_RX_PRIV_EN_B);
+       req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
 
        req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
-       req->com_wl.low |=
-               cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
-                           HCLGE_RX_PRIV_EN_B);
+       req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
@@ -2118,48 +2108,48 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
 
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
 
-       hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
+       hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
 
        switch (speed) {
        case HCLGE_MAC_SPEED_10M:
-               hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-                              HCLGE_CFG_SPEED_S, 6);
+               hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+                               HCLGE_CFG_SPEED_S, 6);
                break;
        case HCLGE_MAC_SPEED_100M:
-               hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-                              HCLGE_CFG_SPEED_S, 7);
+               hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+                               HCLGE_CFG_SPEED_S, 7);
                break;
        case HCLGE_MAC_SPEED_1G:
-               hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-                              HCLGE_CFG_SPEED_S, 0);
+               hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+                               HCLGE_CFG_SPEED_S, 0);
                break;
        case HCLGE_MAC_SPEED_10G:
-               hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-                              HCLGE_CFG_SPEED_S, 1);
+               hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+                               HCLGE_CFG_SPEED_S, 1);
                break;
        case HCLGE_MAC_SPEED_25G:
-               hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-                              HCLGE_CFG_SPEED_S, 2);
+               hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+                               HCLGE_CFG_SPEED_S, 2);
                break;
        case HCLGE_MAC_SPEED_40G:
-               hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-                              HCLGE_CFG_SPEED_S, 3);
+               hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+                               HCLGE_CFG_SPEED_S, 3);
                break;
        case HCLGE_MAC_SPEED_50G:
-               hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-                              HCLGE_CFG_SPEED_S, 4);
+               hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+                               HCLGE_CFG_SPEED_S, 4);
                break;
        case HCLGE_MAC_SPEED_100G:
-               hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-                              HCLGE_CFG_SPEED_S, 5);
+               hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+                               HCLGE_CFG_SPEED_S, 5);
                break;
        default:
                dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
                return -EINVAL;
        }
 
-       hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
-                    1);
+       hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
+                     1);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
@@ -2201,9 +2191,9 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
                return ret;
        }
 
-       *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
-       speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
-                                  HCLGE_QUERY_SPEED_S);
+       *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
+       speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
+                                   HCLGE_QUERY_SPEED_S);
 
        ret = hclge_parse_speed(speed_tmp, speed);
        if (ret) {
@@ -2225,7 +2215,7 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
 
        req = (struct hclge_config_auto_neg_cmd *)desc.data;
-       hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
+       hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
        req->cfg_an_cmd_flag = cpu_to_le32(flag);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2269,8 +2259,8 @@ static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
        req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
 
-       hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
-                    mask_vlan ? 1 : 0);
+       hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+                     mask_vlan ? 1 : 0);
        ether_addr_copy(req->mac_mask, mac_mask);
 
        status = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2505,7 +2495,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
        u32 cmdq_src_reg;
 
        /* fetch the events from their corresponding regs */
-       rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+       rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
        cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
 
        /* Assumption: If by any chance reset and mailbox events are reported
@@ -2517,12 +2507,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 
        /* check for vector0 reset event sources */
        if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+               set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
                set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
                *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
                return HCLGE_VECTOR0_EVENT_RST;
        }
 
        if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+               set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
                set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
                *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
                return HCLGE_VECTOR0_EVENT_RST;
@@ -2614,6 +2606,12 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
 
 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
 {
+       if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
+               dev_warn(&hdev->pdev->dev,
+                        "vector(vector_id %d) has already been freed\n", vector_id);
+               return;
+       }
+
        hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
        hdev->num_msi_left += 1;
        hdev->num_msi_used -= 1;
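
The guard makes freeing idempotent: the sentinel written on free is also the condition checked on entry. A runnable model of the pattern (array size, names, and the sentinel value are illustrative):

#include <stdio.h>

#define INVALID_VPORT -1	/* sentinel, mirroring HCLGE_INVALID_VPORT */

static int vector_status[4];	/* 0 = in use, for this model */

static void free_vector(int id)
{
	if (vector_status[id] == INVALID_VPORT) {
		printf("vector %d has already been freed\n", id);
		return;
	}
	vector_status[id] = INVALID_VPORT;	/* mark freed */
}

int main(void)
{
	free_vector(1);
	free_vector(1);		/* second call trips the guard */
	return 0;
}
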
@@ -2705,7 +2703,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
        }
 
        val = hclge_read_dev(&hdev->hw, reg);
-       while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
+       while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
                msleep(HCLGE_RESET_WATI_MS);
                val = hclge_read_dev(&hdev->hw, reg);
                cnt++;
@@ -2727,8 +2725,7 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
        int ret;
 
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
-       hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
-       hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
+       hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
        req->fun_reset_vfid = func_id;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2747,13 +2744,13 @@ static void hclge_do_reset(struct hclge_dev *hdev)
        switch (hdev->reset_type) {
        case HNAE3_GLOBAL_RESET:
                val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
-               hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
+               hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
                hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
                dev_info(&pdev->dev, "Global Reset requested\n");
                break;
        case HNAE3_CORE_RESET:
                val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
-               hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
+               hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
                hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
                dev_info(&pdev->dev, "Core Reset requested\n");
                break;
@@ -2810,8 +2807,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
                clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
                break;
        default:
-               dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d",
-                        hdev->reset_type);
                break;
        }
 
@@ -2824,16 +2819,17 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
 
 static void hclge_reset(struct hclge_dev *hdev)
 {
-       /* perform reset of the stack & ae device for a client */
+       struct hnae3_handle *handle;
 
+       /* perform reset of the stack & ae device for a client */
+       handle = &hdev->vport[0].nic;
+       rtnl_lock();
        hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
 
        if (!hclge_reset_wait(hdev)) {
-               rtnl_lock();
                hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
                hclge_reset_ae_dev(hdev->ae_dev);
                hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
-               rtnl_unlock();
 
                hclge_clear_reset_cause(hdev);
        } else {
@@ -2843,6 +2839,8 @@ static void hclge_reset(struct hclge_dev *hdev)
        }
 
        hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+       handle->last_reset_time = jiffies;
+       rtnl_unlock();
 }
 
 static void hclge_reset_event(struct hnae3_handle *handle)
@@ -2855,8 +2853,13 @@ static void hclge_reset_event(struct hnae3_handle *handle)
         * know this if the last reset request did not occur very recently (watchdog
         * timer = 5*HZ; let us check after a sufficiently large time, say 4*5*HZ).
         * In case of a new request we reset the "reset level" to PF reset.
+        * And if it is a repeat of the most recent reset request, we want to
+        * throttle it; another reset is not allowed within 3*HZ of the
+        * previous one.
         */
-       if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
+       if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
+               return;
+       else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
                handle->reset_level = HNAE3_FUNC_RESET;
 
        dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
@@ -2868,8 +2871,6 @@ static void hclge_reset_event(struct hnae3_handle *handle)
 
        if (handle->reset_level < HNAE3_GLOBAL_RESET)
                handle->reset_level++;
-
-       handle->last_reset_time = jiffies;
 }
 
 static void hclge_reset_subtask(struct hclge_dev *hdev)
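
A runnable model of the two thresholds now in hclge_reset_event(): requests within 3*HZ of the last reset are dropped, while after 4*5*HZ of quiet the escalation level falls back to the lowest (function) reset. Real jiffies wraparound, handled by time_before()/time_after() in the driver, is ignored here:

#include <stdio.h>
#include <stdbool.h>

#define HZ 100

static bool should_handle(unsigned long now, unsigned long last, int *level)
{
	if (now < last + 3 * HZ)
		return false;		/* throttle repeat requests    */
	if (now > last + 4 * 5 * HZ)
		*level = 0;		/* long quiet: back to FUNC reset */
	return true;
}

int main(void)
{
	int level = 2;

	printf("%d\n", should_handle(150, 0, &level));		/* 0: throttled */
	printf("%d (level=%d)\n", should_handle(2500, 0, &level), level);
	return 0;
}
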
@@ -3110,11 +3111,11 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                u16 mode = 0;
 
-               hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
-               hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
-                              HCLGE_RSS_TC_SIZE_S, tc_size[i]);
-               hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
-                              HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
+               hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
+               hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
+                               HCLGE_RSS_TC_SIZE_S, tc_size[i]);
+               hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
+                               HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
 
                req->rss_tc_mode[i] = cpu_to_le16(mode);
        }
@@ -3491,16 +3492,16 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
        i = 0;
        for (node = ring_chain; node; node = node->next) {
                tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
-               hnae_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
-                              HCLGE_INT_TYPE_S,
-                              hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-               hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
-                              HCLGE_TQP_ID_S, node->tqp_index);
-               hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
-                              HCLGE_INT_GL_IDX_S,
-                              hnae_get_field(node->int_gl_idx,
-                                             HNAE3_RING_GL_IDX_M,
-                                             HNAE3_RING_GL_IDX_S));
+               hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
+                               HCLGE_INT_TYPE_S,
+                               hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
+               hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+                               HCLGE_TQP_ID_S, node->tqp_index);
+               hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
+                               HCLGE_INT_GL_IDX_S,
+                               hnae3_get_field(node->int_gl_idx,
+                                               HNAE3_RING_GL_IDX_M,
+                                               HNAE3_RING_GL_IDX_S));
                req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
                if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
                        req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
@@ -3648,20 +3649,20 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
        int ret;
 
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
-       hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
-       hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
-       hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
-       hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
-       hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
+       hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
+       hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
+       hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
+       hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
        req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -3689,7 +3690,7 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
 
        /* 2 Then setup the loopback flag */
        loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
-       hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
+       hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
 
        req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
 
@@ -3953,10 +3954,10 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
        req = (struct hclge_mta_filter_mode_cmd *)desc.data;
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
 
-       hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
-                    enable);
-       hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
-                      HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
+       hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
+                     enable);
+       hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
+                       HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
@@ -3980,8 +3981,8 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
        req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
 
-       hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
-                    enable);
+       hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
+                     enable);
        req->function_id = func_id;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -4007,10 +4008,10 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport,
 
        req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
-       hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
+       hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
 
-       hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
-                      HCLGE_CFG_MTA_ITEM_IDX_S, idx);
+       hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
+                       HCLGE_CFG_MTA_ITEM_IDX_S, idx);
        req->item_idx = cpu_to_le16(item_idx);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -4257,17 +4258,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
        }
 
        memset(&req, 0, sizeof(req));
-       hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
-       hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-       hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
-       hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-
-       hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
-       hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
-       hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
-                      HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
-       hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
-                      HCLGE_MAC_EPORT_PFID_S, 0);
+       hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+
+       hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
+                       HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
 
        req.egress_port = cpu_to_le16(egress_port);
 
@@ -4318,8 +4312,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
        }
 
        memset(&req, 0, sizeof(req));
-       hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
-       hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+       hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+       hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
        hclge_prepare_mac_addr(&req, addr);
        ret = hclge_remove_mac_vlan_tbl(vport, &req);
 
@@ -4351,10 +4345,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
                return -EINVAL;
        }
        memset(&req, 0, sizeof(req));
-       hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
-       hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-       hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
-       hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+       hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+       hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+       hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+       hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
        hclge_prepare_mac_addr(&req, addr);
        status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
        if (!status) {
@@ -4418,10 +4412,10 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
        }
 
        memset(&req, 0, sizeof(req));
-       hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
-       hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-       hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
-       hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+       hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+       hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+       hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+       hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
        hclge_prepare_mac_addr(&req, addr);
        status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
        if (!status) {
@@ -4802,19 +4796,19 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
        req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
        req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
        req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
-                       vcfg->accept_tag1 ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
-                       vcfg->accept_untag1 ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
-                       vcfg->accept_tag2 ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
-                       vcfg->accept_untag2 ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
-                    vcfg->insert_tag1_en ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
-                    vcfg->insert_tag2_en ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
+                     vcfg->accept_tag1 ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
+                     vcfg->accept_untag1 ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
+                     vcfg->accept_tag2 ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
+                     vcfg->accept_untag2 ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+                     vcfg->insert_tag1_en ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+                     vcfg->insert_tag2_en ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
 
        req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
        req->vf_bitmap[req->vf_offset] =
@@ -4840,14 +4834,14 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
 
        req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
-                    vcfg->strip_tag1_en ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
-                    vcfg->strip_tag2_en ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
-                    vcfg->vlan1_vlan_prionly ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
-                    vcfg->vlan2_vlan_prionly ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+                     vcfg->strip_tag1_en ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+                     vcfg->strip_tag2_en ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+                     vcfg->vlan1_vlan_prionly ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+                     vcfg->vlan2_vlan_prionly ? 1 : 0);
 
        req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
        req->vf_bitmap[req->vf_offset] =
@@ -4999,6 +4993,7 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
 
        req = (struct hclge_config_max_frm_size_cmd *)desc.data;
        req->max_frm_size = cpu_to_le16(max_frm_size);
+       req->min_frm_size = HCLGE_MAC_MIN_FRAME;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
@@ -5043,7 +5038,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
 
        req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
        req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
-       hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
+       hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
@@ -5073,7 +5068,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
                return ret;
        }
 
-       return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+       return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
 }
 
 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
@@ -5380,12 +5375,12 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
        phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
 
        retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
-       mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
-                                  HCLGE_PHY_MDIX_CTRL_S);
+       mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
+                                   HCLGE_PHY_MDIX_CTRL_S);
 
        retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
-       mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
-       is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
+       mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
+       is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
 
        phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
 
@@ -5531,7 +5526,6 @@ static int hclge_pci_init(struct hclge_dev *hdev)
 
        pci_set_master(pdev);
        hw = &hdev->hw;
-       hw->back = hdev;
        hw->io_base = pcim_iomap(pdev, 2, 0);
        if (!hw->io_base) {
                dev_err(&pdev->dev, "Can't map configuration register space\n");
@@ -5562,6 +5556,30 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
        pci_disable_device(pdev);
 }
 
+static void hclge_state_init(struct hclge_dev *hdev)
+{
+       set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
+       set_bit(HCLGE_STATE_DOWN, &hdev->state);
+       clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+       clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+       clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+       clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclge_state_uninit(struct hclge_dev *hdev)
+{
+       set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
+       if (hdev->service_timer.function)
+               del_timer_sync(&hdev->service_timer);
+       if (hdev->service_task.func)
+               cancel_work_sync(&hdev->service_task);
+       if (hdev->rst_service_task.func)
+               cancel_work_sync(&hdev->rst_service_task);
+       if (hdev->mbx_service_task.func)
+               cancel_work_sync(&hdev->mbx_service_task);
+}
+
 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
        struct pci_dev *pdev = ae_dev->pdev;
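
A hedged reading of the .function/.func checks carried into hclge_state_uninit(): they let the helper run on a partially initialized hdev (before timer_setup()/INIT_WORK() have been called, for example on an early error path), since del_timer_sync() and cancel_work_sync() must not be handed uninitialized objects.
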
@@ -5702,12 +5720,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
        /* Enable MISC vector(vector0) */
        hclge_enable_vector(&hdev->misc_vector, true);
 
-       set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
-       set_bit(HCLGE_STATE_DOWN, &hdev->state);
-       clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
-       clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
-       clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
-       clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+       hclge_state_init(hdev);
 
        pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
        return 0;
@@ -5812,16 +5825,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
        struct hclge_dev *hdev = ae_dev->priv;
        struct hclge_mac *mac = &hdev->hw.mac;
 
-       set_bit(HCLGE_STATE_DOWN, &hdev->state);
-
-       if (hdev->service_timer.function)
-               del_timer_sync(&hdev->service_timer);
-       if (hdev->service_task.func)
-               cancel_work_sync(&hdev->service_task);
-       if (hdev->rst_service_task.func)
-               cancel_work_sync(&hdev->rst_service_task);
-       if (hdev->mbx_service_task.func)
-               cancel_work_sync(&hdev->mbx_service_task);
+       hclge_state_uninit(hdev);
 
        if (mac->phydev)
                mdiobus_unregister(mac->mdio_bus);
@@ -6149,8 +6153,8 @@ static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
 
        req = (struct hclge_set_led_state_cmd *)desc.data;
-       hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
-                      HCLGE_LED_LOCATE_STATE_S, locate_led_status);
+       hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
+                       HCLGE_LED_LOCATE_STATE_S, locate_led_status);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
@@ -6280,7 +6284,6 @@ static const struct hnae3_ae_ops hclge_ops = {
 
 static struct hnae3_ae_algo ae_algo = {
        .ops = &hclge_ops,
-       .name = HCLGE_NAME,
        .pdev_id_table = ae_algo_pci_tbl,
 };
 
index 7488534528cdbea4e3ec70f0ca346a5d8453d315..a5abf8ee9b9681cf36b1e10b8a172e879e32cd69 100644 (file)
@@ -89,6 +89,7 @@
 
 /* Reset related Registers */
 #define HCLGE_MISC_RESET_STS_REG       0x20700
+#define HCLGE_MISC_VECTOR_INT_STS      0x20800
 #define HCLGE_GLOBAL_RESET_REG         0x20A00
 #define HCLGE_GLOBAL_RESET_BIT         0x0
 #define HCLGE_CORE_RESET_BIT           0x1
@@ -128,6 +129,7 @@ enum HCLGE_DEV_STATE {
        HCLGE_STATE_MBX_SERVICE_SCHED,
        HCLGE_STATE_MBX_HANDLING,
        HCLGE_STATE_STATISTICS_UPDATING,
+       HCLGE_STATE_CMD_DISABLE,
        HCLGE_STATE_MAX
 };
 
@@ -190,7 +192,6 @@ struct hclge_hw {
        int num_vec;
        struct hclge_cmq cmq;
        struct hclge_caps caps;
-       void *back;
 };
 
 /* TQP stats */
index 7541cb9b71ce2a6c5886dbd9bcf64028dfb36101..f34851c91eb39432705a6206959feffa7cc56529 100644 (file)
@@ -104,13 +104,15 @@ static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
        }
 }
 
-/* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message
+/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
+ * from mailbox message
  * msg[0]: opcode
  * msg[1]: <not relevant to this function>
  * msg[2]: ring_num
  * msg[3]: first ring type (TX|RX)
  * msg[4]: first tqp id
- * msg[5] ~ msg[14]: other ring type and tqp id
+ * msg[5]: first int_gl idx
+ * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
  */
 static int hclge_get_ring_chain_from_mbx(
                        struct hclge_mbx_vf_to_pf_cmd *req,
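
Given the message layout documented above, a runnable sketch of how a receiver would walk the ring triples; both count macros are assumed to be 3 here, matching the msg[3..5] / msg[6..14] spacing the comment describes:

#include <stdio.h>
#include <stdint.h>

#define BASIC_MSG_NUM 3		/* header: opcode, unused, ring_num      */
#define NODE_VAR_NUM  3		/* per ring: type, tqp id, int_gl idx    */

int main(void)
{
	uint8_t msg[15] = { 1, 0, 2,	/* opcode, unused, ring_num = 2   */
			    0, 5, 1,	/* ring 0: type 0, tqp 5, gl idx 1 */
			    1, 6, 2 };	/* ring 1: type 1, tqp 6, gl idx 2 */

	for (int i = 0; i < msg[2]; i++) {
		const uint8_t *n = &msg[BASIC_MSG_NUM + NODE_VAR_NUM * i];

		printf("ring %d: type=%u tqp=%u gl_idx=%u\n",
		       i, n[0], n[1], n[2]);
	}
	return 0;
}
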
@@ -128,12 +130,12 @@ static int hclge_get_ring_chain_from_mbx(
                HCLGE_MBX_RING_NODE_VARIABLE_NUM))
                return -ENOMEM;
 
-       hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+       hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
        ring_chain->tqp_index =
                        hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
-       hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
-                      HCLGE_INT_GL_IDX_S,
-                      req->msg[5]);
+       hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+                       HNAE3_RING_GL_IDX_S,
+                       req->msg[5]);
 
        cur_chain = ring_chain;
 
@@ -142,19 +144,19 @@ static int hclge_get_ring_chain_from_mbx(
                if (!new_chain)
                        goto err;
 
-               hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
-                            req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
-                            HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
+               hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+                             req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
+                             HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
 
                new_chain->tqp_index =
                hclge_get_queue_id(vport->nic.kinfo.tqp
                        [req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
                        HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);
 
-               hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
-                              HCLGE_INT_GL_IDX_S,
-                              req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
-                              HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
+               hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+                               HNAE3_RING_GL_IDX_S,
+                               req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
+                               HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
 
                cur_chain->next = new_chain;
                cur_chain = new_chain;
@@ -460,7 +462,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
                req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
 
                flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
-               if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
+               if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
                        dev_warn(&hdev->pdev->dev,
                                 "dropped invalid mailbox message, code = %d\n",
                                 req->msg[0]);
index 9f7932e423b5ec3efdad1b7d0cce4f39a1847e07..b6cfe6ff988dc2174d2459948a9c7faae0750b18 100644 (file)
@@ -67,16 +67,16 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
 
        mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
 
-       hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
-                      HCLGE_MDIO_PHYID_S, phyid);
-       hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
-                      HCLGE_MDIO_PHYREG_S, regnum);
+       hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+                       HCLGE_MDIO_PHYID_S, phyid);
+       hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+                       HCLGE_MDIO_PHYREG_S, regnum);
 
-       hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
-       hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
-                      HCLGE_MDIO_CTRL_ST_S, 1);
-       hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
-                      HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
+       hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+       hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+                       HCLGE_MDIO_CTRL_ST_S, 1);
+       hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+                       HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
 
        mdio_cmd->data_wr = cpu_to_le16(data);
 
@@ -105,16 +105,16 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
 
        mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
 
-       hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
-                      HCLGE_MDIO_PHYID_S, phyid);
-       hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
-                      HCLGE_MDIO_PHYREG_S, regnum);
+       hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+                       HCLGE_MDIO_PHYID_S, phyid);
+       hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+                       HCLGE_MDIO_PHYREG_S, regnum);
 
-       hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
-       hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
-                      HCLGE_MDIO_CTRL_ST_S, 1);
-       hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
-                      HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
+       hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+       hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+                       HCLGE_MDIO_CTRL_ST_S, 1);
+       hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+                       HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
 
        /* Read out phy data */
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -125,7 +125,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
                return ret;
        }
 
-       if (hnae_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
+       if (hnae3_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
                dev_err(&hdev->pdev->dev, "mdio read data error\n");
                return -EIO;
        }
index 262c125f81375a8f91f9bccc899144ad01f2605a..e2acf3bd6ba3197e2b4bbdf14c46df5ce3b74ccc 100644 (file)
@@ -1184,10 +1184,10 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
                        u16 qs_id = vport->qs_offset + tc;
                        u8 grp, sub_grp;
 
-                       grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M,
-                                            HCLGE_BP_GRP_ID_S);
-                       sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
-                                                HCLGE_BP_SUB_GRP_ID_S);
+                       grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
+                                             HCLGE_BP_GRP_ID_S);
+                       sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
+                                                 HCLGE_BP_SUB_GRP_ID_S);
                        if (i == grp)
                                qs_bitmap |= (1 << sub_grp);
 
@@ -1223,6 +1223,10 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
                tx_en = true;
                rx_en = true;
                break;
+       case HCLGE_FC_PFC:
+               tx_en = false;
+               rx_en = false;
+               break;
        default:
                tx_en = true;
                rx_en = true;
@@ -1240,8 +1244,9 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
        if (ret)
                return ret;
 
-       if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
-               return hclge_mac_pause_setup_hw(hdev);
+       ret = hclge_mac_pause_setup_hw(hdev);
+       if (ret)
+               return ret;
 
        /* Only DCB-supported dev supports qset back pressure and pfc cmd */
        if (!hnae3_dev_dcb_supported(hdev))
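
A hedged note on the new HCLGE_FC_PFC case: IEEE 802.3x link-level pause and per-priority PFC are generally mutually exclusive on a port, so when PFC is the flow-control mode the global MAC pause is now explicitly disabled instead of falling through to the default-on case; the preceding hunk makes hclge_mac_pause_setup_hw() run unconditionally so that the disable actually reaches hardware.
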
index c2b6e8a6700f067fa38511fdc34b27a1eb1dc75e..c82d49ebd5bfa807175a15d9c76961784602e39a 100644 (file)
@@ -123,10 +123,11 @@ struct hclge_port_shapping_cmd {
 };
 
 #define hclge_tm_set_field(dest, string, val) \
-                       hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \
-                                      (HCLGE_TM_SHAP_##string##_LSH), val)
+                          hnae3_set_field((dest), \
+                          (HCLGE_TM_SHAP_##string##_MSK), \
+                          (HCLGE_TM_SHAP_##string##_LSH), val)
 #define hclge_tm_get_field(src, string) \
-                       hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
+                       hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
                                       (HCLGE_TM_SHAP_##string##_LSH))
 
 int hclge_tm_schd_init(struct hclge_dev *hdev);
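
A usage note on the token-pasting accessors above: the second argument selects an HCLGE_TM_SHAP_* mask/shift pair by name. Illustrated with a hypothetical BS_B field:

    /* Hypothetical expansion, for illustration:
     *
     *     hclge_tm_set_field(shap_cfg, BS_B, 1);
     *
     * becomes, after preprocessing:
     *
     *     hnae3_set_field((shap_cfg), (HCLGE_TM_SHAP_BS_B_MSK),
     *                     (HCLGE_TM_SHAP_BS_B_LSH), 1);
     */
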
index 1bbfe131b596e498c8c12dff3339b5e48bec6acd..fb471fe2c4946692e1c36b31bf92ec325812a372 100644 (file)
@@ -76,32 +76,24 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
 {
        int size = ring->desc_num * sizeof(struct hclgevf_desc);
 
-       ring->desc = kzalloc(size, GFP_KERNEL);
+       ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
+                                        size, &ring->desc_dma_addr,
+                                        GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;
 
-       ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
-                                            size, DMA_BIDIRECTIONAL);
-
-       if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
-               ring->desc_dma_addr = 0;
-               kfree(ring->desc);
-               ring->desc = NULL;
-               return -ENOMEM;
-       }
-
        return 0;
 }
 
 static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
 {
-       dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
-                        ring->desc_num * sizeof(ring->desc[0]),
-                        hclgevf_ring_to_dma_dir(ring));
+       int size = ring->desc_num * sizeof(struct hclgevf_desc);
 
-       ring->desc_dma_addr = 0;
-       kfree(ring->desc);
-       ring->desc = NULL;
+       if (ring->desc) {
+               dma_free_coherent(cmq_ring_to_dev(ring), size,
+                                 ring->desc, ring->desc_dma_addr);
+               ring->desc = NULL;
+       }
 }
 
 static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
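
The rework above replaces the streaming-DMA pattern (kzalloc, dma_map_single, dma_mapping_error) with one coherent allocation that returns the CPU and device addresses together, so the mapping-error branch disappears. A minimal sketch of the resulting pairing, with placeholder names:

    #include <linux/dma-mapping.h>

    /* Placeholder helper names; only the alloc/free pairing matters here. */
    static int desc_ring_alloc(struct device *dev, size_t size,
                               void **cpu_addr, dma_addr_t *dma_addr)
    {
            /* zeroed, coherent: no separate map step, no dma_mapping_error() */
            *cpu_addr = dma_zalloc_coherent(dev, size, dma_addr, GFP_KERNEL);
            return *cpu_addr ? 0 : -ENOMEM;
    }

    static void desc_ring_free(struct device *dev, size_t size,
                               void *cpu_addr, dma_addr_t dma_addr)
    {
            if (cpu_addr)
                    dma_free_coherent(dev, size, cpu_addr, dma_addr);
    }
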
index a17872aab168112e637ba64747d09ee343712e8c..d1f16f0c1646f45b91d67713c8e95d3413eb53f8 100644 (file)
@@ -330,6 +330,12 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
 
 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
 {
+       if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
+               dev_warn(&hdev->pdev->dev,
+                        "vector(vector_id %d) has been freed.\n", vector_id);
+               return;
+       }
+
        hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
        hdev->num_msi_left += 1;
        hdev->num_msi_used -= 1;
@@ -444,12 +450,12 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev,  u16 rss_size)
 
        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
        for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
-               hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
-                            (tc_valid[i] & 0x1));
-               hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
-                              HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
-               hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
-                              HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
+               hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+                             (tc_valid[i] & 0x1));
+               hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+                               HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
+               hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+                               HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
        }
        status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (status)
@@ -547,24 +553,18 @@ static int hclgevf_get_tc_size(struct hnae3_handle *handle)
 }
 
 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
-                                      int vector,
+                                      int vector_id,
                                       struct hnae3_ring_chain_node *ring_chain)
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hnae3_ring_chain_node *node;
        struct hclge_mbx_vf_to_pf_cmd *req;
        struct hclgevf_desc desc;
-       int i = 0, vector_id;
+       int i = 0;
        int status;
        u8 type;
 
        req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
-       vector_id = hclgevf_get_vector_index(hdev, vector);
-       if (vector_id < 0) {
-               dev_err(&handle->pdev->dev,
-                       "Get vector index fail. ret =%d\n", vector_id);
-               return vector_id;
-       }
 
        for (node = ring_chain; node; node = node->next) {
                int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
@@ -582,11 +582,11 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
                }
 
                req->msg[idx_offset] =
-                               hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
+                               hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
                req->msg[idx_offset + 1] = node->tqp_index;
-               req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx,
-                                                         HNAE3_RING_GL_IDX_M,
-                                                         HNAE3_RING_GL_IDX_S);
+               req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
+                                                          HNAE3_RING_GL_IDX_M,
+                                                          HNAE3_RING_GL_IDX_S);
 
                i++;
                if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
@@ -617,7 +617,17 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
                                      struct hnae3_ring_chain_node *ring_chain)
 {
-       return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
+       struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+       int vector_id;
+
+       vector_id = hclgevf_get_vector_index(hdev, vector);
+       if (vector_id < 0) {
+               dev_err(&handle->pdev->dev,
+                       "Get vector index fail. ret =%d\n", vector_id);
+               return vector_id;
+       }
+
+       return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
 }
 
 static int hclgevf_unmap_ring_from_vector(
@@ -635,7 +645,7 @@ static int hclgevf_unmap_ring_from_vector(
                return vector_id;
        }
 
-       ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
+       ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
        if (ret)
                dev_err(&handle->pdev->dev,
                        "Unmap ring from vector fail. vector=%d, ret =%d\n",
@@ -648,8 +658,17 @@ static int hclgevf_unmap_ring_from_vector(
 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+       int vector_id;
 
-       hclgevf_free_vector(hdev, vector);
+       vector_id = hclgevf_get_vector_index(hdev, vector);
+       if (vector_id < 0) {
+               dev_err(&handle->pdev->dev,
+                       "hclgevf_put_vector get vector index fail. ret =%d\n",
+                       vector_id);
+               return vector_id;
+       }
+
+       hclgevf_free_vector(hdev, vector_id);
 
        return 0;
 }
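
The refactor above hoists the vector-to-index translation out of hclgevf_bind_ring_to_vector() and into each entry point (map, unmap, put), so the shared helper only ever sees a validated internal index. A hedged sketch of the lookup those entry points rely on (the vector_irq[] bookkeeping is assumed):

    #include <errno.h>

    /* Assumed shape of the driver's vector bookkeeping; illustrative only. */
    static int get_vector_index(const int *vector_irq, int num_msi, int vector)
    {
            int i;

            for (i = 0; i < num_msi; i++) {
                    if (vector_irq[i] == vector)
                            return i;       /* internal vector_id */
            }

            return -EINVAL;                 /* callers log and bail out */
    }
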
@@ -990,8 +1009,8 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
 
        /* wait to check the hardware reset completion status */
        val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
-       while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
-                           (cnt < HCLGEVF_RESET_WAIT_CNT)) {
+       while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
+              (cnt < HCLGEVF_RESET_WAIT_CNT)) {
                msleep(HCLGEVF_RESET_WAIT_MS);
                val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
                cnt++;
@@ -1582,9 +1601,10 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
        hclgevf_free_vector(hdev, 0);
 }
 
-static int hclgevf_init_instance(struct hclgevf_dev *hdev,
-                                struct hnae3_client *client)
+static int hclgevf_init_client_instance(struct hnae3_client *client,
+                                       struct hnae3_ae_dev *ae_dev)
 {
+       struct hclgevf_dev *hdev = ae_dev->priv;
        int ret;
 
        switch (client->type) {
@@ -1635,9 +1655,11 @@ static int hclgevf_init_instance(struct hclgevf_dev *hdev,
        return 0;
 }
 
-static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
-                                   struct hnae3_client *client)
+static void hclgevf_uninit_client_instance(struct hnae3_client *client,
+                                          struct hnae3_ae_dev *ae_dev)
 {
+       struct hclgevf_dev *hdev = ae_dev->priv;
+
        /* un-init roce, if it exists */
        if (hdev->roce_client)
                hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
@@ -1648,22 +1670,6 @@ static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
                client->ops->uninit_instance(&hdev->nic, 0);
 }
 
-static int hclgevf_register_client(struct hnae3_client *client,
-                                  struct hnae3_ae_dev *ae_dev)
-{
-       struct hclgevf_dev *hdev = ae_dev->priv;
-
-       return hclgevf_init_instance(hdev, client);
-}
-
-static void hclgevf_unregister_client(struct hnae3_client *client,
-                                     struct hnae3_ae_dev *ae_dev)
-{
-       struct hclgevf_dev *hdev = ae_dev->priv;
-
-       hclgevf_uninit_instance(hdev, client);
-}
-
 static int hclgevf_pci_init(struct hclgevf_dev *hdev)
 {
        struct pci_dev *pdev = hdev->pdev;
@@ -1924,8 +1930,8 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
 static const struct hnae3_ae_ops hclgevf_ops = {
        .init_ae_dev = hclgevf_init_ae_dev,
        .uninit_ae_dev = hclgevf_uninit_ae_dev,
-       .init_client_instance = hclgevf_register_client,
-       .uninit_client_instance = hclgevf_unregister_client,
+       .init_client_instance = hclgevf_init_client_instance,
+       .uninit_client_instance = hclgevf_uninit_client_instance,
        .start = hclgevf_ae_start,
        .stop = hclgevf_ae_stop,
        .map_ring_to_vector = hclgevf_map_ring_to_vector,
@@ -1962,7 +1968,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
 
 static struct hnae3_ae_algo ae_algovf = {
        .ops = &hclgevf_ops,
-       .name = HCLGEVF_NAME,
        .pdev_id_table = ae_algovf_pci_tbl,
 };
 
index b598c06af8e09e708b5a9c63865eb9208198b060..e9d5a4f96304e114722caea9c21509a4e0b6cc6c 100644 (file)
@@ -152,7 +152,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
                req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
 
                flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
-               if (unlikely(!hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
+               if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
                        dev_warn(&hdev->pdev->dev,
                                 "dropped invalid mailbox message, code = %d\n",
                                 req->msg[0]);
@@ -208,7 +208,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 
                        /* tail the async message in arq */
                        msg_q = hdev->arq.msg_q[hdev->arq.tail];
-                       memcpy(&msg_q[0], req->msg, HCLGE_MBX_MAX_ARQ_MSG_SIZE);
+                       memcpy(&msg_q[0], req->msg,
+                              HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
                        hclge_mbx_tail_ptr_move_arq(hdev->arq);
                        hdev->arq.count++;
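
A note on the memcpy fix above: msg_q stores u16 words while memcpy counts bytes, so the old call moved only half of each queued message (assuming HCLGE_MBX_MAX_ARQ_MSG_SIZE is 8, as in driver sources of this era):

    /* before: memcpy(&msg_q[0], req->msg, 8);                  8 bytes, 4 words
     * after:  memcpy(&msg_q[0], req->msg, 8 * sizeof(u16));   16 bytes, 8 words
     */
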
 
index 79b56744708427eb741a1fc8f964f28beb3f0961..6b19607a4caac0f846186917b9c4b286f3b0b9b6 100644 (file)
@@ -264,7 +264,6 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
        struct hinic_cmd_fw_ctxt fw_ctxt;
-       struct hinic_pfhwdev *pfhwdev;
        u16 out_size;
        int err;
 
@@ -276,8 +275,6 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev)
        fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
        fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ;
 
-       pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
        err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT,
                                 &fw_ctxt, sizeof(fw_ctxt),
                                 &fw_ctxt, &out_size);
index e2e5cdc7119c3ed0e890f99c7b30996d72d280e9..4c0f7eda1166c5df202c3b9a71cc2e43516531fb 100644 (file)
@@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
 {
        struct hinic_rq *rq = rxq->rq;
 
+       irq_set_affinity_hint(rq->irq, NULL);
        free_irq(rq->irq, rxq);
        rx_del_napi(rxq);
 }
index c944bd10b03d29c5711903714456ac9ca975e735..51762428b40e6ef91e567c4f60c3459cae275402 100644 (file)
@@ -7522,7 +7522,7 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
        case TC_CLSFLOWER_STATS:
                return -EOPNOTSUPP;
        default:
-               return -EINVAL;
+               return -EOPNOTSUPP;
        }
 }
 
@@ -7554,7 +7554,7 @@ static int i40e_setup_tc_block(struct net_device *dev,
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
-                                            np, np);
+                                            np, np, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
                return 0;
@@ -11841,7 +11841,6 @@ static int i40e_xdp(struct net_device *dev,
        case XDP_SETUP_PROG:
                return i40e_xdp_setup(vsi, xdp->prog);
        case XDP_QUERY_PROG:
-               xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
                xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
                return 0;
        default:
@@ -11978,7 +11977,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
                snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
                         IFNAMSIZ - 4,
                         pf->vsi[pf->lan_vsi]->netdev->name);
-               random_ether_addr(mac_addr);
+               eth_random_addr(mac_addr);
 
                spin_lock_bh(&vsi->mac_filter_hash_lock);
                i40e_add_mac_filter(vsi, mac_addr);
index 8ffb7454e67c2a0309708c1b47487c4d1c58b440..b151ae316546c2483aa91abfabc900b608e53e4a 100644 (file)
@@ -2103,9 +2103,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
        unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
        unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-                               SKB_DATA_ALIGN(I40E_SKB_PAD +
-                                              (xdp->data_end -
-                                               xdp->data_hard_start));
+                               SKB_DATA_ALIGN(xdp->data_end -
+                                              xdp->data_hard_start);
 #endif
        struct sk_buff *skb;
 
@@ -2124,7 +2123,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
                return NULL;
 
        /* update pointers within the skb to store the data */
-       skb_reserve(skb, I40E_SKB_PAD + (xdp->data - xdp->data_hard_start));
+       skb_reserve(skb, xdp->data - xdp->data_hard_start);
        __skb_put(skb, xdp->data_end - xdp->data);
        if (metasize)
                skb_metadata_set(skb, metasize);
@@ -2200,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
        return true;
 }
 
-#define I40E_XDP_PASS 0
-#define I40E_XDP_CONSUMED 1
-#define I40E_XDP_TX 2
+#define I40E_XDP_PASS          0
+#define I40E_XDP_CONSUMED      BIT(0)
+#define I40E_XDP_TX            BIT(1)
+#define I40E_XDP_REDIR         BIT(2)
 
 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
                              struct i40e_ring *xdp_ring);
@@ -2249,7 +2249,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
+               result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
@@ -2312,7 +2312,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        struct sk_buff *skb = rx_ring->skb;
        u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-       bool failure = false, xdp_xmit = false;
+       unsigned int xdp_xmit = 0;
+       bool failure = false;
        struct xdp_buff xdp;
 
        xdp.rxq = &rx_ring->xdp_rxq;
@@ -2373,8 +2374,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                }
 
                if (IS_ERR(skb)) {
-                       if (PTR_ERR(skb) == -I40E_XDP_TX) {
-                               xdp_xmit = true;
+                       unsigned int xdp_res = -PTR_ERR(skb);
+
+                       if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+                               xdp_xmit |= xdp_res;
                                i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
                        } else {
                                rx_buffer->pagecnt_bias++;
@@ -2428,12 +2431,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                total_rx_packets++;
        }
 
-       if (xdp_xmit) {
+       if (xdp_xmit & I40E_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (xdp_xmit & I40E_XDP_TX) {
                struct i40e_ring *xdp_ring =
                        rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 
                i40e_xdp_ring_update_tail(xdp_ring);
-               xdp_do_flush_map();
        }
 
        rx_ring->skb = skb;
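
The switch from an enum-style verdict to bit flags above lets one receive loop record TX and REDIRECT outcomes independently, then flush the redirect maps and bump the XDP ring tail at most once per poll. A self-contained model of the accumulation:

    #include <stdio.h>

    #define XDP_RES_CONSUMED (1u << 0)      /* mirrors I40E_XDP_CONSUMED */
    #define XDP_RES_TX       (1u << 1)      /* mirrors I40E_XDP_TX */
    #define XDP_RES_REDIR    (1u << 2)      /* mirrors I40E_XDP_REDIR */

    int main(void)
    {
            unsigned int per_pkt[] = { XDP_RES_TX, XDP_RES_REDIR, XDP_RES_CONSUMED };
            unsigned int xdp_xmit = 0;
            unsigned int i;

            for (i = 0; i < 3; i++) {
                    if (per_pkt[i] & (XDP_RES_TX | XDP_RES_REDIR))
                            xdp_xmit |= per_pkt[i];
            }

            if (xdp_xmit & XDP_RES_REDIR)
                    printf("xdp_do_flush_map() once\n");
            if (xdp_xmit & XDP_RES_TX)
                    printf("ring tail bump once\n");
            return 0;
    }
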
index a7b87f93541138c497056f9c91dacbd91c519fc5..5906c1c1d19d82d7e37b0a891e457fea792b4153 100644 (file)
@@ -2884,7 +2884,7 @@ static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter,
        case TC_CLSFLOWER_STATS:
                return -EOPNOTSUPP;
        default:
-               return -EINVAL;
+               return -EOPNOTSUPP;
        }
 }
 
@@ -2926,7 +2926,7 @@ static int i40evf_setup_tc_block(struct net_device *dev,
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb,
-                                            adapter, adapter);
+                                            adapter, adapter, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb,
                                        adapter);
index 252440a418dc4ba0dc0693cfb5a3b4dd69fd6b28..8a28f3388f699bf30df581af4f9aa08ed5f2b567 100644 (file)
 #define E1000_TQAVCTRL_XMIT_MODE       BIT(0)
 #define E1000_TQAVCTRL_DATAFETCHARB    BIT(4)
 #define E1000_TQAVCTRL_DATATRANARB     BIT(8)
+#define E1000_TQAVCTRL_DATATRANTIM     BIT(9)
+#define E1000_TQAVCTRL_SP_WAIT_SR      BIT(10)
+/* Fetch Time Delta - bits 31:16
+ *
+ * This field holds the value to be subtracted from the launch time when
+ * making the fetch time decision. The FetchTimeDelta value is defined in
+ * 32 ns granularity.
+ *
+ * This field is 16 bits wide, and so the maximum value is:
+ *
+ * 65535 * 32 ns = 2097120 ns ~= 2.1 msec
+ *
+ * XXX: We are configuring the max value here since we couldn't come up
+ * with a reason for not doing so.
+ */
+#define E1000_TQAVCTRL_FETCHTIME_DELTA (0xFFFF << 16)
 
 /* TX Qav Credit Control fields */
 #define E1000_TQAVCC_IDLESLOPE_MASK    0xFFFF
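
Worked numbers for the FetchTimeDelta field defined above (a 16-bit quantity in 32 ns units occupying TQAVCTRL bits 31:16):

    #include <stdio.h>

    int main(void)
    {
            unsigned int max_units = 0xFFFF;                 /* 16-bit field */
            unsigned long long max_ns = max_units * 32ULL;   /* 32 ns units */
            unsigned int reg_field = max_units << 16;        /* bits 31:16 */

            /* prints: 2097120 ns (~2.1 ms), field 0xffff0000 */
            printf("%llu ns (~2.1 ms), field %#x\n", max_ns, reg_field);
            return 0;
    }
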
index 9643b5b3d444b2aa19d0f73045d4a72911e24fd4..ca54e268d157bd9afb7ab23854a8ae52ff260215 100644 (file)
@@ -262,6 +262,7 @@ struct igb_ring {
        u16 count;                      /* number of desc. in the ring */
        u8 queue_index;                 /* logical index of the ring*/
        u8 reg_idx;                     /* physical index of the ring */
+       bool launchtime_enable;         /* true if LaunchTime is enabled */
        bool cbs_enable;                /* indicates if CBS is enabled */
        s32 idleslope;                  /* idleSlope in kbps */
        s32 sendslope;                  /* sendSlope in kbps */
index f707709969acfee137d30b698304dfd99f7c45e7..e3a0c02721c9e239b927c3166c55b70c74671d49 100644 (file)
@@ -1654,33 +1654,65 @@ static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
        wr32(E1000_I210_TQAVCC(queue), val);
 }
 
+static bool is_any_cbs_enabled(struct igb_adapter *adapter)
+{
+       int i;
+
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               if (adapter->tx_ring[i]->cbs_enable)
+                       return true;
+       }
+
+       return false;
+}
+
+static bool is_any_txtime_enabled(struct igb_adapter *adapter)
+{
+       int i;
+
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               if (adapter->tx_ring[i]->launchtime_enable)
+                       return true;
+       }
+
+       return false;
+}
+
 /**
- *  igb_configure_cbs - Configure Credit-Based Shaper (CBS)
+ *  igb_config_tx_modes - Configure "Qav Tx mode" features on igb
  *  @adapter: pointer to adapter struct
  *  @queue: queue number
- *  @enable: true = enable CBS, false = disable CBS
- *  @idleslope: idleSlope in kbps
- *  @sendslope: sendSlope in kbps
- *  @hicredit: hiCredit in bytes
- *  @locredit: loCredit in bytes
  *
- *  Configure CBS for a given hardware queue. When disabling, idleslope,
- *  sendslope, hicredit, locredit arguments are ignored. Returns 0 if
- *  success. Negative otherwise.
+ *  Configure CBS and Launchtime for a given hardware queue.
+ *  Parameters are retrieved from the relevant Tx ring, so
+ *  igb_save_cbs_params() and igb_save_txtime_params() should be called
+ *  beforehand to set them correctly.
  **/
-static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
-                             bool enable, int idleslope, int sendslope,
-                             int hicredit, int locredit)
+static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
 {
+       struct igb_ring *ring = adapter->tx_ring[queue];
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
-       u32 tqavcc;
+       u32 tqavcc, tqavctrl;
        u16 value;
 
        WARN_ON(hw->mac.type != e1000_i210);
        WARN_ON(queue < 0 || queue > 1);
 
-       if (enable || queue == 0) {
+       /* If any of the Qav features is enabled, configure the queue as SR
+        * with HIGH PRIO. If none is enabled, configure it as SP with LOW
+        * PRIO.
+        */
+       if (ring->cbs_enable || ring->launchtime_enable) {
+               set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
+               set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
+       } else {
+               set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
+               set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
+       }
+
+       /* If CBS is enabled, set DataTranARB and configure its parameters. */
+       if (ring->cbs_enable || queue == 0) {
                /* i210 does not allow the queue 0 to be in the Strict
                 * Priority mode while the Qav mode is enabled, so,
                 * instead of disabling strict priority mode, we give
@@ -1690,14 +1722,19 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
                 * Queue0 QueueMode must be set to 1b when
                 * TransmitMode is set to Qav."
                 */
-               if (queue == 0 && !enable) {
+               if (queue == 0 && !ring->cbs_enable) {
                        /* max "linkspeed" idleslope in kbps */
-                       idleslope = 1000000;
-                       hicredit = ETH_FRAME_LEN;
+                       ring->idleslope = 1000000;
+                       ring->hicredit = ETH_FRAME_LEN;
                }
 
-               set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
-               set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
+               /* Always set data transfer arbitration to the credit-based
+                * shaper algorithm on TQAVCTRL if CBS is enabled for any of
+                * the queues.
+                */
+               tqavctrl = rd32(E1000_I210_TQAVCTRL);
+               tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
+               wr32(E1000_I210_TQAVCTRL, tqavctrl);
 
                /* According to i210 datasheet section 7.2.7.7, we should set
                 * the 'idleSlope' field from TQAVCC register following the
@@ -1756,17 +1793,16 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
                 *       calculated value, so the resulting bandwidth might
                 *       be slightly higher for some configurations.
                 */
-               value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);
+               value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
 
                tqavcc = rd32(E1000_I210_TQAVCC(queue));
                tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
                tqavcc |= value;
                wr32(E1000_I210_TQAVCC(queue), tqavcc);
 
-               wr32(E1000_I210_TQAVHC(queue), 0x80000000 + hicredit * 0x7735);
+               wr32(E1000_I210_TQAVHC(queue),
+                    0x80000000 + ring->hicredit * 0x7735);
        } else {
-               set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
-               set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
 
                /* Set idleSlope to zero. */
                tqavcc = rd32(E1000_I210_TQAVCC(queue));
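
To make the idleSlope scaling above concrete: the register field is DIV_ROUND_UP(idleslope_kbps * 61034, 1000000), the constant coming from the datasheet recipe referenced in the surrounding comment. A worked example for an arbitrary 20 Mbps reservation:

    #include <stdio.h>

    #define DIV_ROUND_UP_ULL(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long long idleslope = 20000;   /* kbps, example value */
            unsigned long long value =
                    DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000ULL);

            printf("TQAVCC idleSlope field = %llu\n", value);       /* 1221 */
            return 0;
    }
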
@@ -1775,6 +1811,43 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
 
                /* Set hiCredit to zero. */
                wr32(E1000_I210_TQAVHC(queue), 0);
+
+               /* If CBS is not enabled for any queues anymore, then return to
+                * the default state of Data Transmission Arbitration on
+                * TQAVCTRL.
+                */
+               if (!is_any_cbs_enabled(adapter)) {
+                       tqavctrl = rd32(E1000_I210_TQAVCTRL);
+                       tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
+                       wr32(E1000_I210_TQAVCTRL, tqavctrl);
+               }
+       }
+
+       /* If LaunchTime is enabled, set DataTranTIM. */
+       if (ring->launchtime_enable) {
+               /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled
+                * for any of the SR queues, and configure fetchtime delta.
+                * XXX NOTE:
+                *     - LaunchTime will be enabled for all SR queues.
+                *     - A fixed offset can be added relative to the launch
+                *       time of all packets if configured at reg LAUNCH_OS0.
+                *       We are keeping it as 0 for now (default value).
+                */
+               tqavctrl = rd32(E1000_I210_TQAVCTRL);
+               tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
+                      E1000_TQAVCTRL_FETCHTIME_DELTA;
+               wr32(E1000_I210_TQAVCTRL, tqavctrl);
+       } else {
+               /* If Launchtime is not enabled for any SR queues anymore,
+                * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta,
+                * effectively disabling Launchtime.
+                */
+               if (!is_any_txtime_enabled(adapter)) {
+                       tqavctrl = rd32(E1000_I210_TQAVCTRL);
+                       tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
+                       tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
+                       wr32(E1000_I210_TQAVCTRL, tqavctrl);
+               }
        }
 
        /* XXX: In i210 controller the sendSlope and loCredit parameters from
@@ -1782,9 +1855,27 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
         * configuration' in respect to these parameters.
         */
 
-       netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
-                  (enable) ? "enabled" : "disabled", queue,
-                  idleslope, sendslope, hicredit, locredit);
+       netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d \
+                           idleslope %d sendslope %d hiCredit %d \
+                           locredit %d\n",
+                  (ring->cbs_enable) ? "enabled" : "disabled",
+                  (ring->launchtime_enable) ? "enabled" : "disabled", queue,
+                  ring->idleslope, ring->sendslope, ring->hicredit,
+                  ring->locredit);
+}
+
+static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
+                                 bool enable)
+{
+       struct igb_ring *ring;
+
+       if (queue < 0 || queue > adapter->num_tx_queues)
+               return -EINVAL;
+
+       ring = adapter->tx_ring[queue];
+       ring->launchtime_enable = enable;
+
+       return 0;
 }
 
 static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
@@ -1807,21 +1898,15 @@ static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
        return 0;
 }
 
-static bool is_any_cbs_enabled(struct igb_adapter *adapter)
-{
-       struct igb_ring *ring;
-       int i;
-
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               ring = adapter->tx_ring[i];
-
-               if (ring->cbs_enable)
-                       return true;
-       }
-
-       return false;
-}
-
+/**
+ *  igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable
+ *  @adapter: pointer to adapter struct
+ *
+ *  Configure the TQAVCTRL register, switching the controller's Tx mode
+ *  when FQTSS is enabled or disabled. Additionally, issue a call to
+ *  igb_config_tx_modes() per queue so any previously saved Tx
+ *  parameters are applied.
+ **/
 static void igb_setup_tx_mode(struct igb_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
@@ -1836,11 +1921,11 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
                int i, max_queue;
 
                /* Configure TQAVCTRL register: set transmit mode to 'Qav',
-                * set data fetch arbitration to 'round robin' and set data
-                * transfer arbitration to 'credit shaper algorithm.
+                * set data fetch arbitration to 'round robin', and set SP_WAIT_SR
+                * so SP queues wait for SR ones.
                 */
                val = rd32(E1000_I210_TQAVCTRL);
-               val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_DATATRANARB;
+               val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
                val &= ~E1000_TQAVCTRL_DATAFETCHARB;
                wr32(E1000_I210_TQAVCTRL, val);
 
@@ -1881,11 +1966,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
                            adapter->num_tx_queues : I210_SR_QUEUES_NUM;
 
                for (i = 0; i < max_queue; i++) {
-                       struct igb_ring *ring = adapter->tx_ring[i];
-
-                       igb_configure_cbs(adapter, i, ring->cbs_enable,
-                                         ring->idleslope, ring->sendslope,
-                                         ring->hicredit, ring->locredit);
+                       igb_config_tx_modes(adapter, i);
                }
        } else {
                wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
@@ -2459,6 +2540,19 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
        return features;
 }
 
+static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
+{
+       if (!is_fqtss_enabled(adapter)) {
+               enable_fqtss(adapter, true);
+               return;
+       }
+
+       igb_config_tx_modes(adapter, queue);
+
+       if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
+               enable_fqtss(adapter, false);
+}
+
 static int igb_offload_cbs(struct igb_adapter *adapter,
                           struct tc_cbs_qopt_offload *qopt)
 {
@@ -2479,17 +2573,7 @@ static int igb_offload_cbs(struct igb_adapter *adapter,
        if (err)
                return err;
 
-       if (is_fqtss_enabled(adapter)) {
-               igb_configure_cbs(adapter, qopt->queue, qopt->enable,
-                                 qopt->idleslope, qopt->sendslope,
-                                 qopt->hicredit, qopt->locredit);
-
-               if (!is_any_cbs_enabled(adapter))
-                       enable_fqtss(adapter, false);
-
-       } else {
-               enable_fqtss(adapter, true);
-       }
+       igb_offload_apply(adapter, qopt->queue);
 
        return 0;
 }
@@ -2698,7 +2782,7 @@ static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
        case TC_CLSFLOWER_STATS:
                return -EOPNOTSUPP;
        default:
-               return -EINVAL;
+               return -EOPNOTSUPP;
        }
 }
 
@@ -2728,7 +2812,7 @@ static int igb_setup_tc_block(struct igb_adapter *adapter,
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, igb_setup_tc_block_cb,
-                                            adapter, adapter);
+                                            adapter, adapter, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb,
                                        adapter);
@@ -2738,6 +2822,29 @@ static int igb_setup_tc_block(struct igb_adapter *adapter,
        }
 }
 
+static int igb_offload_txtime(struct igb_adapter *adapter,
+                             struct tc_etf_qopt_offload *qopt)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       int err;
+
+       /* Launchtime offloading is only supported by the i210 controller. */
+       if (hw->mac.type != e1000_i210)
+               return -EOPNOTSUPP;
+
+       /* Launchtime offloading is only supported on queues 0 and 1. */
+       if (qopt->queue < 0 || qopt->queue > 1)
+               return -EINVAL;
+
+       err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
+       if (err)
+               return err;
+
+       igb_offload_apply(adapter, qopt->queue);
+
+       return 0;
+}
+
 static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
                        void *type_data)
 {
@@ -2748,6 +2855,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
                return igb_offload_cbs(adapter, type_data);
        case TC_SETUP_BLOCK:
                return igb_setup_tc_block(adapter, type_data);
+       case TC_SETUP_QDISC_ETF:
+               return igb_offload_txtime(adapter, type_data);
 
        default:
                return -EOPNOTSUPP;
@@ -5568,11 +5677,14 @@ static void igb_set_itr(struct igb_q_vector *q_vector)
        }
 }
 
-static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
-                           u32 type_tucmd, u32 mss_l4len_idx)
+static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
+                           struct igb_tx_buffer *first,
+                           u32 vlan_macip_lens, u32 type_tucmd,
+                           u32 mss_l4len_idx)
 {
        struct e1000_adv_tx_context_desc *context_desc;
        u16 i = tx_ring->next_to_use;
+       struct timespec64 ts;
 
        context_desc = IGB_TX_CTXTDESC(tx_ring, i);
 
@@ -5587,9 +5699,18 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
                mss_l4len_idx |= tx_ring->reg_idx << 4;
 
        context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
-       context_desc->seqnum_seed       = 0;
        context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
        context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
+
+       /* We assume there is always a valid tx time available. Invalid times
+        * should have been handled by the upper layers.
+        */
+       if (tx_ring->launchtime_enable) {
+               ts = ns_to_timespec64(first->skb->tstamp);
+               context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
+       } else {
+               context_desc->seqnum_seed = 0;
+       }
 }
 
 static int igb_tso(struct igb_ring *tx_ring,
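
The seqnum_seed reuse in igb_tx_ctxtdesc() above encodes the launch time in 32 ns units: the nanosecond part of skb->tstamp divided by 32. A small worked example:

    #include <stdio.h>

    int main(void)
    {
            long tv_nsec = 640000;                  /* example tstamp remainder, ns */
            unsigned long seed = tv_nsec / 32;      /* 32 ns units */

            printf("seqnum_seed = %lu\n", seed);    /* 20000 */
            return 0;
    }
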
@@ -5672,7 +5793,8 @@ static int igb_tso(struct igb_ring *tx_ring,
        vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
 
-       igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
+       igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
+                       type_tucmd, mss_l4len_idx);
 
        return 1;
 }
@@ -5727,7 +5849,7 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
        vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
 
-       igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
+       igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
 }
 
 #define IGB_SET_FLAG(_input, _flag, _result) \
@@ -6015,8 +6137,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
                }
        }
 
-       skb_tx_timestamp(skb);
-
        if (skb_vlan_tag_present(skb)) {
                tx_flags |= IGB_TX_FLAGS_VLAN;
                tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
@@ -6032,6 +6152,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
        else if (!tso)
                igb_tx_csum(tx_ring, first);
 
+       skb_tx_timestamp(skb);
+
        if (igb_tx_map(tx_ring, first, hdr_len))
                goto cleanup_tx_tstamp;
 
index 3e87dbbc90246dba3a59e3f8ccded5885b441ae2..5a6600f7b382c10e1e0d06ed5046dcc2a43e5cfe 100644 (file)
@@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
        return skb;
 }
 
-#define IXGBE_XDP_PASS 0
-#define IXGBE_XDP_CONSUMED 1
-#define IXGBE_XDP_TX 2
+#define IXGBE_XDP_PASS         0
+#define IXGBE_XDP_CONSUMED     BIT(0)
+#define IXGBE_XDP_TX           BIT(1)
+#define IXGBE_XDP_REDIR                BIT(2)
 
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
                               struct xdp_frame *xdpf);
@@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
        case XDP_REDIRECT:
                err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
                if (!err)
-                       result = IXGBE_XDP_TX;
+                       result = IXGBE_XDP_REDIR;
                else
                        result = IXGBE_XDP_CONSUMED;
                break;
@@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
        u16 cleaned_count = ixgbe_desc_unused(rx_ring);
-       bool xdp_xmit = false;
+       unsigned int xdp_xmit = 0;
        struct xdp_buff xdp;
 
        xdp.rxq = &rx_ring->xdp_rxq;
@@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                }
 
                if (IS_ERR(skb)) {
-                       if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
-                               xdp_xmit = true;
+                       unsigned int xdp_res = -PTR_ERR(skb);
+
+                       if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+                               xdp_xmit |= xdp_res;
                                ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
                        } else {
                                rx_buffer->pagecnt_bias++;
@@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                total_rx_packets++;
        }
 
-       if (xdp_xmit) {
+       if (xdp_xmit & IXGBE_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (xdp_xmit & IXGBE_XDP_TX) {
                struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
 
                /* Force memory writes to complete before letting h/w
@@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 */
                wmb();
                writel(ring->next_to_use, ring->tail);
-
-               xdp_do_flush_map();
        }
 
        u64_stats_update_begin(&rx_ring->syncp);
@@ -5271,6 +5275,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
                             struct ixgbe_fwd_adapter *accel)
 {
+       u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+       int num_tc = netdev_get_num_tc(adapter->netdev);
        struct net_device *vdev = accel->netdev;
        int i, baseq, err;
 
@@ -5282,6 +5288,11 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
        accel->rx_base_queue = baseq;
        accel->tx_base_queue = baseq;
 
+       /* record configuration for macvlan interface in vdev */
+       for (i = 0; i < num_tc; i++)
+               netdev_bind_sb_channel_queue(adapter->netdev, vdev,
+                                            i, rss_i, baseq + (rss_i * i));
+
        for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
                adapter->rx_ring[baseq + i]->netdev = vdev;
 
@@ -5306,6 +5317,10 @@ static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter,
 
        netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n");
 
+       /* unbind the queues and drop the subordinate channel config */
+       netdev_unbind_sb_channel(adapter->netdev, vdev);
+       netdev_set_sb_channel(vdev, 0);
+
        clear_bit(accel->pool, adapter->fwd_bitmask);
        kfree(accel);
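
The netdev_bind_sb_channel_queue() loop above records, per traffic class, which slice of the lower device's queues backs the macvlan: TC i maps to rss_i hardware queues starting at baseq + rss_i * i. An illustrative layout with example sizes:

    #include <stdio.h>

    int main(void)
    {
            int baseq = 8, rss_i = 4, num_tc = 2;   /* example pool geometry */
            int i;

            for (i = 0; i < num_tc; i++)
                    printf("tc %d -> hw queues %d..%d\n", i,
                           baseq + rss_i * i, baseq + rss_i * i + rss_i - 1);
            /* prints: tc 0 -> hw queues 8..11, tc 1 -> hw queues 12..15 */
            return 0;
    }
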
 
@@ -8193,25 +8208,25 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
                                              input, common, ring->queue_index);
 }
 
+#ifdef IXGBE_FCOE
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
-                             void *accel_priv, select_queue_fallback_t fallback)
+                             struct net_device *sb_dev,
+                             select_queue_fallback_t fallback)
 {
-       struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
        struct ixgbe_adapter *adapter;
-       int txq;
-#ifdef IXGBE_FCOE
        struct ixgbe_ring_feature *f;
-#endif
+       int txq;
 
-       if (fwd_adapter) {
-               adapter = netdev_priv(dev);
-               txq = reciprocal_scale(skb_get_hash(skb),
-                                      adapter->num_rx_queues_per_pool);
+       if (sb_dev) {
+               u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+               struct net_device *vdev = sb_dev;
 
-               return txq + fwd_adapter->tx_base_queue;
-       }
+               txq = vdev->tc_to_txq[tc].offset;
+               txq += reciprocal_scale(skb_get_hash(skb),
+                                       vdev->tc_to_txq[tc].count);
 
-#ifdef IXGBE_FCOE
+               return txq;
+       }
 
        /*
         * only execute the code below if protocol is FCoE
@@ -8222,11 +8237,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
        case htons(ETH_P_FIP):
                adapter = netdev_priv(dev);
 
-               if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+               if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
                        break;
                /* fall through */
        default:
-               return fallback(dev, skb);
+               return fallback(dev, skb, sb_dev);
        }
 
        f = &adapter->ring_feature[RING_F_FCOE];
@@ -8238,11 +8253,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
                txq -= f->indices;
 
        return txq + f->offset;
-#else
-       return fallback(dev, skb);
-#endif
 }
 
+#endif
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
                               struct xdp_frame *xdpf)
 {
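
In the reworked ixgbe_select_queue() above, a packet bound for a subordinate device lands on txq = tc_to_txq[tc].offset + reciprocal_scale(hash, tc_to_txq[tc].count), where reciprocal_scale() maps a 32-bit hash into [0, count) with a multiply-shift rather than a modulo. A standalone model:

    #include <stdint.h>
    #include <stdio.h>

    /* Same math as the kernel's reciprocal_scale() helper. */
    static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
            return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    int main(void)
    {
            uint32_t hash = 0xdeadbeef;     /* stand-in for skb_get_hash() */
            uint16_t offset = 8, count = 4; /* example tc_to_txq[tc] values */
            uint16_t txq = offset + reciprocal_scale(hash, count);

            printf("txq = %u\n", txq);      /* 8 + 3 = 11 here */
            return 0;
    }
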
@@ -8762,6 +8775,11 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data)
        /* if we cannot find a free pool then disable the offload */
        netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n");
        macvlan_release_l2fw_offload(vdev);
+
+       /* unbind the queues and drop the subordinate channel config */
+       netdev_unbind_sb_channel(adapter->netdev, vdev);
+       netdev_set_sb_channel(vdev, 0);
+
        kfree(accel);
 
        return 0;
@@ -9325,7 +9343,7 @@ static int ixgbe_setup_tc_block(struct net_device *dev,
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb,
-                                            adapter, adapter);
+                                            adapter, adapter, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb,
                                        adapter);
@@ -9765,6 +9783,13 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
        if (!macvlan_supports_dest_filter(vdev))
                return ERR_PTR(-EMEDIUMTYPE);
 
+       /* We need to lock down the macvlan to be a single queue device so that
+        * we can reuse the tc_to_txq field in the macvlan netdev to represent
+        * the queue mapping to our netdev.
+        */
+       if (netif_is_multiqueue(vdev))
+               return ERR_PTR(-ERANGE);
+
        pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
        if (pool == adapter->num_rx_pools) {
                u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
@@ -9821,6 +9846,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
                return ERR_PTR(-ENOMEM);
 
        set_bit(pool, adapter->fwd_bitmask);
+       netdev_set_sb_channel(vdev, pool);
        accel->pool = pool;
        accel->netdev = vdev;
 
@@ -9862,6 +9888,10 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
                ring->netdev = NULL;
        }
 
+       /* unbind the queues and drop the subordinate channel config */
+       netdev_unbind_sb_channel(pdev, accel->netdev);
+       netdev_set_sb_channel(accel->netdev, 0);
+
        clear_bit(accel->pool, adapter->fwd_bitmask);
        kfree(accel);
 }
@@ -9962,7 +9992,6 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
        case XDP_SETUP_PROG:
                return ixgbe_xdp_setup(dev, xdp->prog);
        case XDP_QUERY_PROG:
-               xdp->prog_attached = !!(adapter->xdp_prog);
                xdp->prog_id = adapter->xdp_prog ?
                        adapter->xdp_prog->aux->id : 0;
                return 0;
@@ -10022,7 +10051,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_open               = ixgbe_open,
        .ndo_stop               = ixgbe_close,
        .ndo_start_xmit         = ixgbe_xmit_frame,
-       .ndo_select_queue       = ixgbe_select_queue,
        .ndo_set_rx_mode        = ixgbe_set_rx_mode,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = ixgbe_set_mac,
@@ -10045,6 +10073,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_poll_controller    = ixgbe_netpoll,
 #endif
 #ifdef IXGBE_FCOE
+       .ndo_select_queue       = ixgbe_select_queue,
        .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
        .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
        .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
index 59416eddd8402154280890a24a7023645b24abe9..d86446d202d5ed95826db225139b095d7c7a683c 100644 (file)
@@ -4462,7 +4462,6 @@ static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
        case XDP_SETUP_PROG:
                return ixgbevf_xdp_setup(dev, xdp->prog);
        case XDP_QUERY_PROG:
-               xdp->prog_attached = !!(adapter->xdp_prog);
                xdp->prog_id = adapter->xdp_prog ?
                               adapter->xdp_prog->aux->id : 0;
                return 0;
index afc81006944059837b5cbbdaaaa26d9dc87d82a0..7a637b51c7d2302b63f2eb579adea4e0459e7430 100644 (file)
@@ -563,14 +563,6 @@ ltq_etop_set_multicast_list(struct net_device *dev)
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static u16
-ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
-                     void *accel_priv, select_queue_fallback_t fallback)
-{
-       /* we are currently only using the first queue */
-       return 0;
-}
-
 static int
 ltq_etop_init(struct net_device *dev)
 {
@@ -641,7 +633,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = {
        .ndo_set_mac_address = ltq_etop_set_mac_address,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_set_rx_mode = ltq_etop_set_multicast_list,
-       .ndo_select_queue = ltq_etop_select_queue,
+       .ndo_select_queue = dev_pick_tx_zero,
        .ndo_init = ltq_etop_init,
        .ndo_tx_timeout = ltq_etop_tx_timeout,
 };
index cc2f7701e71e1b033c4bd7ceb78c970351f4d9ee..f33fd22b351c856a3544cdd9628a9da500d13abf 100644 (file)
@@ -18,8 +18,8 @@ if NET_VENDOR_MARVELL
 
 config MV643XX_ETH
        tristate "Marvell Discovery (643XX) and Orion ethernet support"
-       depends on (MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST) && INET
-       depends on HAS_DMA
+       depends on MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST
+       depends on INET
        select PHYLIB
        select MVMDIO
        ---help---
@@ -58,7 +58,6 @@ config MVNETA_BM_ENABLE
 config MVNETA
        tristate "Marvell Armada 370/38x/XP/37xx network interface support"
        depends on ARCH_MVEBU || COMPILE_TEST
-       depends on HAS_DMA
        select MVMDIO
        select PHYLINK
        ---help---
@@ -84,7 +83,6 @@ config MVNETA_BM
 config MVPP2
        tristate "Marvell Armada 375/7K/8K network interface support"
        depends on ARCH_MVEBU || COMPILE_TEST
-       depends on HAS_DMA
        select MVMDIO
        select PHYLINK
        ---help---
@@ -93,7 +91,7 @@ config MVPP2
 
 config PXA168_ETH
        tristate "Marvell pxa168 ethernet support"
-       depends on HAS_IOMEM && HAS_DMA
+       depends on HAS_IOMEM
        depends on CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST
        select PHYLIB
        ---help---
index 17a904cc6a5e0fbe538f42ec2b00573e035c2955..0ad2f3f7da85a029b5dea7dd3ce67b69d4ff8605 100644 (file)
@@ -1932,7 +1932,7 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
                rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
                index = rx_desc - rxq->descs;
                data = rxq->buf_virt_addr[index];
-               phys_addr = rx_desc->buf_phys_addr;
+               phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction;
 
                if (!mvneta_rxq_desc_is_first_last(rx_status) ||
                    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
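
Why the subtraction above is needed: the refill path stores the mapped address plus pp->rx_offset_correction in the descriptor, so the receive path must remove that bias before passing the address back to the DMA API. Sketched as a comment:

    /* refill:  desc->buf_phys_addr = dma_addr + pp->rx_offset_correction;
     * receive: phys_addr = desc->buf_phys_addr - pp->rx_offset_correction;
     *          dma_unmap_single(dev, phys_addr, ...);  // the original mapping
     */
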
index 4d11dd9e3246871fa37f12805c12de7bbe4c7717..51f65a202c6ed651304895a912e54ba72bc16750 100644 (file)
@@ -4,4 +4,4 @@
 #
 obj-$(CONFIG_MVPP2) := mvpp2.o
 
-mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o
+mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o mvpp2_debugfs.o
index def00dc3eb4e993887d2d3fd8f42a24116ab867d..67b9e81b7c0246435c26680e06939ef2e061bfd7 100644 (file)
@@ -1,17 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Definitions for Marvell PPv2 network controller for Armada 375 SoC.
  *
  * Copyright (C) 2014 Marvell
  *
  * Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 #ifndef _MVPP2_H_
 #define _MVPP2_H_
 
+#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #define MVPP2_PRS_SRAM_DATA_REG(idx)           (0x1204 + (idx) * 4)
 #define MVPP2_PRS_TCAM_CTRL_REG                        0x1230
 #define     MVPP2_PRS_TCAM_EN_MASK             BIT(0)
+#define MVPP2_PRS_TCAM_HIT_IDX_REG             0x1240
+#define MVPP2_PRS_TCAM_HIT_CNT_REG             0x1244
+#define     MVPP2_PRS_TCAM_HIT_CNT_MASK                GENMASK(15, 0)
 
 /* RSS Registers */
 #define MVPP22_RSS_INDEX                       0x1500
 #define     MVPP22_RSS_INDEX_TABLE_ENTRY(idx)  (idx)
 #define     MVPP22_RSS_INDEX_TABLE(idx)                ((idx) << 8)
 #define     MVPP22_RSS_INDEX_QUEUE(idx)                ((idx) << 16)
-#define MVPP22_RSS_TABLE_ENTRY                 0x1508
-#define MVPP22_RSS_TABLE                       0x1510
+#define MVPP22_RXQ2RSS_TABLE                   0x1504
 #define     MVPP22_RSS_TABLE_POINTER(p)                (p)
+#define MVPP22_RSS_TABLE_ENTRY                 0x1508
 #define MVPP22_RSS_WIDTH                       0x150c
 
 /* Classifier Registers */
 #define     MVPP2_CLS_LKP_INDEX_WAY_OFFS       6
 #define MVPP2_CLS_LKP_TBL_REG                  0x1818
 #define     MVPP2_CLS_LKP_TBL_RXQ_MASK         0xff
+#define     MVPP2_CLS_LKP_FLOW_PTR(flow)       ((flow) << 16)
 #define     MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK   BIT(25)
 #define MVPP2_CLS_FLOW_INDEX_REG               0x1820
 #define MVPP2_CLS_FLOW_TBL0_REG                        0x1824
+#define     MVPP2_CLS_FLOW_TBL0_LAST           BIT(0)
+#define     MVPP2_CLS_FLOW_TBL0_ENG_MASK       0x7
+#define     MVPP2_CLS_FLOW_TBL0_OFFS           1
+#define     MVPP2_CLS_FLOW_TBL0_ENG(x)         ((x) << 1)
+#define     MVPP2_CLS_FLOW_TBL0_PORT_ID_MASK   0xff
+#define     MVPP2_CLS_FLOW_TBL0_PORT_ID(port)  ((port) << 4)
+#define     MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL    BIT(23)
 #define MVPP2_CLS_FLOW_TBL1_REG                        0x1828
+#define     MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK  0x7
+#define     MVPP2_CLS_FLOW_TBL1_N_FIELDS(x)    (x)
+#define     MVPP2_CLS_FLOW_TBL1_PRIO_MASK      0x3f
+#define     MVPP2_CLS_FLOW_TBL1_PRIO(x)                ((x) << 9)
+#define     MVPP2_CLS_FLOW_TBL1_SEQ_MASK       0x7
+#define     MVPP2_CLS_FLOW_TBL1_SEQ(x)         ((x) << 15)
 #define MVPP2_CLS_FLOW_TBL2_REG                        0x182c
+#define     MVPP2_CLS_FLOW_TBL2_FLD_MASK       0x3f
+#define     MVPP2_CLS_FLOW_TBL2_FLD_OFFS(n)    ((n) * 6)
+#define     MVPP2_CLS_FLOW_TBL2_FLD(n, x)      ((x) << ((n) * 6))
 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)   (0x1980 + ((port) * 4))
 #define     MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS    3
 #define     MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK    0x7
 #define MVPP2_CLS_SWFWD_PCTRL_REG              0x19d0
 #define     MVPP2_CLS_SWFWD_PCTRL_MASK(port)   (1 << (port))
 
+/* Classifier C2 engine Registers */
+#define MVPP22_CLS_C2_TCAM_IDX                 0x1b00
+#define MVPP22_CLS_C2_TCAM_DATA0               0x1b10
+#define MVPP22_CLS_C2_TCAM_DATA1               0x1b14
+#define MVPP22_CLS_C2_TCAM_DATA2               0x1b18
+#define MVPP22_CLS_C2_TCAM_DATA3               0x1b1c
+#define MVPP22_CLS_C2_TCAM_DATA4               0x1b20
+#define     MVPP22_CLS_C2_PORT_ID(port)                ((port) << 8)
+#define MVPP22_CLS_C2_HIT_CTR                  0x1b50
+#define MVPP22_CLS_C2_ACT                      0x1b60
+#define     MVPP22_CLS_C2_ACT_RSS_EN(act)      (((act) & 0x3) << 19)
+#define     MVPP22_CLS_C2_ACT_FWD(act)         (((act) & 0x7) << 13)
+#define     MVPP22_CLS_C2_ACT_QHIGH(act)       (((act) & 0x3) << 11)
+#define     MVPP22_CLS_C2_ACT_QLOW(act)                (((act) & 0x3) << 9)
+#define MVPP22_CLS_C2_ATTR0                    0x1b64
+#define     MVPP22_CLS_C2_ATTR0_QHIGH(qh)      (((qh) & 0x1f) << 24)
+#define     MVPP22_CLS_C2_ATTR0_QHIGH_MASK     0x1f
+#define     MVPP22_CLS_C2_ATTR0_QHIGH_OFFS     24
+#define     MVPP22_CLS_C2_ATTR0_QLOW(ql)       (((ql) & 0x7) << 21)
+#define     MVPP22_CLS_C2_ATTR0_QLOW_MASK      0x7
+#define     MVPP22_CLS_C2_ATTR0_QLOW_OFFS      21
+#define MVPP22_CLS_C2_ATTR1                    0x1b68
+#define MVPP22_CLS_C2_ATTR2                    0x1b6c
+#define     MVPP22_CLS_C2_ATTR2_RSS_EN         BIT(30)
+#define MVPP22_CLS_C2_ATTR3                    0x1b70
+
 /* Descriptor Manager Top Registers */
 #define MVPP2_RXQ_NUM_REG                      0x2040
 #define MVPP2_RXQ_DESC_ADDR_REG                        0x2044
 #define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK  0xff00
 #define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
 
+/* Hit counters registers */
+#define MVPP2_CTRS_IDX                         0x7040
+#define MVPP2_CLS_DEC_TBL_HIT_CTR              0x7700
+#define MVPP2_CLS_FLOW_TBL_HIT_CTR             0x7704
+
 /* TX Scheduler registers */
 #define MVPP2_TXP_SCHED_PORT_INDEX_REG         0x8000
 #define MVPP2_TXP_SCHED_Q_CMD_REG              0x8004
 #define MVPP2_MAX_SKB_DESCS            (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
 
 /* Default number of RXQs in use */
-#define MVPP2_DEFAULT_RXQ              4
+#define MVPP2_DEFAULT_RXQ              1
 
 /* Max number of Rx descriptors */
 #define MVPP2_MAX_RXD_MAX              1024
        ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
 
 #define MVPP2_BIT_TO_BYTE(bit)         ((bit) / 8)
+#define MVPP2_BIT_TO_WORD(bit)         ((bit) / 32)
+#define MVPP2_BIT_IN_WORD(bit)         ((bit) % 32)
+
+/* RSS constants */
+#define MVPP22_RSS_TABLE_ENTRIES       32
 
 /* IPv6 max L3 address size */
 #define MVPP2_MAX_L3_ADDR_SIZE         16
@@ -703,6 +757,9 @@ struct mvpp2 {
        /* Workqueue to gather hardware statistics */
        char queue_name[30];
        struct workqueue_struct *stats_queue;
+
+       /* Debugfs root entry */
+       struct dentry *dbgfs_dir;
 };
 
 struct mvpp2_pcpu_stats {
@@ -795,6 +852,9 @@ struct mvpp2_port {
        bool has_tx_irqs;
 
        u32 tx_time_coal;
+
+       /* RSS indirection table */
+       u32 indir[MVPP22_RSS_TABLE_ENTRIES];
 };
 
 /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
@@ -831,52 +891,52 @@ struct mvpp2_port {
 
 /* HW TX descriptor for PPv2.1 */
 struct mvpp21_tx_desc {
-       u32 command;            /* Options used by HW for packet transmitting.*/
+       __le32 command;         /* Options used by HW for packet transmitting.*/
        u8  packet_offset;      /* the offset from the buffer beginning */
        u8  phys_txq;           /* destination queue ID                 */
-       u16 data_size;          /* data size of transmitted packet in bytes */
-       u32 buf_dma_addr;       /* physical addr of transmitted buffer  */
-       u32 buf_cookie;         /* cookie for access to TX buffer in tx path */
-       u32 reserved1[3];       /* hw_cmd (for future use, BM, PON, PNC) */
-       u32 reserved2;          /* reserved (for future use)            */
+       __le16 data_size;       /* data size of transmitted packet in bytes */
+       __le32 buf_dma_addr;    /* physical addr of transmitted buffer  */
+       __le32 buf_cookie;      /* cookie for access to TX buffer in tx path */
+       __le32 reserved1[3];    /* hw_cmd (for future use, BM, PON, PNC) */
+       __le32 reserved2;       /* reserved (for future use)            */
 };
 
 /* HW RX descriptor for PPv2.1 */
 struct mvpp21_rx_desc {
-       u32 status;             /* info about received packet           */
-       u16 reserved1;          /* parser_info (for future use, PnC)    */
-       u16 data_size;          /* size of received packet in bytes     */
-       u32 buf_dma_addr;       /* physical address of the buffer       */
-       u32 buf_cookie;         /* cookie for access to RX buffer in rx path */
-       u16 reserved2;          /* gem_port_id (for future use, PON)    */
-       u16 reserved3;          /* csum_l4 (for future use, PnC)        */
+       __le32 status;          /* info about received packet           */
+       __le16 reserved1;       /* parser_info (for future use, PnC)    */
+       __le16 data_size;       /* size of received packet in bytes     */
+       __le32 buf_dma_addr;    /* physical address of the buffer       */
+       __le32 buf_cookie;      /* cookie for access to RX buffer in rx path */
+       __le16 reserved2;       /* gem_port_id (for future use, PON)    */
+       __le16 reserved3;       /* csum_l4 (for future use, PnC)        */
        u8  reserved4;          /* bm_qset (for future use, BM)         */
        u8  reserved5;
-       u16 reserved6;          /* classify_info (for future use, PnC)  */
-       u32 reserved7;          /* flow_id (for future use, PnC) */
-       u32 reserved8;
+       __le16 reserved6;       /* classify_info (for future use, PnC)  */
+       __le32 reserved7;       /* flow_id (for future use, PnC) */
+       __le32 reserved8;
 };
 
 /* HW TX descriptor for PPv2.2 */
 struct mvpp22_tx_desc {
-       u32 command;
+       __le32 command;
        u8  packet_offset;
        u8  phys_txq;
-       u16 data_size;
-       u64 reserved1;
-       u64 buf_dma_addr_ptp;
-       u64 buf_cookie_misc;
+       __le16 data_size;
+       __le64 reserved1;
+       __le64 buf_dma_addr_ptp;
+       __le64 buf_cookie_misc;
 };
 
 /* HW RX descriptor for PPv2.2 */
 struct mvpp22_rx_desc {
-       u32 status;
-       u16 reserved1;
-       u16 data_size;
-       u32 reserved2;
-       u32 reserved3;
-       u64 buf_dma_addr_key_hash;
-       u64 buf_cookie_misc;
+       __le32 status;
+       __le16 reserved1;
+       __le16 data_size;
+       __le32 reserved2;
+       __le32 reserved3;
+       __le64 buf_dma_addr_key_hash;
+       __le64 buf_cookie_misc;
 };
 
 /* Opaque type used by the driver to manipulate the HW TX and RX
@@ -1043,4 +1103,8 @@ u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset);
 void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset,
                                u32 data);
 
+void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
+
+void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+
 #endif
index 8581d5b17dd51f4a06d4c4d622df8ea3e51cee25..efdb7a65683576a84806639630fce4d0928defcd 100644 (file)
+// SPDX-License-Identifier: GPL-2.0
 /*
  * RSS and Classifier helpers for Marvell PPv2 Network Controller
  *
  * Copyright (C) 2014 Marvell
  *
  * Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 
 #include "mvpp2.h"
 #include "mvpp2_cls.h"
+#include "mvpp2_prs.h"
+
+#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask)       \
+{                                                              \
+       .flow_type = _type,                                     \
+       .flow_id = _id,                                         \
+       .supported_hash_opts = _opts,                           \
+       .prs_ri = {                                             \
+               .ri = _ri,                                      \
+               .ri_mask = _ri_mask                             \
+       }                                                       \
+}
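
Each MVPP2_DEF_FLOW() invocation below is plain designated-initializer sugar; as a sketch, the first entry of the table expands to roughly:

        {
                .flow_type = TCP_V4_FLOW,
                .flow_id = MVPP2_FL_IP4_TCP_NF_UNTAG,
                .supported_hash_opts = MVPP22_CLS_HEK_IP4_5T,
                .prs_ri = {
                        .ri = MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
                              MVPP2_PRS_RI_L4_TCP,
                        .ri_mask = MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK,
                },
        }
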
+
+static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
+       /* TCP over IPv4 flows, not fragmented, no vlan tag */
+       MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+                      MVPP22_CLS_HEK_IP4_5T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+                      MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+                      MVPP22_CLS_HEK_IP4_5T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+                      MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+                      MVPP22_CLS_HEK_IP4_5T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+                      MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       /* TCP over IPv4 flows, not fragmented, with vlan tag */
+       MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+                      MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK),
+
+       MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+                      MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK),
+
+       MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+                      MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK),
+
+       /* TCP over IPv4 flows, fragmented, no vlan tag */
+       MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+                      MVPP22_CLS_HEK_IP4_2T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+                      MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+                      MVPP22_CLS_HEK_IP4_2T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+                      MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+                      MVPP22_CLS_HEK_IP4_2T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+                      MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       /* TCP over IPv4 flows, fragmented, with vlan tag */
+       MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+                      MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK),
+
+       MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+                      MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK),
+
+       MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+                      MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK),
+
+       /* UDP over IPv4 flows, not fragmented, no vlan tag */
+       MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+                      MVPP22_CLS_HEK_IP4_5T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+                      MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+                      MVPP22_CLS_HEK_IP4_5T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+                      MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+                      MVPP22_CLS_HEK_IP4_5T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+                      MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       /* UDP over IPv4 flows, not fragmented, with vlan tag */
+       MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+                      MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK),
+
+       MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+                      MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK),
+
+       MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+                      MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK),
+
+       /* UDP over IPv4 flows, fragmented, no vlan tag */
+       MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+                      MVPP22_CLS_HEK_IP4_2T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+                      MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+                      MVPP22_CLS_HEK_IP4_2T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+                      MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+                      MVPP22_CLS_HEK_IP4_2T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+                      MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       /* UDP over IPv4 flows, fragmented, with vlan tag */
+       MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+                      MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK),
+
+       MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+                      MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK),
+
+       MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+                      MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK),
+
+       /* TCP over IPv6 flows, not fragmented, no vlan tag */
+       MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
+                      MVPP22_CLS_HEK_IP6_5T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+                      MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
+                      MVPP22_CLS_HEK_IP6_5T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+                      MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       /* TCP over IPv6 flows, not fragmented, with vlan tag */
+       MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+                      MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK),
+
+       MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+                      MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK),
+
+       /* TCP over IPv6 flows, fragmented, no vlan tag */
+       MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+                      MVPP22_CLS_HEK_IP6_2T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+                      MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+                      MVPP22_CLS_HEK_IP6_2T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+                      MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       /* TCP over IPv6 flows, fragmented, with vlan tag */
+       MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+                      MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+                      MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK),
+
+       MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+                      MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+                      MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_IP_MASK),
+
+       /* UDP over IPv6 flows, not fragmented, no vlan tag */
+       MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
+                      MVPP22_CLS_HEK_IP6_5T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+                      MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
+                      MVPP22_CLS_HEK_IP6_5T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+                      MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       /* UDP over IPv6 flows, not fragmented, with vlan tag */
+       MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+                      MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK),
+
+       MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+                      MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK),
+
+       /* UDP over IPv6 flows, fragmented, no vlan tag */
+       MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+                      MVPP22_CLS_HEK_IP6_2T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
+                      MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+                      MVPP22_CLS_HEK_IP6_2T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
+                      MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+
+       /* UDP over IPv6 flows, fragmented, with vlan tag */
+       MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+                      MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+                      MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK),
+
+       MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+                      MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+                      MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_IP_MASK),
+
+       /* IPv4 flows, no vlan tag */
+       MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+                      MVPP22_CLS_HEK_IP4_2T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
+                      MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+       MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+                      MVPP22_CLS_HEK_IP4_2T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
+                      MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+       MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+                      MVPP22_CLS_HEK_IP4_2T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
+                      MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+
+       /* IPv4 flows, with vlan tag */
+       MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+                      MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP4,
+                      MVPP2_PRS_RI_L3_PROTO_MASK),
+       MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+                      MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP4_OPT,
+                      MVPP2_PRS_RI_L3_PROTO_MASK),
+       MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+                      MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP4_OTHER,
+                      MVPP2_PRS_RI_L3_PROTO_MASK),
+
+       /* IPv6 flows, no vlan tag */
+       MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
+                      MVPP22_CLS_HEK_IP6_2T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
+                      MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+       MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
+                      MVPP22_CLS_HEK_IP6_2T,
+                      MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
+                      MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
+
+       /* IPv6 flows, with vlan tag */
+       MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+                      MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP6,
+                      MVPP2_PRS_RI_L3_PROTO_MASK),
+       MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+                      MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
+                      MVPP2_PRS_RI_L3_IP6,
+                      MVPP2_PRS_RI_L3_PROTO_MASK),
+
+       /* Non IP flow, no vlan tag */
+       MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
+                      0,
+                      MVPP2_PRS_RI_VLAN_NONE,
+                      MVPP2_PRS_RI_VLAN_MASK),
+       /* Non IP flow, with vlan tag */
+       MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
+                      MVPP22_CLS_HEK_OPT_VLAN,
+                      0, 0),
+};
+
+u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index)
+{
+       mvpp2_write(priv, MVPP2_CTRS_IDX, index);
+
+       return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR);
+}
+
+void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
+                        struct mvpp2_cls_flow_entry *fe)
+{
+       fe->index = index;
+       mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index);
+       fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG);
+       fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG);
+       fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG);
+}
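
Both helpers follow the controller's indexed-access pattern: write the index register first, then read the data registers that the index selects. A minimal sketch of a caller dumping one flow-table entry, assuming a valid struct mvpp2 *priv from the driver context:

        struct mvpp2_cls_flow_entry fe;
        u32 hits = mvpp2_cls_flow_hits(priv, 8);

        mvpp2_cls_flow_read(priv, 8, &fe);
        pr_info("flow 8: %u hits, data %08x %08x %08x\n",
                hits, fe.data[0], fe.data[1], fe.data[2]);
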
 
 /* Update classification flow table registers */
 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
@@ -23,6 +349,25 @@ static void mvpp2_cls_flow_write(struct mvpp2 *priv,
        mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG,  fe->data[2]);
 }
 
+u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
+{
+       mvpp2_write(priv, MVPP2_CTRS_IDX, index);
+
+       return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR);
+}
+
+void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
+                          struct mvpp2_cls_lookup_entry *le)
+{
+       u32 val;
+
+       val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid;
+       mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
+       le->way = way;
+       le->lkpid = lkpid;
+       le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG);
+}
+
 /* Update classification lookup table register */
 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
                                   struct mvpp2_cls_lookup_entry *le)
@@ -34,6 +379,439 @@ static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
        mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
 }
 
+/* Operations on flow entry */
+static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe)
+{
+       return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
+}
+
+static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe,
+                                      int num_of_fields)
+{
+       fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
+       fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields);
+}
+
+static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe,
+                                 int field_index)
+{
+       return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) &
+               MVPP2_CLS_FLOW_TBL2_FLD_MASK;
+}
+
+static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe,
+                                  int field_index, int field_id)
+{
+       fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index,
+                                               MVPP2_CLS_FLOW_TBL2_FLD_MASK);
+       fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id);
+}
+
+static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe,
+                                  int engine)
+{
+       fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK);
+       fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine);
+}
+
+int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe)
+{
+       return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) &
+               MVPP2_CLS_FLOW_TBL0_ENG_MASK;
+}
+
+static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
+                                      bool from_packet)
+{
+       if (from_packet)
+               fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
+       else
+               fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
+}
+
+static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq)
+{
+       fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK);
+       fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq);
+}
+
+static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
+                                   bool is_last)
+{
+       fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST;
+       fe->data[0] |= !!is_last;
+}
+
+static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
+{
+       fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
+       fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio);
+}
+
+static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
+                                   u32 port)
+{
+       fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
+}
+
+/* Initialize the parser entry for the given flow */
+static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
+                                   struct mvpp2_cls_flow *flow)
+{
+       mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
+                          flow->prs_ri.ri_mask);
+}
+
+/* Initialize the Lookup Id table entry for the given flow */
+static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
+                                   struct mvpp2_cls_flow *flow)
+{
+       struct mvpp2_cls_lookup_entry le;
+
+       le.way = 0;
+       le.lkpid = flow->flow_id;
+
+       /* The default RxQ for this port is set in the C2 lookup */
+       le.data = 0;
+
+       /* We point to the first lookup in the sequence for the flow, that is
+        * the C2 lookup.
+        */
+       le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));
+
+       /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
+       le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
+
+       mvpp2_cls_lookup_write(priv, &le);
+}
+
+/* Initialize the flow table entries for the given flow */
+static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
+{
+       struct mvpp2_cls_flow_entry fe;
+       int i;
+
+       /* C2 lookup */
+       memset(&fe, 0, sizeof(fe));
+       fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);
+
+       mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
+       mvpp2_cls_flow_port_id_sel(&fe, true);
+       mvpp2_cls_flow_last_set(&fe, 0);
+       mvpp2_cls_flow_pri_set(&fe, 0);
+       mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1);
+
+       /* Add all ports */
+       for (i = 0; i < MVPP2_MAX_PORTS; i++)
+               mvpp2_cls_flow_port_add(&fe, BIT(i));
+
+       mvpp2_cls_flow_write(priv, &fe);
+
+       /* C3Hx lookups */
+       for (i = 0; i < MVPP2_MAX_PORTS; i++) {
+               memset(&fe, 0, sizeof(fe));
+               fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id);
+
+               mvpp2_cls_flow_port_id_sel(&fe, true);
+               mvpp2_cls_flow_pri_set(&fe, i + 1);
+               mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE);
+               mvpp2_cls_flow_port_add(&fe, BIT(i));
+
+               mvpp2_cls_flow_write(priv, &fe);
+       }
+
+       /* Update the last entry */
+       mvpp2_cls_flow_last_set(&fe, 1);
+       mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST);
+
+       mvpp2_cls_flow_write(priv, &fe);
+}
+
+/* Adds a field to the Header Extracted Key generation parameters */
+static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
+                                   u32 field_id)
+{
+       int nb_fields = mvpp2_cls_flow_hek_num_get(fe);
+
+       if (nb_fields == MVPP2_FLOW_N_FIELDS)
+               return -EINVAL;
+
+       mvpp2_cls_flow_hek_set(fe, nb_fields, field_id);
+
+       mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1);
+
+       return 0;
+}
+
+static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
+                                    unsigned long hash_opts)
+{
+       u32 field_id;
+       int i;
+
+       /* Clear old fields */
+       mvpp2_cls_flow_hek_num_set(fe, 0);
+       fe->data[2] = 0;
+
+       for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
+               switch (BIT(i)) {
+               case MVPP22_CLS_HEK_OPT_VLAN:
+                       field_id = MVPP22_CLS_FIELD_VLAN;
+                       break;
+               case MVPP22_CLS_HEK_OPT_IP4SA:
+                       field_id = MVPP22_CLS_FIELD_IP4SA;
+                       break;
+               case MVPP22_CLS_HEK_OPT_IP4DA:
+                       field_id = MVPP22_CLS_FIELD_IP4DA;
+                       break;
+               case MVPP22_CLS_HEK_OPT_IP6SA:
+                       field_id = MVPP22_CLS_FIELD_IP6SA;
+                       break;
+               case MVPP22_CLS_HEK_OPT_IP6DA:
+                       field_id = MVPP22_CLS_FIELD_IP6DA;
+                       break;
+               case MVPP22_CLS_HEK_OPT_L4SIP:
+                       field_id = MVPP22_CLS_FIELD_L4SIP;
+                       break;
+               case MVPP22_CLS_HEK_OPT_L4DIP:
+                       field_id = MVPP22_CLS_FIELD_L4DIP;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               if (mvpp2_flow_add_hek_field(fe, field_id))
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
+struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
+{
+       if (flow >= MVPP2_N_FLOWS)
+               return NULL;
+
+       return &cls_flows[flow];
+}
+
+/* Set the hash generation options for the given traffic flow.
+ * One traffic flow (in the ethtool sense) has multiple classification flows,
+ * to handle specific cases such as fragmentation, or the presence of a
+ * VLAN / DSA Tag.
+ *
+ * Each of these individual flows has different constraints, for example we
+ * can't hash fragmented packets on L4 data (else we would risk having packet
+ * re-ordering), so each classification flow masks the options with its
+ * supported ones.
+ */
+static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
+                                       u16 requested_opts)
+{
+       struct mvpp2_cls_flow_entry fe;
+       struct mvpp2_cls_flow *flow;
+       int i, engine, flow_index;
+       u16 hash_opts;
+
+       for (i = 0; i < MVPP2_N_FLOWS; i++) {
+               flow = mvpp2_cls_flow_get(i);
+               if (!flow)
+                       return -EINVAL;
+
+               if (flow->flow_type != flow_type)
+                       continue;
+
+               flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
+                                                       flow->flow_id);
+
+               mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+               hash_opts = flow->supported_hash_opts & requested_opts;
+
+               /* Use the C3HB engine to access L4 info. This adds L4 info to the
+                * hash parameters
+                */
+               if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
+                       engine = MVPP22_CLS_ENGINE_C3HB;
+               else
+                       engine = MVPP22_CLS_ENGINE_C3HA;
+
+               if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
+                       return -EINVAL;
+
+               mvpp2_cls_flow_eng_set(&fe, engine);
+
+               mvpp2_cls_flow_write(port->priv, &fe);
+       }
+
+       return 0;
+}
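
Each classifier flow honours only the hash fields it supports, so a 5-tuple request silently degrades on flows that cannot hash L4 data. A worked sketch for a fragmented IPv4 flow, using the HEK constants defined in mvpp2_cls.h further down in this patch:

        u16 requested = MVPP22_CLS_HEK_IP4_5T;   /* IPs + L4 ports */
        u16 supported = MVPP22_CLS_HEK_IP4_2T;   /* fragments: IPs only */
        u16 hash_opts = supported & requested;   /* == MVPP22_CLS_HEK_IP4_2T */

        /* No L4 fields remain, so the C3HA engine (no L4 info) is chosen */
        int engine = (hash_opts & MVPP22_CLS_HEK_L4_OPTS) ?
                     MVPP22_CLS_ENGINE_C3HB : MVPP22_CLS_ENGINE_C3HA;
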
+
+u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
+{
+       u16 hash_opts = 0;
+       int n_fields, i, field;
+
+       n_fields = mvpp2_cls_flow_hek_num_get(fe);
+
+       for (i = 0; i < n_fields; i++) {
+               field = mvpp2_cls_flow_hek_get(fe, i);
+
+               switch (field) {
+               case MVPP22_CLS_FIELD_MAC_DA:
+                       hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
+                       break;
+               case MVPP22_CLS_FIELD_VLAN:
+                       hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
+                       break;
+               case MVPP22_CLS_FIELD_L3_PROTO:
+                       hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
+                       break;
+               case MVPP22_CLS_FIELD_IP4SA:
+                       hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
+                       break;
+               case MVPP22_CLS_FIELD_IP4DA:
+                       hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
+                       break;
+               case MVPP22_CLS_FIELD_IP6SA:
+                       hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
+                       break;
+               case MVPP22_CLS_FIELD_IP6DA:
+                       hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
+                       break;
+               case MVPP22_CLS_FIELD_L4SIP:
+                       hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
+                       break;
+               case MVPP22_CLS_FIELD_L4DIP:
+                       hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
+                       break;
+               default:
+                       break;
+               }
+       }
+       return hash_opts;
+}
+
+/* Returns the hash opts for this flow. There are several classifier flows
+ * for one traffic flow, so this returns an aggregation of all configurations.
+ */
+static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
+{
+       struct mvpp2_cls_flow_entry fe;
+       struct mvpp2_cls_flow *flow;
+       int i, flow_index;
+       u16 hash_opts = 0;
+
+       for (i = 0; i < MVPP2_N_FLOWS; i++) {
+               flow = mvpp2_cls_flow_get(i);
+               if (!flow)
+                       return 0;
+
+               if (flow->flow_type != flow_type)
+                       continue;
+
+               flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
+                                                       flow->flow_id);
+
+               mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+               hash_opts |= mvpp2_flow_get_hek_fields(&fe);
+       }
+
+       return hash_opts;
+}
+
+static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
+{
+       struct mvpp2_cls_flow *flow;
+       int i;
+
+       for (i = 0; i < MVPP2_N_FLOWS; i++) {
+               flow = mvpp2_cls_flow_get(i);
+               if (!flow)
+                       break;
+
+               mvpp2_cls_flow_prs_init(priv, flow);
+               mvpp2_cls_flow_lkp_init(priv, flow);
+               mvpp2_cls_flow_init(priv, flow);
+       }
+}
+
+static void mvpp2_cls_c2_write(struct mvpp2 *priv,
+                              struct mvpp2_cls_c2_entry *c2)
+{
+       mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
+
+       /* Write TCAM */
+       mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
+       mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
+       mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
+       mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
+       mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
+
+       mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
+
+       mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
+       mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
+       mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
+       mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
+}
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+                      struct mvpp2_cls_c2_entry *c2)
+{
+       mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
+
+       c2->index = index;
+
+       c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
+       c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
+       c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
+       c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
+       c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
+
+       c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
+
+       c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
+       c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
+       c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
+       c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
+}
+
+static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
+{
+       struct mvpp2_cls_c2_entry c2;
+       u8 qh, ql, pmap;
+
+       memset(&c2, 0, sizeof(c2));
+
+       c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id);
+
+       pmap = BIT(port->id);
+       c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
+       c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
+
+       /* Update RSS status after matching this entry */
+       c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
+
+       /* Mark packet as "forwarded to software", needed for RSS */
+       c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
+
+       /* Configure the default rx queue: update Queue Low and Queue High, but
+        * don't lock, since the rx queue selection might be overridden by RSS
+        */
+       c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
+                  MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);
+
+       qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+       ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+       c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+                     MVPP22_CLS_C2_ATTR0_QLOW(ql);
+
+       mvpp2_cls_c2_write(port->priv, &c2);
+}
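
The C2 attributes split the physical rx queue number into a 5-bit "queue high" and a 3-bit "queue low" part, so the effective queue is qh * 8 + ql. A worked example, assuming first_rxq is 28:

        u8 qh = (28 >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;   /* 3 */
        u8 ql = 28 & MVPP22_CLS_C2_ATTR0_QLOW_MASK;           /* 4 */
        /* effective rx queue: qh * 8 + ql == 3 * 8 + 4 == 28 */
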
+
 /* Classifier default initialization */
 void mvpp2_cls_init(struct mvpp2 *priv)
 {
@@ -61,6 +839,8 @@ void mvpp2_cls_init(struct mvpp2 *priv)
                le.way = 1;
                mvpp2_cls_lookup_write(priv, &le);
        }
+
+       mvpp2_cls_port_init_flows(priv);
 }
 
 void mvpp2_cls_port_config(struct mvpp2_port *port)
@@ -89,6 +869,47 @@ void mvpp2_cls_port_config(struct mvpp2_port *port)
 
        /* Update lookup ID table entry */
        mvpp2_cls_lookup_write(port->priv, &le);
+
+       mvpp2_port_c2_cls_init(port);
+}
+
+u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
+{
+       mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index);
+
+       return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
+}
+
+static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
+{
+       struct mvpp2_cls_c2_entry c2;
+
+       mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+       c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
+
+       mvpp2_cls_c2_write(port->priv, &c2);
+}
+
+static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
+{
+       struct mvpp2_cls_c2_entry c2;
+
+       mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+       c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
+
+       mvpp2_cls_c2_write(port->priv, &c2);
+}
+
+void mvpp22_rss_enable(struct mvpp2_port *port)
+{
+       mvpp2_rss_port_c2_enable(port);
+}
+
+void mvpp22_rss_disable(struct mvpp2_port *port)
+{
+       mvpp2_rss_port_c2_disable(port);
 }
 
 /* Set CPU queue number for oversize packets */
@@ -107,7 +928,116 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
        mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
 }
 
-void mvpp22_init_rss(struct mvpp2_port *port)
+static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
+{
+       int nrxqs, cpu, cpus = num_possible_cpus();
+
+       /* Number of RXQs per CPU */
+       nrxqs = port->nrxqs / cpus;
+
+       /* CPU that will handle this rx queue */
+       cpu = rxq / nrxqs;
+
+       if (!cpu_online(cpu))
+               return port->first_rxq;
+
+       /* Indirection to better distribute the packets on the CPUs when
+        * configuring the RSS queues.
+        */
+       return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
+}
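
A worked example of the spreading formula, assuming a port with 16 rx queues shared by 4 possible CPUs (so 4 queues per CPU):

        unsigned int nrxqs = 16 / 4;   /* 4 RXQs per CPU */
        unsigned int rxq = 5;          /* entry taken from the indirection table */

        /* rxq 5 belongs to cpu = 5 / 4 = 1; if that CPU is online: */
        unsigned int q = (rxq * nrxqs + rxq / 4) % 16;   /* (20 + 1) % 16 == 5 */
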
+
+void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
+{
+       struct mvpp2 *priv = port->priv;
+       int i;
+
+       for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
+               u32 sel = MVPP22_RSS_INDEX_TABLE(table) |
+                         MVPP22_RSS_INDEX_TABLE_ENTRY(i);
+               mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
+
+               mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
+                           mvpp22_rxfh_indir(port, port->indir[i]));
+       }
+}
+
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+{
+       u16 hash_opts = 0;
+
+       switch (info->flow_type) {
+       case TCP_V4_FLOW:
+       case UDP_V4_FLOW:
+       case TCP_V6_FLOW:
+       case UDP_V6_FLOW:
+               if (info->data & RXH_L4_B_0_1)
+                       hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
+               if (info->data & RXH_L4_B_2_3)
+                       hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
+               /* Fallthrough */
+       case IPV4_FLOW:
+       case IPV6_FLOW:
+               if (info->data & RXH_L2DA)
+                       hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
+               if (info->data & RXH_VLAN)
+                       hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
+               if (info->data & RXH_L3_PROTO)
+                       hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
+               if (info->data & RXH_IP_SRC)
+                       hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA |
+                                    MVPP22_CLS_HEK_OPT_IP6SA);
+               if (info->data & RXH_IP_DST)
+                       hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA |
+                                    MVPP22_CLS_HEK_OPT_IP6DA);
+               break;
+       default: return -EOPNOTSUPP;
+       }
+
+       return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts);
+}
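
A minimal sketch of the kind of request the ethtool core hands to this setter when asking for 5-tuple hashing on TCPv4; struct ethtool_rxnfc and the RXH_* flags come from the uapi ethtool header, and from userspace this corresponds to something like "ethtool -N ethX rx-flow-hash tcp4 sdfn":

        struct ethtool_rxnfc info = {
                .flow_type = TCP_V4_FLOW,
                .data = RXH_IP_SRC | RXH_IP_DST |
                        RXH_L4_B_0_1 | RXH_L4_B_2_3,
        };

        /* The TCP_V4 classifier flows mask this down to MVPP22_CLS_HEK_IP4_5T */
        int err = mvpp2_ethtool_rxfh_set(port, &info);
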
+
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+{
+       unsigned long hash_opts;
+       int i;
+
+       hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type);
+       info->data = 0;
+
+       for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
+               switch (BIT(i)) {
+               case MVPP22_CLS_HEK_OPT_MAC_DA:
+                       info->data |= RXH_L2DA;
+                       break;
+               case MVPP22_CLS_HEK_OPT_VLAN:
+                       info->data |= RXH_VLAN;
+                       break;
+               case MVPP22_CLS_HEK_OPT_L3_PROTO:
+                       info->data |= RXH_L3_PROTO;
+                       break;
+               case MVPP22_CLS_HEK_OPT_IP4SA:
+               case MVPP22_CLS_HEK_OPT_IP6SA:
+                       info->data |= RXH_IP_SRC;
+                       break;
+               case MVPP22_CLS_HEK_OPT_IP4DA:
+               case MVPP22_CLS_HEK_OPT_IP6DA:
+                       info->data |= RXH_IP_DST;
+                       break;
+               case MVPP22_CLS_HEK_OPT_L4SIP:
+                       info->data |= RXH_L4_B_0_1;
+                       break;
+               case MVPP22_CLS_HEK_OPT_L4DIP:
+                       info->data |= RXH_L4_B_2_3;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
+
+void mvpp22_rss_port_init(struct mvpp2_port *port)
 {
        struct mvpp2 *priv = port->priv;
        int i;
@@ -115,27 +1045,30 @@ void mvpp22_init_rss(struct mvpp2_port *port)
        /* Set the table width: replace the whole classifier Rx queue number
         * with the ones configured in RSS table entries.
         */
-       mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0));
+       mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
        mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
 
-       /* Loop through the classifier Rx Queues and map them to a RSS table.
-        * Map them all to the first table (0) by default.
+       /* The default RxQ is used as a key to select the RSS table to use.
+        * We use one RSS table per port.
         */
-       for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) {
-               mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i));
-               mvpp2_write(priv, MVPP22_RSS_TABLE,
-                           MVPP22_RSS_TABLE_POINTER(0));
-       }
+       mvpp2_write(priv, MVPP22_RSS_INDEX,
+                   MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
+       mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
+                   MVPP22_RSS_TABLE_POINTER(port->id));
 
        /* Configure the first table to evenly distribute the packets across
-        * real Rx Queues. The table entries map a hash to an port Rx Queue.
+        * real Rx Queues. The table entries map a hash to a port Rx Queue.
         */
-       for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
-               u32 sel = MVPP22_RSS_INDEX_TABLE(0) |
-                         MVPP22_RSS_INDEX_TABLE_ENTRY(i);
-               mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
+       for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
+               port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
 
-               mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs);
-       }
+       mvpp22_rss_fill_table(port, port->id);
 
+       /* Configure default flows */
+       mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
+       mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
+       mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
+       mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
+       mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
+       mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
 }
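
ethtool_rxfh_indir_default() is the generic kernel helper that spreads indirection entries round-robin across the rx queues (entry i maps to queue i % nrxqs), so a port starts out with a repeating 0, 1, ..., nrxqs - 1 pattern over its 32 table entries. A sketch of what the loop above produces, assuming port->nrxqs == 4:

        u32 indir[MVPP22_RSS_TABLE_ENTRIES];
        int i;

        for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
                indir[i] = i % 4;   /* ethtool_rxfh_indir_default(i, 4) */
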
index 8e1d7f9ffa0b3cd222ec796106c4e91426391d10..089f05f298917ae394b9245cdb255259d269b3c7 100644 (file)
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * RSS and Classifier definitions for Marvell PPv2 Network Controller
  *
  * Copyright (C) 2014 Marvell
  *
  * Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 
 #ifndef _MVPP2_CLS_H_
 #define _MVPP2_CLS_H_
 
+#include "mvpp2.h"
+#include "mvpp2_prs.h"
+
 /* Classifier constants */
 #define MVPP2_CLS_FLOWS_TBL_SIZE       512
 #define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
 #define MVPP2_CLS_LKP_TBL_SIZE         64
 #define MVPP2_CLS_RX_QUEUES            256
 
-/* RSS constants */
-#define MVPP22_RSS_TABLE_ENTRIES       32
+/* Classifier flow constants */
+
+#define MVPP2_FLOW_N_FIELDS            4
+
+enum mvpp2_cls_engine {
+       MVPP22_CLS_ENGINE_C2 = 1,
+       MVPP22_CLS_ENGINE_C3A,
+       MVPP22_CLS_ENGINE_C3B,
+       MVPP22_CLS_ENGINE_C4,
+       MVPP22_CLS_ENGINE_C3HA = 6,
+       MVPP22_CLS_ENGINE_C3HB = 7,
+};
+
+#define MVPP22_CLS_HEK_OPT_MAC_DA      BIT(0)
+#define MVPP22_CLS_HEK_OPT_VLAN                BIT(1)
+#define MVPP22_CLS_HEK_OPT_L3_PROTO    BIT(2)
+#define MVPP22_CLS_HEK_OPT_IP4SA       BIT(3)
+#define MVPP22_CLS_HEK_OPT_IP4DA       BIT(4)
+#define MVPP22_CLS_HEK_OPT_IP6SA       BIT(5)
+#define MVPP22_CLS_HEK_OPT_IP6DA       BIT(6)
+#define MVPP22_CLS_HEK_OPT_L4SIP       BIT(7)
+#define MVPP22_CLS_HEK_OPT_L4DIP       BIT(8)
+#define MVPP22_CLS_HEK_N_FIELDS                9
+
+#define MVPP22_CLS_HEK_L4_OPTS (MVPP22_CLS_HEK_OPT_L4SIP | \
+                                MVPP22_CLS_HEK_OPT_L4DIP)
+
+#define MVPP22_CLS_HEK_IP4_2T  (MVPP22_CLS_HEK_OPT_IP4SA | \
+                                MVPP22_CLS_HEK_OPT_IP4DA)
+
+#define MVPP22_CLS_HEK_IP6_2T  (MVPP22_CLS_HEK_OPT_IP6SA | \
+                                MVPP22_CLS_HEK_OPT_IP6DA)
+
+/* The fifth tuple in "5T" is the L4_Info field */
+#define MVPP22_CLS_HEK_IP4_5T  (MVPP22_CLS_HEK_IP4_2T | \
+                                MVPP22_CLS_HEK_L4_OPTS)
+
+#define MVPP22_CLS_HEK_IP6_5T  (MVPP22_CLS_HEK_IP6_2T | \
+                                MVPP22_CLS_HEK_L4_OPTS)
+
+enum mvpp2_cls_field_id {
+       MVPP22_CLS_FIELD_MAC_DA = 0x03,
+       MVPP22_CLS_FIELD_VLAN = 0x06,
+       MVPP22_CLS_FIELD_L3_PROTO = 0x0f,
+       MVPP22_CLS_FIELD_IP4SA = 0x10,
+       MVPP22_CLS_FIELD_IP4DA = 0x11,
+       MVPP22_CLS_FIELD_IP6SA = 0x17,
+       MVPP22_CLS_FIELD_IP6DA = 0x1a,
+       MVPP22_CLS_FIELD_L4SIP = 0x1d,
+       MVPP22_CLS_FIELD_L4DIP = 0x1e,
+};
+
+enum mvpp2_cls_flow_seq {
+       MVPP2_CLS_FLOW_SEQ_NORMAL = 0,
+       MVPP2_CLS_FLOW_SEQ_FIRST1,
+       MVPP2_CLS_FLOW_SEQ_FIRST2,
+       MVPP2_CLS_FLOW_SEQ_LAST,
+       MVPP2_CLS_FLOW_SEQ_MIDDLE
+};
+
+/* Classifier C2 engine constants */
+#define MVPP22_CLS_C2_TCAM_EN(data)            ((data) << 16)
+
+enum mvpp22_cls_c2_action {
+       MVPP22_C2_NO_UPD = 0,
+       MVPP22_C2_NO_UPD_LOCK,
+       MVPP22_C2_UPD,
+       MVPP22_C2_UPD_LOCK,
+};
+
+enum mvpp22_cls_c2_fwd_action {
+       MVPP22_C2_FWD_NO_UPD = 0,
+       MVPP22_C2_FWD_NO_UPD_LOCK,
+       MVPP22_C2_FWD_SW,
+       MVPP22_C2_FWD_SW_LOCK,
+       MVPP22_C2_FWD_HW,
+       MVPP22_C2_FWD_HW_LOCK,
+       MVPP22_C2_FWD_HW_LOW_LAT,
+       MVPP22_C2_FWD_HW_LOW_LAT_LOCK,
+};
+
+#define MVPP2_CLS_C2_TCAM_WORDS                        5
+#define MVPP2_CLS_C2_ATTR_WORDS                        5
+
+struct mvpp2_cls_c2_entry {
+       u32 index;
+       u32 tcam[MVPP2_CLS_C2_TCAM_WORDS];
+       u32 act;
+       u32 attr[MVPP2_CLS_C2_ATTR_WORDS];
+};
+
+/* Classifier C2 engine entries */
+#define MVPP22_CLS_C2_RSS_ENTRY(port)  (port)
+#define MVPP22_CLS_C2_N_ENTRIES                MVPP2_MAX_PORTS
 
+/* RSS flow entries in the flow table. We have 2 entries per port for RSS.
+ *
+ * The first performs a lookup using the C2 TCAM engine, to tag the
+ * packet for software forwarding (needed for RSS), enable or disable RSS, and
+ * assign the default rx queue.
+ *
+ * The second configures the hash generation, by specifying which fields of the
+ * packet header are used to generate the hash, and specifies the relevant hash
+ * engine to use.
+ */
+#define MVPP22_RSS_FLOW_C2_OFFS                0
+#define MVPP22_RSS_FLOW_HASH_OFFS      1
+#define MVPP22_RSS_FLOW_SIZE           (MVPP22_RSS_FLOW_HASH_OFFS + 1)
+
+#define MVPP22_RSS_FLOW_C2(port)       ((port) * MVPP22_RSS_FLOW_SIZE + \
+                                        MVPP22_RSS_FLOW_C2_OFFS)
+#define MVPP22_RSS_FLOW_HASH(port)     ((port) * MVPP22_RSS_FLOW_SIZE + \
+                                        MVPP22_RSS_FLOW_HASH_OFFS)
+#define MVPP22_RSS_FLOW_FIRST(port)    MVPP22_RSS_FLOW_C2(port)
+
+/* Packet flow ID */
+enum mvpp2_prs_flow {
+       MVPP2_FL_START = 8,
+       MVPP2_FL_IP4_TCP_NF_UNTAG = MVPP2_FL_START,
+       MVPP2_FL_IP4_UDP_NF_UNTAG,
+       MVPP2_FL_IP4_TCP_NF_TAG,
+       MVPP2_FL_IP4_UDP_NF_TAG,
+       MVPP2_FL_IP6_TCP_NF_UNTAG,
+       MVPP2_FL_IP6_UDP_NF_UNTAG,
+       MVPP2_FL_IP6_TCP_NF_TAG,
+       MVPP2_FL_IP6_UDP_NF_TAG,
+       MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+       MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+       MVPP2_FL_IP4_TCP_FRAG_TAG,
+       MVPP2_FL_IP4_UDP_FRAG_TAG,
+       MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+       MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+       MVPP2_FL_IP6_TCP_FRAG_TAG,
+       MVPP2_FL_IP6_UDP_FRAG_TAG,
+       MVPP2_FL_IP4_UNTAG, /* non-TCP, non-UDP, same for below */
+       MVPP2_FL_IP4_TAG,
+       MVPP2_FL_IP6_UNTAG,
+       MVPP2_FL_IP6_TAG,
+       MVPP2_FL_NON_IP_UNTAG,
+       MVPP2_FL_NON_IP_TAG,
+       MVPP2_FL_LAST,
+};
+
+struct mvpp2_cls_flow {
+       /* The L2-L4 traffic flow type */
+       int flow_type;
+
+       /* The first id in the flow table for this flow */
+       u16 flow_id;
+
+       /* The supported HEK fields for this flow */
+       u16 supported_hash_opts;
+
+       /* The Header Parser result_info that matches this flow */
+       struct mvpp2_prs_result_info prs_ri;
+};
+
+#define MVPP2_N_FLOWS  52
+
+#define MVPP2_ENTRIES_PER_FLOW                 (MVPP2_MAX_PORTS + 1)
+#define MVPP2_FLOW_C2_ENTRY(id)                        ((id) * MVPP2_ENTRIES_PER_FLOW)
+#define MVPP2_PORT_FLOW_HASH_ENTRY(port, id)   ((id) * MVPP2_ENTRIES_PER_FLOW + \
+                                               (port) + 1)
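
A worked example of the index arithmetic, assuming MVPP2_MAX_PORTS is 4 so that each flow owns 5 consecutive flow-table entries:

        int flow_id = MVPP2_FL_IP4_TCP_NF_UNTAG;             /* == 8 */
        int c2 = MVPP2_FLOW_C2_ENTRY(flow_id);               /* 8 * 5 == 40 */
        int hash = MVPP2_PORT_FLOW_HASH_ENTRY(3, flow_id);   /* 40 + 3 + 1 == 44 */
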
 struct mvpp2_cls_flow_entry {
        u32 index;
        u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
@@ -33,7 +193,15 @@ struct mvpp2_cls_lookup_entry {
        u32 data;
 };
 
-void mvpp22_init_rss(struct mvpp2_port *port);
+void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table);
+
+void mvpp22_rss_port_init(struct mvpp2_port *port);
+
+void mvpp22_rss_enable(struct mvpp2_port *port);
+void mvpp22_rss_disable(struct mvpp2_port *port);
+
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
 
 void mvpp2_cls_init(struct mvpp2 *priv);
 
@@ -41,4 +209,25 @@ void mvpp2_cls_port_config(struct mvpp2_port *port);
 
 void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port);
 
+int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe);
+
+u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe);
+
+struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
+
+u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index);
+
+void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
+                        struct mvpp2_cls_flow_entry *fe);
+
+u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index);
+
+void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
+                          struct mvpp2_cls_lookup_entry *le);
+
+u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index);
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+                      struct mvpp2_cls_c2_entry *c2);
+
 #endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
new file mode 100644 (file)
index 0000000..02dfef1
--- /dev/null
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Marvell PPv2 network controller for Armada 375 SoC.
+ *
+ * Copyright (C) 2018 Marvell
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+
+#include "mvpp2.h"
+#include "mvpp2_prs.h"
+#include "mvpp2_cls.h"
+
+struct mvpp2_dbgfs_prs_entry {
+       int tid;
+       struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_flow_entry {
+       int flow;
+       struct mvpp2 *priv;
+};
+
+struct mvpp2_dbgfs_port_flow_entry {
+       struct mvpp2_port *port;
+       struct mvpp2_dbgfs_flow_entry *dbg_fe;
+};
+
+static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_dbgfs_flow_entry *entry = s->private;
+       int id = MVPP2_FLOW_C2_ENTRY(entry->flow);
+
+       u32 hits = mvpp2_cls_flow_hits(entry->priv, id);
+
+       seq_printf(s, "%u\n", hits);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_flt_hits);
+
+static int mvpp2_dbgfs_flow_dec_hits_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_dbgfs_flow_entry *entry = s->private;
+
+       u32 hits = mvpp2_cls_lookup_hits(entry->priv, entry->flow);
+
+       seq_printf(s, "%u\n", hits);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits);
+
+static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_dbgfs_flow_entry *entry = s->private;
+       struct mvpp2_cls_flow *f;
+       const char *flow_name;
+
+       f = mvpp2_cls_flow_get(entry->flow);
+       if (!f)
+               return -EINVAL;
+
+       switch (f->flow_type) {
+       case IPV4_FLOW:
+               flow_name = "ipv4";
+               break;
+       case IPV6_FLOW:
+               flow_name = "ipv6";
+               break;
+       case TCP_V4_FLOW:
+               flow_name = "tcp4";
+               break;
+       case TCP_V6_FLOW:
+               flow_name = "tcp6";
+               break;
+       case UDP_V4_FLOW:
+               flow_name = "udp4";
+               break;
+       case UDP_V6_FLOW:
+               flow_name = "udp6";
+               break;
+       default:
+               flow_name = "other";
+       }
+
+       seq_printf(s, "%s\n", flow_name);
+
+       return 0;
+}
+
+static int mvpp2_dbgfs_flow_type_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, mvpp2_dbgfs_flow_type_show, inode->i_private);
+}
+
+static int mvpp2_dbgfs_flow_type_release(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq = file->private_data;
+       struct mvpp2_dbgfs_flow_entry *flow_entry = seq->private;
+
+       kfree(flow_entry);
+       return single_release(inode, file);
+}
+
+static const struct file_operations mvpp2_dbgfs_flow_type_fops = {
+       .open = mvpp2_dbgfs_flow_type_open,
+       .read = seq_read,
+       .release = mvpp2_dbgfs_flow_type_release,
+};
+
+static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_dbgfs_flow_entry *entry = s->private;
+       struct mvpp2_cls_flow *f;
+
+       f = mvpp2_cls_flow_get(entry->flow);
+       if (!f)
+               return -EINVAL;
+
+       seq_printf(s, "%d\n", f->flow_id);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_id);
+
+static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
+       struct mvpp2_port *port = entry->port;
+       struct mvpp2_cls_flow_entry fe;
+       struct mvpp2_cls_flow *f;
+       int flow_index;
+       u16 hash_opts;
+
+       f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
+       if (!f)
+               return -EINVAL;
+
+       flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+
+       mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+       hash_opts = mvpp2_flow_get_hek_fields(&fe);
+
+       seq_printf(s, "0x%04x\n", hash_opts);
+
+       return 0;
+}
+
+static int mvpp2_dbgfs_port_flow_hash_opt_open(struct inode *inode,
+                                              struct file *file)
+{
+       return single_open(file, mvpp2_dbgfs_port_flow_hash_opt_show,
+                          inode->i_private);
+}
+
+static int mvpp2_dbgfs_port_flow_hash_opt_release(struct inode *inode,
+                                                 struct file *file)
+{
+       struct seq_file *seq = file->private_data;
+       struct mvpp2_dbgfs_port_flow_entry *flow_entry = seq->private;
+
+       kfree(flow_entry);
+       return single_release(inode, file);
+}
+
+static const struct file_operations mvpp2_dbgfs_port_flow_hash_opt_fops = {
+       .open = mvpp2_dbgfs_port_flow_hash_opt_open,
+       .read = seq_read,
+       .release = mvpp2_dbgfs_port_flow_hash_opt_release,
+};
+
+static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
+       struct mvpp2_port *port = entry->port;
+       struct mvpp2_cls_flow_entry fe;
+       struct mvpp2_cls_flow *f;
+       int flow_index, engine;
+
+       f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
+       if (!f)
+               return -EINVAL;
+
+       flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+
+       mvpp2_cls_flow_read(port->priv, flow_index, &fe);
+
+       engine = mvpp2_cls_flow_eng_get(&fe);
+
+       seq_printf(s, "%d\n", engine);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine);
+
+static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_port *port = s->private;
+       u32 hits;
+
+       hits = mvpp2_cls_c2_hit_count(port->priv,
+                                     MVPP22_CLS_C2_RSS_ENTRY(port->id));
+
+       seq_printf(s, "%u\n", hits);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits);
+
+static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_port *port = s->private;
+       struct mvpp2_cls_c2_entry c2;
+       u8 qh, ql;
+
+       mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+       qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) &
+            MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+
+       ql = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QLOW_OFFS) &
+            MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+       seq_printf(s, "%d\n", (qh << 3 | ql));
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq);
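The hardware keeps the default rxq split across two fields of c2.attr[0], so the file above stitches it back together; the low part is 3 bits wide, hence the shift. A small sketch of the decoding, using the MVPP22_CLS_C2_ATTR0_* names from the header:

/* rxq = (qhigh << 3) | qlow; e.g. qhigh = 2, qlow = 5 gives rxq 21 */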
+
+static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_port *port = s->private;
+       struct mvpp2_cls_c2_entry c2;
+       int enabled;
+
+       mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+
+       enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN);
+
+       seq_printf(s, "%d\n", enabled);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_enable);
+
+static int mvpp2_dbgfs_port_vid_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_port *port = s->private;
+       unsigned char byte[2], enable[2];
+       struct mvpp2 *priv = port->priv;
+       struct mvpp2_prs_entry pe;
+       unsigned long pmap;
+       u16 rvid;
+       int tid;
+
+       for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
+            tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
+               if (!priv->prs_shadow[tid].valid)
+                       continue;
+
+               mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+               pmap = mvpp2_prs_tcam_port_map_get(&pe);
+
+               if (!test_bit(port->id, &pmap))
+                       continue;
+
+               mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
+               mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
+
+               rvid = ((byte[0] & 0xf) << 8) + byte[1];
+
+               seq_printf(s, "%u\n", rvid);
+       }
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_vid);
+
+static int mvpp2_dbgfs_port_parser_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_port *port = s->private;
+       struct mvpp2 *priv = port->priv;
+       struct mvpp2_prs_entry pe;
+       unsigned long pmap;
+       int i;
+
+       for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
+               mvpp2_prs_init_from_hw(port->priv, &pe, i);
+
+               pmap = mvpp2_prs_tcam_port_map_get(&pe);
+               if (priv->prs_shadow[i].valid && test_bit(port->id, &pmap))
+                       seq_printf(s, "%03d\n", i);
+       }
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_parser);
+
+static int mvpp2_dbgfs_filter_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_port *port = s->private;
+       struct mvpp2 *priv = port->priv;
+       struct mvpp2_prs_entry pe;
+       unsigned long pmap;
+       int index, tid;
+
+       for (tid = MVPP2_PE_MAC_RANGE_START;
+            tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
+               unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
+
+               if (!priv->prs_shadow[tid].valid ||
+                   priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC ||
+                   priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)
+                       continue;
+
+               mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+               pmap = mvpp2_prs_tcam_port_map_get(&pe);
+
+               /* We only want entries active on this port */
+               if (!test_bit(port->id, &pmap))
+                       continue;
+
+               /* Read mac addr from entry */
+               for (index = 0; index < ETH_ALEN; index++)
+                       mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
+                                                    &da_mask[index]);
+
+               seq_printf(s, "%pM\n", da);
+       }
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_filter);
+
+static int mvpp2_dbgfs_prs_lu_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_dbgfs_prs_entry *entry = s->private;
+       struct mvpp2 *priv = entry->priv;
+
+       seq_printf(s, "%x\n", priv->prs_shadow[entry->tid].lu);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_lu);
+
+static int mvpp2_dbgfs_prs_pmap_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_dbgfs_prs_entry *entry = s->private;
+       struct mvpp2_prs_entry pe;
+       unsigned int pmap;
+
+       mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+       pmap = mvpp2_prs_tcam_port_map_get(&pe);
+       pmap &= MVPP2_PRS_PORT_MASK;
+
+       seq_printf(s, "%02x\n", pmap);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_pmap);
+
+static int mvpp2_dbgfs_prs_ai_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_dbgfs_prs_entry *entry = s->private;
+       struct mvpp2_prs_entry pe;
+       unsigned char ai, ai_mask;
+
+       mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+       ai = pe.tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
+       ai_mask = (pe.tcam[MVPP2_PRS_TCAM_AI_WORD] >> 16) & MVPP2_PRS_AI_MASK;
+
+       seq_printf(s, "%02x %02x\n", ai, ai_mask);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_ai);
+
+static int mvpp2_dbgfs_prs_hdata_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_dbgfs_prs_entry *entry = s->private;
+       struct mvpp2_prs_entry pe;
+       unsigned char data[8], mask[8];
+       int i;
+
+       mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+       for (i = 0; i < 8; i++)
+               mvpp2_prs_tcam_data_byte_get(&pe, i, &data[i], &mask[i]);
+
+       seq_printf(s, "%*phN %*phN\n", 8, data, 8, mask);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hdata);
+
+static int mvpp2_dbgfs_prs_sram_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_dbgfs_prs_entry *entry = s->private;
+       struct mvpp2_prs_entry pe;
+
+       mvpp2_prs_init_from_hw(entry->priv, &pe, entry->tid);
+
+       seq_printf(s, "%*phN\n", 14, pe.sram);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_sram);
+
+static int mvpp2_dbgfs_prs_hits_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_dbgfs_prs_entry *entry = s->private;
+       int val;
+
+       val = mvpp2_prs_hits(entry->priv, entry->tid);
+       if (val < 0)
+               return val;
+
+       seq_printf(s, "%d\n", val);
+
+       return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_hits);
+
+static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused)
+{
+       struct mvpp2_dbgfs_prs_entry *entry = s->private;
+       struct mvpp2 *priv = entry->priv;
+       int tid = entry->tid;
+
+       seq_printf(s, "%d\n", priv->prs_shadow[tid].valid ? 1 : 0);
+
+       return 0;
+}
+
+static int mvpp2_dbgfs_prs_valid_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, mvpp2_dbgfs_prs_valid_show, inode->i_private);
+}
+
+static int mvpp2_dbgfs_prs_valid_release(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq = file->private_data;
+       struct mvpp2_dbgfs_prs_entry *entry = seq->private;
+
+       kfree(entry);
+       return single_release(inode, file);
+}
+
+static const struct file_operations mvpp2_dbgfs_prs_valid_fops = {
+       .open = mvpp2_dbgfs_prs_valid_open,
+       .read = seq_read,
+       .release = mvpp2_dbgfs_prs_valid_release,
+};
+
+static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
+                                     struct mvpp2_port *port,
+                                     struct mvpp2_dbgfs_flow_entry *entry)
+{
+       struct mvpp2_dbgfs_port_flow_entry *port_entry;
+       struct dentry *port_dir;
+
+       port_dir = debugfs_create_dir(port->dev->name, parent);
+       if (IS_ERR(port_dir))
+               return PTR_ERR(port_dir);
+
+       /* This will be freed by the 'hash_opts' release op */
+       port_entry = kmalloc(sizeof(*port_entry), GFP_KERNEL);
+       if (!port_entry)
+               return -ENOMEM;
+
+       port_entry->port = port;
+       port_entry->dbg_fe = entry;
+
+       debugfs_create_file("hash_opts", 0444, port_dir, port_entry,
+                           &mvpp2_dbgfs_port_flow_hash_opt_fops);
+
+       debugfs_create_file("engine", 0444, port_dir, port_entry,
+                           &mvpp2_dbgfs_port_flow_engine_fops);
+
+       return 0;
+}
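A note on the lifetime scheme used throughout this file: each kmalloc'ed entry is owned by exactly one debugfs file whose custom release op frees it ('hash_opts' here, 'type' and 'valid' elsewhere); sibling files such as 'engine' share the same pointer but keep the stock single_release, so each allocation is freed exactly once, when its owning file goes away.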
+
+static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
+                                      struct mvpp2 *priv, int flow)
+{
+       struct mvpp2_dbgfs_flow_entry *entry;
+       struct dentry *flow_entry_dir;
+       char flow_entry_name[10];
+       int i, ret;
+
+       sprintf(flow_entry_name, "%02d", flow);
+
+       flow_entry_dir = debugfs_create_dir(flow_entry_name, parent);
+       if (!flow_entry_dir)
+               return -ENOMEM;
+
+       /* This will be freed by the 'type' release op */
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       entry->flow = flow;
+       entry->priv = priv;
+
+       debugfs_create_file("flow_hits", 0444, flow_entry_dir, entry,
+                           &mvpp2_dbgfs_flow_flt_hits_fops);
+
+       debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry,
+                           &mvpp2_dbgfs_flow_dec_hits_fops);
+
+       debugfs_create_file("type", 0444, flow_entry_dir, entry,
+                           &mvpp2_dbgfs_flow_type_fops);
+
+       debugfs_create_file("id", 0444, flow_entry_dir, entry,
+                           &mvpp2_dbgfs_flow_id_fops);
+
+       /* Create entry for each port */
+       for (i = 0; i < priv->port_count; i++) {
+               ret = mvpp2_dbgfs_flow_port_init(flow_entry_dir,
+                                                priv->port_list[i], entry);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
+static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv)
+{
+       struct dentry *flow_dir;
+       int i, ret;
+
+       flow_dir = debugfs_create_dir("flows", parent);
+       if (!flow_dir)
+               return -ENOMEM;
+
+       for (i = 0; i < MVPP2_N_FLOWS; i++) {
+               ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent,
+                                     struct mvpp2 *priv, int tid)
+{
+       struct mvpp2_dbgfs_prs_entry *entry;
+       struct dentry *prs_entry_dir;
+       char prs_entry_name[10];
+
+       if (tid >= MVPP2_PRS_TCAM_SRAM_SIZE)
+               return -EINVAL;
+
+       sprintf(prs_entry_name, "%03d", tid);
+
+       prs_entry_dir = debugfs_create_dir(prs_entry_name, parent);
+       if (!prs_entry_dir)
+               return -ENOMEM;
+
+       /* This will be freed by the 'valid' file's release op */
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       entry->tid = tid;
+       entry->priv = priv;
+
+       /* Create each attr */
+       debugfs_create_file("sram", 0444, prs_entry_dir, entry,
+                           &mvpp2_dbgfs_prs_sram_fops);
+
+       debugfs_create_file("valid", 0644, prs_entry_dir, entry,
+                           &mvpp2_dbgfs_prs_valid_fops);
+
+       debugfs_create_file("lookup_id", 0644, prs_entry_dir, entry,
+                           &mvpp2_dbgfs_prs_lu_fops);
+
+       debugfs_create_file("ai", 0644, prs_entry_dir, entry,
+                           &mvpp2_dbgfs_prs_ai_fops);
+
+       debugfs_create_file("header_data", 0644, prs_entry_dir, entry,
+                           &mvpp2_dbgfs_prs_hdata_fops);
+
+       debugfs_create_file("hits", 0444, prs_entry_dir, entry,
+                           &mvpp2_dbgfs_prs_hits_fops);
+
+       return 0;
+}
+
+static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv)
+{
+       struct dentry *prs_dir;
+       int i, ret;
+
+       prs_dir = debugfs_create_dir("parser", parent);
+       if (!prs_dir)
+               return -ENOMEM;
+
+       for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
+               ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int mvpp2_dbgfs_port_init(struct dentry *parent,
+                                struct mvpp2_port *port)
+{
+       struct dentry *port_dir;
+
+       port_dir = debugfs_create_dir(port->dev->name, parent);
+       if (IS_ERR(port_dir))
+               return PTR_ERR(port_dir);
+
+       debugfs_create_file("parser_entries", 0444, port_dir, port,
+                           &mvpp2_dbgfs_port_parser_fops);
+
+       debugfs_create_file("mac_filter", 0444, port_dir, port,
+                           &mvpp2_dbgfs_filter_fops);
+
+       debugfs_create_file("vid_filter", 0444, port_dir, port,
+                           &mvpp2_dbgfs_port_vid_fops);
+
+       debugfs_create_file("c2_hits", 0444, port_dir, port,
+                           &mvpp2_dbgfs_flow_c2_hits_fops);
+
+       debugfs_create_file("default_rxq", 0444, port_dir, port,
+                           &mvpp2_dbgfs_flow_c2_rxq_fops);
+
+       debugfs_create_file("rss_enable", 0444, port_dir, port,
+                           &mvpp2_dbgfs_flow_c2_enable_fops);
+
+       return 0;
+}
+
+void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
+{
+       debugfs_remove_recursive(priv->dbgfs_dir);
+}
+
+void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
+{
+       struct dentry *mvpp2_dir, *mvpp2_root;
+       int ret, i;
+
+       mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
+       if (!mvpp2_root) {
+               mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
+               if (IS_ERR(mvpp2_root))
+                       return;
+       }
+
+       mvpp2_dir = debugfs_create_dir(name, mvpp2_root);
+       if (IS_ERR(mvpp2_dir))
+               return;
+
+       priv->dbgfs_dir = mvpp2_dir;
+
+       ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv);
+       if (ret)
+               goto err;
+
+       for (i = 0; i < priv->port_count; i++) {
+               ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]);
+               if (ret)
+                       goto err;
+       }
+
+       ret = mvpp2_dbgfs_flow_init(mvpp2_dir, priv);
+       if (ret)
+               goto err;
+
+       return;
+err:
+       mvpp2_dbgfs_cleanup(priv);
+}
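Taken together, this builds a per-device tree under /sys/kernel/debug. A rough sketch of the resulting hierarchy, assuming a hypothetical platform device named f4000000.ethernet with a single port eth0:

mvpp2/f4000000.ethernet/
    parser/000 .. 255/  {ai, header_data, hits, lookup_id, sram, valid}
    eth0/               {parser_entries, mac_filter, vid_filter, c2_hits,
                         default_rxq, rss_enable}
    flows/00 .. NN/     {flow_hits, dec_hits, type, id, eth0/{hash_opts, engine}}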
index 0319ed9ef8b815518a490cdd098018fcda46738a..32d785b616e1e270f2adb47978ce3b3f172f02ac 100644 (file)
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Driver for Marvell PPv2 network controller for Armada 375 SoC.
  *
  * Copyright (C) 2014 Marvell
  *
  * Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 
 #include <linux/acpi.h>
@@ -66,7 +63,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
 #define MVPP2_QDIST_SINGLE_MODE        0
 #define MVPP2_QDIST_MULTI_MODE 1
 
-static int queue_mode = MVPP2_QDIST_SINGLE_MODE;
+static int queue_mode = MVPP2_QDIST_MULTI_MODE;
 
 module_param(queue_mode, int, 0444);
 MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
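Since RSS depends on the queues being distributed across CPUs, the default moves to multi mode here; the existing module parameter still restores the old behaviour (e.g. mvpp2.queue_mode=0 on the kernel command line, an illustrative invocation), and the probe path below forces single mode on PPv2.1, where multi-queue isn't available.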
@@ -151,9 +148,10 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
                                            struct mvpp2_tx_desc *tx_desc)
 {
        if (port->priv->hw_version == MVPP21)
-               return tx_desc->pp21.buf_dma_addr;
+               return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
        else
-               return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
+               return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
+                      MVPP2_DESC_DMA_MASK;
 }
 
 static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
@@ -166,12 +164,12 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
        offset = dma_addr & MVPP2_TX_DESC_ALIGN;
 
        if (port->priv->hw_version == MVPP21) {
-               tx_desc->pp21.buf_dma_addr = addr;
+               tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
                tx_desc->pp21.packet_offset = offset;
        } else {
-               u64 val = (u64)addr;
+               __le64 val = cpu_to_le64(addr);
 
-               tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
+               tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
                tx_desc->pp22.buf_dma_addr_ptp |= val;
                tx_desc->pp22.packet_offset = offset;
        }
@@ -181,9 +179,9 @@ static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
                                    struct mvpp2_tx_desc *tx_desc)
 {
        if (port->priv->hw_version == MVPP21)
-               return tx_desc->pp21.data_size;
+               return le16_to_cpu(tx_desc->pp21.data_size);
        else
-               return tx_desc->pp22.data_size;
+               return le16_to_cpu(tx_desc->pp22.data_size);
 }
 
 static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
@@ -191,9 +189,9 @@ static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
                                  size_t size)
 {
        if (port->priv->hw_version == MVPP21)
-               tx_desc->pp21.data_size = size;
+               tx_desc->pp21.data_size = cpu_to_le16(size);
        else
-               tx_desc->pp22.data_size = size;
+               tx_desc->pp22.data_size = cpu_to_le16(size);
 }
 
 static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
@@ -211,9 +209,9 @@ static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
                                 unsigned int command)
 {
        if (port->priv->hw_version == MVPP21)
-               tx_desc->pp21.command = command;
+               tx_desc->pp21.command = cpu_to_le32(command);
        else
-               tx_desc->pp22.command = command;
+               tx_desc->pp22.command = cpu_to_le32(command);
 }
 
 static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
@@ -229,36 +227,38 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
                                            struct mvpp2_rx_desc *rx_desc)
 {
        if (port->priv->hw_version == MVPP21)
-               return rx_desc->pp21.buf_dma_addr;
+               return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
        else
-               return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
+               return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
+                      MVPP2_DESC_DMA_MASK;
 }
 
 static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
                                             struct mvpp2_rx_desc *rx_desc)
 {
        if (port->priv->hw_version == MVPP21)
-               return rx_desc->pp21.buf_cookie;
+               return le32_to_cpu(rx_desc->pp21.buf_cookie);
        else
-               return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
+               return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
+                      MVPP2_DESC_DMA_MASK;
 }
 
 static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
                                    struct mvpp2_rx_desc *rx_desc)
 {
        if (port->priv->hw_version == MVPP21)
-               return rx_desc->pp21.data_size;
+               return le16_to_cpu(rx_desc->pp21.data_size);
        else
-               return rx_desc->pp22.data_size;
+               return le16_to_cpu(rx_desc->pp22.data_size);
 }
 
 static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
                                   struct mvpp2_rx_desc *rx_desc)
 {
        if (port->priv->hw_version == MVPP21)
-               return rx_desc->pp21.status;
+               return le32_to_cpu(rx_desc->pp21.status);
        else
-               return rx_desc->pp22.status;
+               return le32_to_cpu(rx_desc->pp22.status);
 }
 
 static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
@@ -1735,7 +1735,7 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
        command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
        command |= MVPP2_TXD_IP_CSUM_DISABLE;
 
-       if (l3_proto == swab16(ETH_P_IP)) {
+       if (l3_proto == htons(ETH_P_IP)) {
                command &= ~MVPP2_TXD_IP_CSUM_DISABLE;  /* enable IPv4 csum */
                command &= ~MVPP2_TXD_L3_IP6;           /* enable IPv4 */
        } else {
@@ -3273,6 +3273,11 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
        }
 }
 
+static bool mvpp22_rss_is_supported(void)
+{
+       return queue_mode == MVPP2_QDIST_MULTI_MODE;
+}
+
 static int mvpp2_open(struct net_device *dev)
 {
        struct mvpp2_port *port = netdev_priv(dev);
@@ -3365,9 +3370,6 @@ static int mvpp2_open(struct net_device *dev)
 
        mvpp2_start_dev(port);
 
-       if (priv->hw_version == MVPP22)
-               mvpp22_init_rss(port);
-
        /* Start hardware statistics gathering */
        queue_delayed_work(priv->stats_queue, &port->stats_work,
                           MVPP2_MIB_COUNTERS_STATS_DELAY);
@@ -3626,6 +3628,13 @@ static int mvpp2_set_features(struct net_device *dev,
                }
        }
 
+       if (changed & NETIF_F_RXHASH) {
+               if (features & NETIF_F_RXHASH)
+                       mvpp22_rss_enable(port);
+               else
+                       mvpp22_rss_disable(port);
+       }
+
        return 0;
 }
 
@@ -3813,6 +3822,94 @@ static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
        return phylink_ethtool_ksettings_set(port->phylink, cmd);
 }
 
+static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
+                                  struct ethtool_rxnfc *info, u32 *rules)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       int ret = 0;
+
+       if (!mvpp22_rss_is_supported())
+               return -EOPNOTSUPP;
+
+       switch (info->cmd) {
+       case ETHTOOL_GRXFH:
+               ret = mvpp2_ethtool_rxfh_get(port, info);
+               break;
+       case ETHTOOL_GRXRINGS:
+               info->data = port->nrxqs;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return ret;
+}
+
+static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
+                                  struct ethtool_rxnfc *info)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       int ret = 0;
+
+       if (!mvpp22_rss_is_supported())
+               return -EOPNOTSUPP;
+
+       switch (info->cmd) {
+       case ETHTOOL_SRXFH:
+               ret = mvpp2_ethtool_rxfh_set(port, info);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+       return ret;
+}
+
+static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
+{
+       return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
+}
+
+static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+                                 u8 *hfunc)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+
+       if (!mvpp22_rss_is_supported())
+               return -EOPNOTSUPP;
+
+       if (indir)
+               memcpy(indir, port->indir,
+                      ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_CRC32;
+
+       return 0;
+}
+
+static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
+                                 const u8 *key, const u8 hfunc)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+
+       if (!mvpp22_rss_is_supported())
+               return -EOPNOTSUPP;
+
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
+               return -EOPNOTSUPP;
+
+       if (key)
+               return -EOPNOTSUPP;
+
+       if (indir) {
+               memcpy(port->indir, indir,
+                      ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
+               mvpp22_rss_fill_table(port, port->id);
+       }
+
+       return 0;
+}
+
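With these hooks in place the table becomes reachable from standard tooling, e.g. ethtool -x <iface> to dump the indirection table and ethtool -X <iface> equal 4 to spread flows over four queues (illustrative invocations; the interface name depends on the system). The constraints above are deliberate: only CRC32 is reported as the hash function, and a user-supplied hash key is rejected because the hardware doesn't take one.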
 /* Device ops */
 
 static const struct net_device_ops mvpp2_netdev_ops = {
@@ -3844,6 +3941,12 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
        .set_pauseparam         = mvpp2_ethtool_set_pause_param,
        .get_link_ksettings     = mvpp2_ethtool_get_link_ksettings,
        .set_link_ksettings     = mvpp2_ethtool_set_link_ksettings,
+       .get_rxnfc              = mvpp2_ethtool_get_rxnfc,
+       .set_rxnfc              = mvpp2_ethtool_set_rxnfc,
+       .get_rxfh_indir_size    = mvpp2_ethtool_get_rxfh_indir_size,
+       .get_rxfh               = mvpp2_ethtool_get_rxfh,
+       .set_rxfh               = mvpp2_ethtool_set_rxfh,
+
 };
 
 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -3985,8 +4088,8 @@ static int mvpp2_port_init(struct mvpp2_port *port)
            MVPP2_MAX_PORTS * priv->max_port_rxqs)
                return -EINVAL;
 
-       if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
-           (port->ntxqs > MVPP2_MAX_TXQ))
+       if (port->nrxqs % MVPP2_DEFAULT_RXQ ||
+           port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
                return -EINVAL;
 
        /* Disable port */
@@ -4075,6 +4178,9 @@ static int mvpp2_port_init(struct mvpp2_port *port)
        mvpp2_cls_oversize_rxq_set(port);
        mvpp2_cls_port_config(port);
 
+       if (mvpp22_rss_is_supported())
+               mvpp22_rss_port_init(port);
+
        /* Provide an initial Rx packet size */
        port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
 
@@ -4681,6 +4787,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
                            NETIF_F_HW_VLAN_CTAG_FILTER;
 
+       if (mvpp22_rss_is_supported())
+               dev->hw_features |= NETIF_F_RXHASH;
+
        if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
                dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
                dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
@@ -5011,6 +5120,12 @@ static int mvpp2_probe(struct platform_device *pdev)
                        (unsigned long)of_device_get_match_data(&pdev->dev);
        }
 
+       /* Multi-queue mode isn't supported on PPv2.1; fall back to single
+        * mode
+        */
+       if (priv->hw_version == MVPP21)
+               queue_mode = MVPP2_QDIST_SINGLE_MODE;
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
@@ -5174,6 +5289,8 @@ static int mvpp2_probe(struct platform_device *pdev)
                goto err_port_probe;
        }
 
+       mvpp2_dbgfs_init(priv, pdev->name);
+
        platform_set_drvdata(pdev, priv);
        return 0;
 
@@ -5207,6 +5324,8 @@ static int mvpp2_remove(struct platform_device *pdev)
        struct fwnode_handle *port_fwnode;
        int i = 0;
 
+       mvpp2_dbgfs_cleanup(priv);
+
        flush_workqueue(priv->stats_queue);
        destroy_workqueue(priv->stats_queue);
 
index 6bb69f086794ffdc16af81418aeeadc9766f9cf3..392fd895f27826e81153f230603fa37b8e921fcc 100644 (file)
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Header Parser helpers for Marvell PPv2 Network Controller
  *
  * Copyright (C) 2014 Marvell
  *
  * Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 
 #include <linux/kernel.h>
@@ -30,24 +27,24 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
                return -EINVAL;
 
        /* Clear entry invalidation bit */
-       pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
+       pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
 
        /* Write tcam index - indirect access */
        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
        for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
-               mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
+               mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
 
        /* Write sram index - indirect access */
        mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
        for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
-               mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
+               mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
 
        return 0;
 }
 
 /* Initialize tcam entry from hw */
-static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
-                                 struct mvpp2_prs_entry *pe, int tid)
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+                          int tid)
 {
        int i;
 
@@ -60,18 +57,18 @@ static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
        /* Write tcam index - indirect access */
        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
 
-       pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
+       pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
                              MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
-       if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
+       if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
                return MVPP2_PRS_TCAM_ENTRY_INVALID;
 
        for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
-               pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
+               pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
 
        /* Write sram index - indirect access */
        mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
        for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
-               pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
+               pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
 
        return 0;
 }
@@ -103,42 +100,35 @@ static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
 /* Update lookup field in tcam sw entry */
 static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
 {
-       int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
-
-       pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
-       pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
+       pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
+       pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
+       pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
+       pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
 }
 
 /* Update mask for single port in tcam sw entry */
 static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
                                    unsigned int port, bool add)
 {
-       int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
        if (add)
-               pe->tcam.byte[enable_off] &= ~(1 << port);
+               pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
        else
-               pe->tcam.byte[enable_off] |= 1 << port;
+               pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
 }
 
 /* Update port map in tcam sw entry */
 static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
                                        unsigned int ports)
 {
-       unsigned char port_mask = MVPP2_PRS_PORT_MASK;
-       int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
-       pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
-       pe->tcam.byte[enable_off] &= ~port_mask;
-       pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
+       pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
+       pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
+       pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
 }
 
 /* Obtain port map from tcam sw entry */
-static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
+unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
 {
-       int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
-
-       return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
+       return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
 }
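The per-port enable bits are active-low: a 1 masks the port out, which is why both the setter and this getter invert. A quick sketch, illustration only:

/* ports = 0x05 (ports 0 and 2): the setter writes ~0x05 & 0xff == 0xfa into
 * the enable field (bits 24..31), and this getter recovers ~0xfa & 0xff == 0x05.
 */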
 
 /* Set byte of data and its enable bits in tcam sw entry */
@@ -146,55 +136,58 @@ static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
                                         unsigned int offs, unsigned char byte,
                                         unsigned char enable)
 {
-       pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
-       pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
+       int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
+
+       pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
+       pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
+       pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
+       pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
 }
 
 /* Get byte of data and its enable bits from tcam sw entry */
-static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
-                                        unsigned int offs, unsigned char *byte,
-                                        unsigned char *enable)
+void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+                                 unsigned int offs, unsigned char *byte,
+                                 unsigned char *enable)
 {
-       *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
-       *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
+       int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;
+
+       *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
+       *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
 }
 
 /* Compare tcam data bytes with a pattern */
 static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
                                    u16 data)
 {
-       int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
        u16 tcam_data;
 
-       tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
-       if (tcam_data != data)
-               return false;
-       return true;
+       tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
+       return tcam_data == data;
 }
 
 /* Update ai bits in tcam sw entry */
 static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
                                     unsigned int bits, unsigned int enable)
 {
-       int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
+       int i;
 
        for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
                if (!(enable & BIT(i)))
                        continue;
 
                if (bits & BIT(i))
-                       pe->tcam.byte[ai_idx] |= 1 << i;
+                       pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
                else
-                       pe->tcam.byte[ai_idx] &= ~(1 << i);
+                       pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
        }
 
-       pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
+       pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
 }
 
 /* Get ai bits from tcam sw entry */
 static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
 {
-       return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
+       return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
 }
 
 /* Set ethertype in tcam sw entry */
@@ -215,16 +208,16 @@ static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
 
 /* Set bits in sram sw entry */
 static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
-                                   int val)
+                                   u32 val)
 {
-       pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
+       pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
 }
 
 /* Clear bits in sram sw entry */
 static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
-                                     int val)
+                                     u32 val)
 {
-       pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
+       pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
 }
 
 /* Update ri bits in sram sw entry */
@@ -234,15 +227,16 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
        unsigned int i;
 
        for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
-               int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
-
                if (!(mask & BIT(i)))
                        continue;
 
                if (bits & BIT(i))
-                       mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
+                       mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
+                                               1);
                else
-                       mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
+                       mvpp2_prs_sram_bits_clear(pe,
+                                                 MVPP2_PRS_SRAM_RI_OFFS + i,
+                                                 1);
 
                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
        }
@@ -251,7 +245,7 @@ static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
 /* Obtain ri bits from sram sw entry */
 static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
 {
-       return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
+       return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
 }
 
 /* Update ai bits in sram sw entry */
@@ -259,16 +253,18 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
                                     unsigned int bits, unsigned int mask)
 {
        unsigned int i;
-       int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
 
        for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
                if (!(mask & BIT(i)))
                        continue;
 
                if (bits & BIT(i))
-                       mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
+                       mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
+                                               1);
                else
-                       mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
+                       mvpp2_prs_sram_bits_clear(pe,
+                                                 MVPP2_PRS_SRAM_AI_OFFS + i,
+                                                 1);
 
                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
        }
@@ -278,12 +274,12 @@ static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
 static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
 {
        u8 bits;
-       int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
-       int ai_en_off = ai_off + 1;
-       int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
+       /* ai is stored in bits 90..97, so it spans two u32 words */
+       int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
+       int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);
 
-       bits = (pe->sram.byte[ai_off] >> ai_shift) |
-              (pe->sram.byte[ai_en_off] << (8 - ai_shift));
+       bits = (pe->sram[ai_off] >> ai_shift) |
+              (pe->sram[ai_off + 1] << (32 - ai_shift));
 
        return bits;
 }
@@ -316,8 +312,7 @@ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
        }
 
        /* Set value */
-       pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
-                                                          (unsigned char)shift;
+       /* Mask the value in, preserving the sign and UDF bits sharing this word */
+       pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] &= ~MVPP2_PRS_SRAM_SHIFT_MASK;
+       pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |= shift & MVPP2_PRS_SRAM_SHIFT_MASK;
 
        /* Reset and set operation */
        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
@@ -346,13 +341,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
        /* Set value */
        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
                                  MVPP2_PRS_SRAM_UDF_MASK);
-       mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
-       pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
-                                       MVPP2_PRS_SRAM_UDF_BITS)] &=
-             ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
-       pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
-                                       MVPP2_PRS_SRAM_UDF_BITS)] |=
-                               (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+       mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
+                               offset & MVPP2_PRS_SRAM_UDF_MASK);
 
        /* Set offset type */
        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
@@ -362,16 +352,8 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
        /* Set offset operation */
        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
                                  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
-       mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
-
-       pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
-                                       MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
-                                            ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
-                                   (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
-
-       pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
-                                       MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
-                            (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+       mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
+                               op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
 
        /* Set base offset as current */
        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
@@ -662,7 +644,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
                        continue;
 
                mvpp2_prs_init_from_hw(priv, &pe, tid);
-               match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid));
+               match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
                if (!match)
                        continue;
 
@@ -790,8 +772,8 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
 
                mvpp2_prs_init_from_hw(priv, &pe, tid);
 
-               match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) &&
-                       mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2));
+               match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
+                       mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);
 
                if (!match)
                        continue;
@@ -932,8 +914,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
 
        pe.index = tid;
        /* Clear ri before updating */
-       pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
-       pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+       pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+       pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
        mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
 
        mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
@@ -1433,17 +1415,13 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
 
        pe.index = tid;
 
-       /* Clear tcam data before updating */
-       pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
-       pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
-
        mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
                                     MVPP2_PRS_IPV4_HEAD,
                                     MVPP2_PRS_IPV4_HEAD_MASK);
 
        /* Clear ri before updating */
-       pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
-       pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+       pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+       pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
                                 MVPP2_PRS_RI_L3_PROTO_MASK);
 
@@ -1644,8 +1622,8 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
                                     MVPP2_PRS_IPV4_IHL_MASK);
 
        /* Clear ri before updating */
-       pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
-       pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+       pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+       pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
                                 MVPP2_PRS_RI_L3_PROTO_MASK);
 
@@ -2428,6 +2406,41 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
        return 0;
 }
 
+int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
+{
+       struct mvpp2_prs_entry pe;
+       u8 *ri_byte, *ri_byte_mask;
+       int tid, i;
+
+       memset(&pe, 0, sizeof(pe));
+
+       tid = mvpp2_prs_tcam_first_free(priv,
+                                       MVPP2_PE_LAST_FREE_TID,
+                                       MVPP2_PE_FIRST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       pe.index = tid;
+
+       ri_byte = (u8 *)&ri;
+       ri_byte_mask = (u8 *)&ri_mask;
+
+       mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+       for (i = 0; i < 4; i++) {
+               mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
+                                            ri_byte_mask[i]);
+       }
+
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
 /* Set prs flow for the port */
 int mvpp2_prs_def_flow(struct mvpp2_port *port)
 {
@@ -2465,3 +2478,19 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
 
        return 0;
 }
+
+int mvpp2_prs_hits(struct mvpp2 *priv, int index)
+{
+       u32 val;
+
+       if (index >= MVPP2_PRS_TCAM_SRAM_SIZE)
+               return -EINVAL;
+
+       mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
+
+       val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
+
+       val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
+
+       return val;
+}
index 22fbbc4c8b2805ad7545978864bae24ae862b423..e22f6c85d380346312147daf531bf6c3626e9589 100644 (file)
@@ -1,22 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Header Parser definitions for Marvell PPv2 Network Controller
  *
  * Copyright (C) 2014 Marvell
  *
  * Marcin Wojtas <mw@semihalf.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
+#ifndef _MVPP2_PRS_H_
+#define _MVPP2_PRS_H_
+
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
+#include <linux/platform_device.h>
 
 #include "mvpp2.h"
 
-#ifndef _MVPP2_PRS_H_
-#define _MVPP2_PRS_H_
-
 /* Parser constants */
 #define MVPP2_PRS_TCAM_SRAM_SIZE       256
 #define MVPP2_PRS_TCAM_WORDS           6
  * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
  */
 #define MVPP2_PRS_AI_BITS                      8
+#define MVPP2_PRS_AI_MASK                      0xff
 #define MVPP2_PRS_PORT_MASK                    0xff
 #define MVPP2_PRS_LU_MASK                      0xf
-#define MVPP2_PRS_TCAM_DATA_BYTE(offs)         \
-                                   (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
-#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)      \
-                                             (((offs) * 2) - ((offs) % 2)  + 2)
-#define MVPP2_PRS_TCAM_AI_BYTE                 16
-#define MVPP2_PRS_TCAM_PORT_BYTE               17
-#define MVPP2_PRS_TCAM_LU_BYTE                 20
-#define MVPP2_PRS_TCAM_EN_OFFS(offs)           ((offs) + 2)
-#define MVPP2_PRS_TCAM_INV_WORD                        5
+
+/* TCAM entries in registers are accessed using 16 data bits + 16 enable bits */
+#define MVPP2_PRS_BYTE_TO_WORD(byte)   ((byte) / 2)
+#define MVPP2_PRS_BYTE_IN_WORD(byte)   ((byte) % 2)
+
+#define MVPP2_PRS_TCAM_EN(data)                ((data) << 16)
+#define MVPP2_PRS_TCAM_AI_WORD         4
+#define MVPP2_PRS_TCAM_AI(ai)          (ai)
+#define MVPP2_PRS_TCAM_AI_EN(ai)       MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_AI(ai))
+#define MVPP2_PRS_TCAM_PORT_WORD       4
+#define MVPP2_PRS_TCAM_PORT(p)         ((p) << 8)
+#define MVPP2_PRS_TCAM_PORT_EN(p)      MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_PORT(p))
+#define MVPP2_PRS_TCAM_LU_WORD         5
+#define MVPP2_PRS_TCAM_LU(lu)          (lu)
+#define MVPP2_PRS_TCAM_LU_EN(lu)       MVPP2_PRS_TCAM_EN(MVPP2_PRS_TCAM_LU(lu))
+#define MVPP2_PRS_TCAM_INV_WORD                5
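A worked example of this 16-data + 16-enable packing, illustration only:

/* TCAM byte at offset 5: MVPP2_PRS_BYTE_TO_WORD(5) == 2 and
 * MVPP2_PRS_BYTE_IN_WORD(5) == 1, so the data byte sits in bits 8..15 of
 * tcam[2] and its enable bits, via MVPP2_PRS_TCAM_EN(), in bits 24..31.
 */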
 
 #define MVPP2_PRS_VID_TCAM_BYTE         2
 
 #define MVPP2_PRS_SRAM_RI_CTRL_BITS            32
 #define MVPP2_PRS_SRAM_SHIFT_OFFS              64
 #define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT          72
+#define MVPP2_PRS_SRAM_SHIFT_MASK              0xff
 #define MVPP2_PRS_SRAM_UDF_OFFS                        73
 #define MVPP2_PRS_SRAM_UDF_BITS                        8
 #define MVPP2_PRS_SRAM_UDF_MASK                        0xff
 #define MVPP2_PRS_RI_UDF7_IP6_LITE             BIT(29)
 #define MVPP2_PRS_RI_DROP_MASK                 0x80000000
 
+#define MVPP2_PRS_IP_MASK                      (MVPP2_PRS_RI_L3_PROTO_MASK | \
+                                               MVPP2_PRS_RI_IP_FRAG_MASK | \
+                                               MVPP2_PRS_RI_L4_PROTO_MASK)
+
 /* Sram additional info bits assignment */
 #define MVPP2_PRS_IPV4_DIP_AI_BIT              BIT(0)
 #define MVPP2_PRS_IPV6_NO_EXT_AI_BIT           BIT(0)
@@ -255,20 +266,15 @@ enum mvpp2_prs_lookup {
        MVPP2_PRS_LU_LAST,
 };
 
-union mvpp2_prs_tcam_entry {
-       u32 word[MVPP2_PRS_TCAM_WORDS];
-       u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
-};
-
-union mvpp2_prs_sram_entry {
-       u32 word[MVPP2_PRS_SRAM_WORDS];
-       u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
-};
-
 struct mvpp2_prs_entry {
        u32 index;
-       union mvpp2_prs_tcam_entry tcam;
-       union mvpp2_prs_sram_entry sram;
+       u32 tcam[MVPP2_PRS_TCAM_WORDS];
+       u32 sram[MVPP2_PRS_SRAM_WORDS];
+};
+
+struct mvpp2_prs_result_info {
+       u32 ri;
+       u32 ri_mask;
 };
 
 struct mvpp2_prs_shadow {
@@ -288,10 +294,21 @@ struct mvpp2_prs_shadow {
 
 int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv);
 
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
+                          int tid);
+
+unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe);
+
+void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+                                 unsigned int offs, unsigned char *byte,
+                                 unsigned char *enable);
+
 int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add);
 
 int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type);
 
+int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask);
+
 int mvpp2_prs_def_flow(struct mvpp2_port *port);
 
 void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port);
@@ -311,4 +328,6 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port);
 
 int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da);
 
+int mvpp2_prs_hits(struct mvpp2 *priv, int index);
+
 #endif
index 16b10d01fcf4bc826fc5d8c7d522cb32c38ab8d9..3f400770fcd8fb8251ed28877d757d75dad52f53 100644 (file)
@@ -3,7 +3,7 @@ obj-$(CONFIG_MLX4_CORE)         += mlx4_core.o
 
 mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \
                main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \
-               srq.o resource_tracker.o
+               srq.o resource_tracker.o crdump.o
 
 obj-$(CONFIG_MLX4_EN)               += mlx4_en.o
 
index e2b6b0cac1acf9dad8168587743ccc5e871a7c92..c81d15bf259c83a8baf11c80d9857cd8759e3b8d 100644 (file)
@@ -178,10 +178,12 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
 
        dev = persist->dev;
        mlx4_err(dev, "device is going to be reset\n");
-       if (mlx4_is_slave(dev))
+       if (mlx4_is_slave(dev)) {
                err = mlx4_reset_slave(dev);
-       else
+       } else {
+               mlx4_crdump_collect(dev);
                err = mlx4_reset_master(dev);
+       }
 
        if (!err) {
                mlx4_err(dev, "device was reset successfully\n");
@@ -212,7 +214,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
        mutex_lock(&persist->interface_state_mutex);
        if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
            !(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
-               err = mlx4_restart_one(persist->pdev);
+               err = mlx4_restart_one(persist->pdev, false, NULL);
                mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n",
                          err);
        }
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
new file mode 100644 (file)
index 0000000..88316c7
--- /dev/null
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "mlx4.h"
+
+#define BAD_ACCESS                     0xBADACCE5
+#define HEALTH_BUFFER_SIZE             0x40
+#define CR_ENABLE_BIT                  swab32(BIT(6))
+#define CR_ENABLE_BIT_OFFSET           0xF3F04
+#define MAX_NUM_OF_DUMPS_TO_STORE      (8)
+
+static const char *region_cr_space_str = "cr-space";
+static const char *region_fw_health_str = "fw-health";
+
+/* Records whether the CR enable bit was set before the crdump began */
+static bool crdump_enable_bit_set;
+
+static void crdump_enable_crspace_access(struct mlx4_dev *dev,
+                                        u8 __iomem *cr_space)
+{
+       /* Get current enable bit value */
+       crdump_enable_bit_set =
+               readl(cr_space + CR_ENABLE_BIT_OFFSET) & CR_ENABLE_BIT;
+
+       /* Enable FW CR filter (set bit6 to 0) */
+       if (crdump_enable_bit_set)
+               writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) & ~CR_ENABLE_BIT,
+                      cr_space + CR_ENABLE_BIT_OFFSET);
+
+       /* Enable block volatile crspace accesses */
+       writel(swab32(1), cr_space + dev->caps.health_buffer_addrs +
+              HEALTH_BUFFER_SIZE);
+}
+
+static void crdump_disable_crspace_access(struct mlx4_dev *dev,
+                                         u8 __iomem *cr_space)
+{
+       /* Disable block volatile crspace accesses */
+       writel(0, cr_space + dev->caps.health_buffer_addrs +
+              HEALTH_BUFFER_SIZE);
+
+       /* Restore FW CR filter value (set bit6 to original value) */
+       if (crdump_enable_bit_set)
+               writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) | CR_ENABLE_BIT,
+                      cr_space + CR_ENABLE_BIT_OFFSET);
+}
+
+static void mlx4_crdump_collect_crspace(struct mlx4_dev *dev,
+                                       u8 __iomem *cr_space,
+                                       u32 id)
+{
+       struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+       struct pci_dev *pdev = dev->persist->pdev;
+       unsigned long cr_res_size;
+       u8 *crspace_data;
+       int offset;
+       int err;
+
+       if (!crdump->region_crspace) {
+               mlx4_err(dev, "crdump: cr-space region is NULL\n");
+               return;
+       }
+
+       /* Try to collect CR space */
+       cr_res_size = pci_resource_len(pdev, 0);
+       crspace_data = kvmalloc(cr_res_size, GFP_KERNEL);
+       if (crspace_data) {
+               for (offset = 0; offset < cr_res_size; offset += 4)
+                       *(u32 *)(crspace_data + offset) =
+                                       readl(cr_space + offset);
+
+               err = devlink_region_snapshot_create(crdump->region_crspace,
+                                                    cr_res_size, crspace_data,
+                                                    id, &kvfree);
+               if (err) {
+                       kvfree(crspace_data);
+                       mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
+                                 region_cr_space_str, id, err);
+               } else {
+                       mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n",
+                                 id, region_cr_space_str);
+               }
+       } else {
+               mlx4_err(dev, "crdump: Failed to allocate crspace buffer\n");
+       }
+}
+
+static void mlx4_crdump_collect_fw_health(struct mlx4_dev *dev,
+                                         u8 __iomem *cr_space,
+                                         u32 id)
+{
+       struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+       u8 *health_data;
+       int offset;
+       int err;
+
+       if (!crdump->region_fw_health) {
+               mlx4_err(dev, "crdump: fw-health region is NULL\n");
+               return;
+       }
+
+       /* Try to collect health buffer */
+       health_data = kvmalloc(HEALTH_BUFFER_SIZE, GFP_KERNEL);
+       if (health_data) {
+               u8 __iomem *health_buf_start =
+                               cr_space + dev->caps.health_buffer_addrs;
+
+               for (offset = 0; offset < HEALTH_BUFFER_SIZE; offset += 4)
+                       *(u32 *)(health_data + offset) =
+                                       readl(health_buf_start + offset);
+
+               err = devlink_region_snapshot_create(crdump->region_fw_health,
+                                                    HEALTH_BUFFER_SIZE,
+                                                    health_data,
+                                                    id, &kvfree);
+               if (err) {
+                       kvfree(health_data);
+                       mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
+                                 region_fw_health_str, id, err);
+               } else {
+                       mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n",
+                                 id, region_fw_health_str);
+               }
+       } else {
+               mlx4_err(dev, "crdump: Failed to allocate health buffer\n");
+       }
+}
+
+int mlx4_crdump_collect(struct mlx4_dev *dev)
+{
+       struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
+       struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+       struct pci_dev *pdev = dev->persist->pdev;
+       unsigned long cr_res_size;
+       u8 __iomem *cr_space;
+       u32 id;
+
+       if (!dev->caps.health_buffer_addrs) {
+               mlx4_info(dev, "crdump: FW doesn't support health buffer access, skipping\n");
+               return 0;
+       }
+
+       if (!crdump->snapshot_enable) {
+               mlx4_info(dev, "crdump: devlink snapshot disabled, skipping\n");
+               return 0;
+       }
+
+       cr_res_size = pci_resource_len(pdev, 0);
+
+       cr_space = ioremap(pci_resource_start(pdev, 0), cr_res_size);
+       if (!cr_space) {
+               mlx4_err(dev, "crdump: Failed to map pci cr region\n");
+               return -ENODEV;
+       }
+
+       crdump_enable_crspace_access(dev, cr_space);
+
+       /* Get the available snapshot ID for the dumps */
+       id = devlink_region_shapshot_id_get(devlink);
+
+       /* Try to capture dumps */
+       mlx4_crdump_collect_crspace(dev, cr_space, id);
+       mlx4_crdump_collect_fw_health(dev, cr_space, id);
+
+       crdump_disable_crspace_access(dev, cr_space);
+
+       iounmap(cr_space);
+       return 0;
+}
+
+int mlx4_crdump_init(struct mlx4_dev *dev)
+{
+       struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
+       struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+       struct pci_dev *pdev = dev->persist->pdev;
+
+       crdump->snapshot_enable = false;
+
+       /* Create cr-space region */
+       crdump->region_crspace =
+               devlink_region_create(devlink,
+                                     region_cr_space_str,
+                                     MAX_NUM_OF_DUMPS_TO_STORE,
+                                     pci_resource_len(pdev, 0));
+       if (IS_ERR(crdump->region_crspace))
+               mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
+                         region_cr_space_str,
+                         PTR_ERR(crdump->region_crspace));
+
+       /* Create fw-health region */
+       crdump->region_fw_health =
+               devlink_region_create(devlink,
+                                     region_fw_health_str,
+                                     MAX_NUM_OF_DUMPS_TO_STORE,
+                                     HEALTH_BUFFER_SIZE);
+       if (IS_ERR(crdump->region_fw_health))
+               mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
+                         region_fw_health_str,
+                         PTR_ERR(crdump->region_fw_health));
+
+       return 0;
+}
+
+void mlx4_crdump_end(struct mlx4_dev *dev)
+{
+       struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+
+       devlink_region_destroy(crdump->region_fw_health);
+       devlink_region_destroy(crdump->region_crspace);
+}
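
The snapshots collected above are retrieved from userspace through the generic devlink region interface rather than a driver-private channel. A rough usage sketch with a 2018-era iproute2 (device name, region size and snapshot id are illustrative, and the exact output format may differ):

    # devlink region show
    pci/0000:00:05.0/cr-space: size 1048576 snapshot [1]
    pci/0000:00:05.0/fw-health: size 64 snapshot [1]
    # devlink region dump pci/0000:00:05.0/cr-space snapshot 1
    # devlink region del pci/0000:00:05.0/cr-space snapshot 1

Note that mlx4_crdump_collect() bails out unless snapshot_enable has been turned on, which is wired to the region_snapshot_enable devlink parameter registered in main.c below.
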
index 65eb06e017e401237842503bc3aabad3780c1a2e..6785661d1a72627d7cc6895359e0ece284577d96 100644 (file)
@@ -2926,7 +2926,6 @@ static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
                return mlx4_xdp_set(dev, xdp->prog);
        case XDP_QUERY_PROG:
                xdp->prog_id = mlx4_xdp_query(dev);
-               xdp->prog_attached = !!xdp->prog_id;
                return 0;
        default:
                return -EINVAL;
index 0227786308af5d70bdfbb19da3fb8d5760d0651f..1857ee0f0871d48285a6d3711f7c3e9a1e08a05f 100644 (file)
@@ -688,15 +688,16 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
 }
 
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-                        void *accel_priv, select_queue_fallback_t fallback)
+                        struct net_device *sb_dev,
+                        select_queue_fallback_t fallback)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        u16 rings_p_up = priv->num_tx_rings_p_up;
 
        if (netdev_get_num_tc(dev))
-               return fallback(dev, skb);
+               return fallback(dev, skb, NULL);
 
-       return fallback(dev, skb) % rings_p_up;
+       return fallback(dev, skb, NULL) % rings_p_up;
 }
 
 static void mlx4_bf_copy(void __iomem *dst, const void *src,
index 46dcbfbe4c5eb0133ce49c2d222ebd7a2c8e7d44..babcfd9c0571fc6ffac47bd222304b62c65824c1 100644 (file)
@@ -825,7 +825,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
-
+#define QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET     0xe4
 
        dev_cap->flags2 = 0;
        mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -1082,6 +1082,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                dev_cap->rl_caps.min_unit = size >> 14;
        }
 
+       MLX4_GET(dev_cap->health_buffer_addrs, outbox,
+                QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET);
+
        MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
        if (field32 & (1 << 16))
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
index cd6399c76bfdb51887a1eba84150b642ef03cc30..650ae08c71def539ed50db6103a6c036170d1eae 100644 (file)
@@ -128,6 +128,7 @@ struct mlx4_dev_cap {
        u32 dmfs_high_rate_qpn_base;
        u32 dmfs_high_rate_qpn_range;
        struct mlx4_rate_limit_caps rl_caps;
+       u32 health_buffer_addrs;
        struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
        bool wol_port[MLX4_MAX_PORTS + 1];
 };
index 872014702fc1b0def72197f9e1b505849a5c72da..2d979a652b7b08d0e5290b1b972dd0fbf3d9d114 100644 (file)
@@ -177,6 +177,131 @@ struct mlx4_port_config {
 
 static atomic_t pf_loading = ATOMIC_INIT(0);
 
+static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
+                                      struct devlink_param_gset_ctx *ctx)
+{
+       ctx->val.vbool = !!mlx4_internal_err_reset;
+       return 0;
+}
+
+static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
+                                      struct devlink_param_gset_ctx *ctx)
+{
+       mlx4_internal_err_reset = ctx->val.vbool;
+       return 0;
+}
+
+static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
+                                           struct devlink_param_gset_ctx *ctx)
+{
+       struct mlx4_priv *priv = devlink_priv(devlink);
+       struct mlx4_dev *dev = &priv->dev;
+
+       ctx->val.vbool = dev->persist->crdump.snapshot_enable;
+       return 0;
+}
+
+static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id,
+                                           struct devlink_param_gset_ctx *ctx)
+{
+       struct mlx4_priv *priv = devlink_priv(devlink);
+       struct mlx4_dev *dev = &priv->dev;
+
+       dev->persist->crdump.snapshot_enable = ctx->val.vbool;
+       return 0;
+}
+
+static int
+mlx4_devlink_max_macs_validate(struct devlink *devlink, u32 id,
+                              union devlink_param_value val,
+                              struct netlink_ext_ack *extack)
+{
+       u32 value = val.vu32;
+
+       if (value < 1 || value > 128)
+               return -ERANGE;
+
+       if (!is_power_of_2(value)) {
+               NL_SET_ERR_MSG_MOD(extack, "max_macs supported must be power of 2");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+enum mlx4_devlink_param_id {
+       MLX4_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+       MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+       MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+};
+
+static const struct devlink_param mlx4_devlink_params[] = {
+       DEVLINK_PARAM_GENERIC(INT_ERR_RESET,
+                             BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
+                             BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+                             mlx4_devlink_ierr_reset_get,
+                             mlx4_devlink_ierr_reset_set, NULL),
+       DEVLINK_PARAM_GENERIC(MAX_MACS,
+                             BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+                             NULL, NULL, mlx4_devlink_max_macs_validate),
+       DEVLINK_PARAM_GENERIC(REGION_SNAPSHOT,
+                             BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
+                             BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+                             mlx4_devlink_crdump_snapshot_get,
+                             mlx4_devlink_crdump_snapshot_set, NULL),
+       DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+                            "enable_64b_cqe_eqe", DEVLINK_PARAM_TYPE_BOOL,
+                            BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+                            NULL, NULL, NULL),
+       DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+                            "enable_4k_uar", DEVLINK_PARAM_TYPE_BOOL,
+                            BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+                            NULL, NULL, NULL),
+};
+
+static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id,
+                                       union devlink_param_value init_val)
+{
+       struct mlx4_priv *priv = devlink_priv(devlink);
+       struct mlx4_dev *dev = &priv->dev;
+       int err;
+
+       err = devlink_param_driverinit_value_set(devlink, param_id, init_val);
+       if (err)
+               mlx4_warn(dev,
+                         "devlink set parameter %u value failed (err = %d)",
+                         param_id, err);
+}
+
+static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
+{
+       union devlink_param_value value;
+
+       value.vbool = !!mlx4_internal_err_reset;
+       mlx4_devlink_set_init_value(devlink,
+                                   DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+                                   value);
+
+       value.vu32 = 1UL << log_num_mac;
+       mlx4_devlink_set_init_value(devlink,
+                                   DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value);
+
+       value.vbool = enable_64b_cqe_eqe;
+       mlx4_devlink_set_init_value(devlink,
+                                   MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+                                   value);
+
+       value.vbool = enable_4k_uar;
+       mlx4_devlink_set_init_value(devlink,
+                                   MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+                                   value);
+
+       value.vbool = false;
+       mlx4_devlink_set_init_value(devlink,
+                                   DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+                                   value);
+}
+
 static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
                                              struct mlx4_dev_cap *dev_cap)
 {
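
The parameter table above is what `devlink dev param` operates on: runtime-mode writes go through the get/set callbacks immediately, while driverinit-mode values are only consumed by mlx4_devlink_param_load_driverinit_values() on the next reload. A hedged usage sketch (parameter names follow the generic devlink definitions of this period and may differ by iproute2 version):

    # devlink dev param show pci/0000:00:05.0
    # devlink dev param set pci/0000:00:05.0 \
          name region_snapshot_enable value true cmode runtime
    # devlink dev param set pci/0000:00:05.0 \
          name enable_4k_uar value true cmode driverinit
    # devlink dev reload pci/0000:00:05.0
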
@@ -428,6 +553,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
        dev->caps.wol_port[1]          = dev_cap->wol_port[1];
        dev->caps.wol_port[2]          = dev_cap->wol_port[2];
+       dev->caps.health_buffer_addrs  = dev_cap->health_buffer_addrs;
 
        /* Save uar page shift */
        if (!mlx4_is_slave(dev)) {
@@ -3711,10 +3837,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
                }
        }
 
-       err = mlx4_catas_init(&priv->dev);
+       err = mlx4_crdump_init(&priv->dev);
        if (err)
                goto err_release_regions;
 
+       err = mlx4_catas_init(&priv->dev);
+       if (err)
+               goto err_crdump;
+
        err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
        if (err)
                goto err_catas;
@@ -3724,6 +3854,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
 err_catas:
        mlx4_catas_end(&priv->dev);
 
+err_crdump:
+       mlx4_crdump_end(&priv->dev);
+
 err_release_regions:
        pci_release_regions(pdev);
 
@@ -3757,8 +3890,68 @@ static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
        return __set_port_type(info, mlx4_port_type);
 }
 
+static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink)
+{
+       struct mlx4_priv *priv = devlink_priv(devlink);
+       struct mlx4_dev *dev = &priv->dev;
+       struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
+       union devlink_param_value saved_value;
+       int err;
+
+       err = devlink_param_driverinit_value_get(devlink,
+                                                DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+                                                &saved_value);
+       if (!err && mlx4_internal_err_reset != saved_value.vbool) {
+               mlx4_internal_err_reset = saved_value.vbool;
+               /* Notify on value changed on runtime configuration mode */
+               devlink_param_value_changed(devlink,
+                                           DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET);
+       }
+       err = devlink_param_driverinit_value_get(devlink,
+                                                DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+                                                &saved_value);
+       if (!err)
+               log_num_mac = order_base_2(saved_value.vu32);
+       err = devlink_param_driverinit_value_get(devlink,
+                                                MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+                                                &saved_value);
+       if (!err)
+               enable_64b_cqe_eqe = saved_value.vbool;
+       err = devlink_param_driverinit_value_get(devlink,
+                                                MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+                                                &saved_value);
+       if (!err)
+               enable_4k_uar = saved_value.vbool;
+       err = devlink_param_driverinit_value_get(devlink,
+                                                DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+                                                &saved_value);
+       if (!err && crdump->snapshot_enable != saved_value.vbool) {
+               crdump->snapshot_enable = saved_value.vbool;
+               devlink_param_value_changed(devlink,
+                                           DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT);
+       }
+}
+
+static int mlx4_devlink_reload(struct devlink *devlink,
+                              struct netlink_ext_ack *extack)
+{
+       struct mlx4_priv *priv = devlink_priv(devlink);
+       struct mlx4_dev *dev = &priv->dev;
+       struct mlx4_dev_persistent *persist = dev->persist;
+       int err;
+
+       if (persist->num_vfs)
+               mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n");
+       err = mlx4_restart_one(persist->pdev, true, devlink);
+       if (err)
+               mlx4_err(persist->dev, "mlx4_restart_one failed, ret=%d\n", err);
+
+       return err;
+}
+
 static const struct devlink_ops mlx4_devlink_ops = {
        .port_type_set  = mlx4_devlink_port_type_set,
+       .reload         = mlx4_devlink_reload,
 };
 
 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -3792,14 +3985,21 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        ret = devlink_register(devlink, &pdev->dev);
        if (ret)
                goto err_persist_free;
-
-       ret =  __mlx4_init_one(pdev, id->driver_data, priv);
+       ret = devlink_params_register(devlink, mlx4_devlink_params,
+                                     ARRAY_SIZE(mlx4_devlink_params));
        if (ret)
                goto err_devlink_unregister;
+       mlx4_devlink_set_params_init_values(devlink);
+       ret =  __mlx4_init_one(pdev, id->driver_data, priv);
+       if (ret)
+               goto err_params_unregister;
 
        pci_save_state(pdev);
        return 0;
 
+err_params_unregister:
+       devlink_params_unregister(devlink, mlx4_devlink_params,
+                                 ARRAY_SIZE(mlx4_devlink_params));
 err_devlink_unregister:
        devlink_unregister(devlink);
 err_persist_free:
@@ -3929,6 +4129,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
        else
                mlx4_info(dev, "%s: interface is down\n", __func__);
        mlx4_catas_end(dev);
+       mlx4_crdump_end(dev);
        if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
                mlx4_warn(dev, "Disabling SR-IOV\n");
                pci_disable_sriov(pdev);
@@ -3936,6 +4137,8 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 
        pci_release_regions(pdev);
        mlx4_pci_disable_device(dev);
+       devlink_params_unregister(devlink, mlx4_devlink_params,
+                                 ARRAY_SIZE(mlx4_devlink_params));
        devlink_unregister(devlink);
        kfree(dev->persist);
        devlink_free(devlink);
@@ -3960,7 +4163,7 @@ static int restore_current_port_types(struct mlx4_dev *dev,
        return err;
 }
 
-int mlx4_restart_one(struct pci_dev *pdev)
+int mlx4_restart_one(struct pci_dev *pdev, bool reload, struct devlink *devlink)
 {
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
        struct mlx4_dev  *dev  = persist->dev;
@@ -3973,6 +4176,8 @@ int mlx4_restart_one(struct pci_dev *pdev)
        memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
 
        mlx4_unload_one(pdev);
+       if (reload)
+               mlx4_devlink_param_load_driverinit_values(devlink);
        err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
        if (err) {
                mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
index cb9e923e83996499c2d98c8023b5ebc069bb0731..6e016092a6f138c9d95e65c3920915c6d29858db 100644 (file)
@@ -1042,7 +1042,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev);
 void mlx4_stop_catas_poll(struct mlx4_dev *dev);
 int mlx4_catas_init(struct mlx4_dev *dev);
 void mlx4_catas_end(struct mlx4_dev *dev);
-int mlx4_restart_one(struct pci_dev *pdev);
+int mlx4_crdump_init(struct mlx4_dev *dev);
+void mlx4_crdump_end(struct mlx4_dev *dev);
+int mlx4_restart_one(struct pci_dev *pdev, bool reload,
+                    struct devlink *devlink);
 int mlx4_register_device(struct mlx4_dev *dev);
 void mlx4_unregister_device(struct mlx4_dev *dev);
 void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
@@ -1227,6 +1230,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
 int mlx4_comm_internal_err(u32 slave_read);
 
+int mlx4_crdump_collect(struct mlx4_dev *dev);
+
 int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
                    enum mlx4_port_type *type);
 void mlx4_do_sense_ports(struct mlx4_dev *dev,
index ace6545f82e6b343d26acd6d0bb4c55cd6ae4809..c3228b89df463597de1cb546754ea1b8aa4d876d 100644 (file)
@@ -699,7 +699,8 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-                        void *accel_priv, select_queue_fallback_t fallback);
+                        struct net_device *sb_dev,
+                        select_queue_fallback_t fallback);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
                               struct mlx4_en_rx_alloc *frame,
index 9efbf193ad5a6ac26ec2599cbd7ba93ab8ec951b..d923f2f58608fedbb80f8b64c5d9d7208e225ee9 100644 (file)
@@ -14,8 +14,8 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
                fpga/ipsec.o fpga/tls.o
 
 mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
-               en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
-               en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o
+               en_tx.o en_rx.o en_dim.o en_txrx.o en_accel/rxtx.o en_stats.o  \
+               vxlan.o en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o
 
 mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
new file mode 100644 (file)
index 0000000..c132604
--- /dev/null
@@ -0,0 +1,37 @@
+#ifndef __MLX5E_ACCEL_H__
+#define __MLX5E_ACCEL_H__
+
+#ifdef CONFIG_MLX5_ACCEL
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "en.h"
+
+static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
+{
+       __be16 *ethtype;
+
+       if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN))
+               return false;
+       ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
+       if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
+               return false;
+       return true;
+}
+
+static inline void remove_metadata_hdr(struct sk_buff *skb)
+{
+       struct ethhdr *old_eth;
+       struct ethhdr *new_eth;
+
+       /* Remove the metadata from the buffer */
+       old_eth = (struct ethhdr *)skb->data;
+       new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
+       memmove(new_eth, old_eth, 2 * ETH_ALEN);
+       /* Ethertype is already in its new place */
+       skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
+}
+
+#endif /* CONFIG_MLX5_ACCEL */
+
+#endif /* __MLX5E_ACCEL_H__ */
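
Both helpers implement a small wire convention: the FPGA inserts a fixed-size metadata block between the source MAC and the real ethertype, tagged by a magic ethertype, and the receive path strips it by sliding the two MAC addresses forward. A standalone sketch of that strip on a flat buffer (META_LEN and the frame contents are illustrative, not the driver's real values):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define META_LEN 8	/* illustrative metadata length */

/* Slide the 12 MAC bytes forward over the metadata; the real
 * ethertype that follows the metadata is already in place. */
static uint8_t *strip_metadata(uint8_t *frame, size_t *len)
{
	memmove(frame + META_LEN, frame, 2 * ETH_ALEN);
	*len -= META_LEN;
	return frame + META_LEN;	/* new start of the frame */
}

int main(void)
{
	uint8_t frame[64] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2 };
	size_t len = sizeof(frame);
	uint8_t *p;

	frame[12 + META_LEN] = 0x08;	/* real ethertype (0x0800) */
	frame[12 + META_LEN + 1] = 0x00;
	p = strip_metadata(frame, &len);
	printf("ethertype at offset 12: %02x%02x, len %zu\n",
	       p[12], p[13], len);
	return 0;
}
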
index 77ac19f38cbe87c2268762cf027d08f8b2451872..da7bd26368f9bd4d19da7ff7b98ef3048860684c 100644 (file)
 #include "mlx5_core.h"
 #include "fpga/tls.h"
 
-int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
-                              struct tls_crypto_info *crypto_info,
-                              u32 start_offload_tcp_sn, u32 *p_swid)
+int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+                           struct tls_crypto_info *crypto_info,
+                           u32 start_offload_tcp_sn, u32 *p_swid,
+                           bool direction_sx)
 {
-       return mlx5_fpga_tls_add_tx_flow(mdev, flow, crypto_info,
-                                        start_offload_tcp_sn, p_swid);
+       return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
+                                     start_offload_tcp_sn, p_swid,
+                                     direction_sx);
 }
 
-void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid)
+void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+                            bool direction_sx)
 {
-       mlx5_fpga_tls_del_tx_flow(mdev, swid, GFP_KERNEL);
+       mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
+}
+
+int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+                            u64 rcd_sn)
+{
+       return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
 }
 
 bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
index 6f9c9f446ecc8c129079d9fda018dbcedb9b84f3..2228c1083528c2999369322a15cf599ac9316395 100644 (file)
@@ -60,10 +60,14 @@ struct mlx5_ifc_tls_flow_bits {
        u8         reserved_at_2[0x1e];
 };
 
-int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
-                              struct tls_crypto_info *crypto_info,
-                              u32 start_offload_tcp_sn, u32 *p_swid);
-void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid);
+int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+                           struct tls_crypto_info *crypto_info,
+                           u32 start_offload_tcp_sn, u32 *p_swid,
+                           bool direction_sx);
+void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+                            bool direction_sx);
+int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+                            u64 rcd_sn);
 bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
 u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
 int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
@@ -71,11 +75,15 @@ void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
 
 #else
 
-static inline int
-mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
-                          struct tls_crypto_info *crypto_info,
-                          u32 start_offload_tcp_sn, u32 *p_swid) { return 0; }
-static inline void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) { }
+static inline int
+mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+                       struct tls_crypto_info *crypto_info,
+                       u32 start_offload_tcp_sn, u32 *p_swid,
+                       bool direction_sx) { return -ENOTSUPP; }
+static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+                                          bool direction_sx) { }
+static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle,
+                                          u32 seq, u64 rcd_sn) { return 0; }
 static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
 static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
 static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
index 487388aed98f22cc9ae814fd60d27b48d5105458..384c1fa490811ee651919c139b9cd9e724d4ff81 100644 (file)
@@ -807,6 +807,7 @@ static void cmd_work_handler(struct work_struct *work)
        unsigned long flags;
        bool poll_cmd = ent->polling;
        int alloc_ret;
+       int cmd_mode;
 
        sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
        down(sem);
@@ -853,6 +854,7 @@ static void cmd_work_handler(struct work_struct *work)
        set_signature(ent, !cmd->checksum_disabled);
        dump_command(dev, ent, 1);
        ent->ts1 = ktime_get_ns();
+       cmd_mode = cmd->mode;
 
        if (ent->callback)
                schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
@@ -877,7 +879,7 @@ static void cmd_work_handler(struct work_struct *work)
        iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
        mmiowb();
        /* if not in polling don't use ent after this point */
-       if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
+       if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
                poll_timeout(ent);
                /* make sure we read the descriptor after ownership is SW */
                rmb();
@@ -1276,7 +1278,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 {
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-       char outlen_str[8];
+       char outlen_str[8] = {0};
        int outlen;
        void *ptr;
        int err;
@@ -1291,8 +1293,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
        if (copy_from_user(outlen_str, buf, count))
                return -EFAULT;
 
-       outlen_str[7] = 0;
-
        err = sscanf(outlen_str, "%d", &outlen);
        if (err < 0)
                return err;
index eb9eb7aa953ae5560db4746569a02f177bcddf13..e1b237ccdf56d9fa17a75e6444d6e718d3a705c6 100644 (file)
@@ -137,7 +137,6 @@ struct page_pool;
 #define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
 #define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
 #define MLX5E_TX_CQ_POLL_BUDGET        128
-#define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
 #define MLX5E_SQ_RECOVER_MIN_INTERVAL  500 /* msecs */
 
 #define MLX5E_UMR_WQE_INLINE_SZ \
@@ -866,7 +865,8 @@ struct mlx5e_profile {
 void mlx5e_build_ptys2ethtool_map(void);
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-                      void *accel_priv, select_queue_fallback_t fallback);
+                      struct net_device *sb_dev,
+                      select_queue_fallback_t fallback);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                          struct mlx5e_tx_wqe *wqe, u16 pi);
index f20074dbef32c3c2d2bfafcf500cbccde6ef84c9..39a5d13ba45997384b304f1ae4eeed03533a0b7a 100644 (file)
 #ifndef __MLX5E_EN_ACCEL_H__
 #define __MLX5E_EN_ACCEL_H__
 
-#ifdef CONFIG_MLX5_ACCEL
-
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include "en_accel/ipsec_rxtx.h"
 #include "en_accel/tls_rxtx.h"
+#include "en_accel/rxtx.h"
 #include "en.h"
 
 static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
@@ -64,9 +63,13 @@ static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
        }
 #endif
 
+       if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+               skb = mlx5e_udp_gso_handle_tx_skb(dev, sq, skb, wqe, pi);
+               if (unlikely(!skb))
+                       return NULL;
+       }
+
        return skb;
 }
 
-#endif /* CONFIG_MLX5_ACCEL */
-
 #endif /* __MLX5E_EN_ACCEL_H__ */
index c245d8e78509f4c791a4099238eeba0a027948ff..128a82b1dbfc66147c1df824613f5d1e807a6e40 100644 (file)
@@ -37,6 +37,7 @@
 
 #include "en_accel/ipsec_rxtx.h"
 #include "en_accel/ipsec.h"
+#include "accel/accel.h"
 #include "en.h"
 
 enum {
@@ -346,19 +347,12 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
 }
 
 struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
-                                         struct sk_buff *skb)
+                                         struct sk_buff *skb, u32 *cqe_bcnt)
 {
        struct mlx5e_ipsec_metadata *mdata;
-       struct ethhdr *old_eth;
-       struct ethhdr *new_eth;
        struct xfrm_state *xs;
-       __be16 *ethtype;
 
-       /* Detect inline metadata */
-       if (skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)
-               return skb;
-       ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
-       if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
+       if (!is_metadata_hdr_valid(skb))
                return skb;
 
        /* Use the metadata */
@@ -369,12 +363,8 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
                return NULL;
        }
 
-       /* Remove the metadata from the buffer */
-       old_eth = (struct ethhdr *)skb->data;
-       new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
-       memmove(new_eth, old_eth, 2 * ETH_ALEN);
-       /* Ethertype is already in its new place */
-       skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
+       remove_metadata_hdr(skb);
+       *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
 
        return skb;
 }
index 2bfbbef1b054a9435344c790c97a580a6dec8950..ca47c0540904aaefe5d44bd309940b2f21716ea0 100644 (file)
@@ -41,7 +41,7 @@
 #include "en.h"
 
 struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
-                                         struct sk_buff *skb);
+                                         struct sk_buff *skb, u32 *cqe_bcnt);
 void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 
 void mlx5e_ipsec_inverse_table_init(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c
new file mode 100644 (file)
index 0000000..7b7ec39
--- /dev/null
@@ -0,0 +1,109 @@
+#include "en_accel/rxtx.h"
+
+static void mlx5e_udp_gso_prepare_last_skb(struct sk_buff *skb,
+                                          struct sk_buff *nskb,
+                                          int remaining)
+{
+       int bytes_needed = remaining, remaining_headlen, remaining_page_offset;
+       int headlen = skb_transport_offset(skb) + sizeof(struct udphdr);
+       int payload_len = remaining + sizeof(struct udphdr);
+       int k = 0, i, j;
+
+       skb_copy_bits(skb, 0, nskb->data, headlen);
+       nskb->dev = skb->dev;
+       skb_reset_mac_header(nskb);
+       skb_set_network_header(nskb, skb_network_offset(skb));
+       skb_set_transport_header(nskb, skb_transport_offset(skb));
+       skb_set_tail_pointer(nskb, headlen);
+
+       /* How many frags do we need? */
+       for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
+               bytes_needed -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
+               k++;
+               if (bytes_needed <= 0)
+                       break;
+       }
+
+       /* Fill the first frag and split it if necessary */
+       j = skb_shinfo(skb)->nr_frags - k;
+       remaining_page_offset = -bytes_needed;
+       skb_fill_page_desc(nskb, 0,
+                          skb_shinfo(skb)->frags[j].page.p,
+                          skb_shinfo(skb)->frags[j].page_offset + remaining_page_offset,
+                          skb_shinfo(skb)->frags[j].size - remaining_page_offset);
+
+       skb_frag_ref(skb, j);
+
+       /* Fill the rest of the frags */
+       for (i = 1; i < k; i++) {
+               j = skb_shinfo(skb)->nr_frags - k + i;
+
+               skb_fill_page_desc(nskb, i,
+                                  skb_shinfo(skb)->frags[j].page.p,
+                                  skb_shinfo(skb)->frags[j].page_offset,
+                                  skb_shinfo(skb)->frags[j].size);
+               skb_frag_ref(skb, j);
+       }
+       skb_shinfo(nskb)->nr_frags = k;
+
+       remaining_headlen = remaining - skb->data_len;
+
+       /* Does the linear headlen hold part of the remaining payload? */
+       if (remaining_headlen > 0)
+               skb_copy_bits(skb, skb->len - remaining, nskb->data + headlen,
+                             remaining_headlen);
+       nskb->len = remaining + headlen;
+       nskb->data_len =  payload_len - sizeof(struct udphdr) +
+               max_t(int, 0, remaining_headlen);
+       nskb->protocol = skb->protocol;
+       if (nskb->protocol == htons(ETH_P_IP)) {
+               ip_hdr(nskb)->id = htons(ntohs(ip_hdr(nskb)->id) +
+                                        skb_shinfo(skb)->gso_segs);
+               ip_hdr(nskb)->tot_len =
+                       htons(payload_len + sizeof(struct iphdr));
+       } else {
+               ipv6_hdr(nskb)->payload_len = htons(payload_len);
+       }
+       udp_hdr(nskb)->len = htons(payload_len);
+       skb_shinfo(nskb)->gso_size = 0;
+       nskb->ip_summed = skb->ip_summed;
+       nskb->csum_start = skb->csum_start;
+       nskb->csum_offset = skb->csum_offset;
+       nskb->queue_mapping = skb->queue_mapping;
+}
+
+/* May transmit the trimmed skb and update wqe/pi; returns the skb to continue with, or NULL */
+struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev,
+                                           struct mlx5e_txqsq *sq,
+                                           struct sk_buff *skb,
+                                           struct mlx5e_tx_wqe **wqe,
+                                           u16 *pi)
+{
+       int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+       int headlen = skb_transport_offset(skb) + sizeof(struct udphdr);
+       int remaining = (skb->len - headlen) % skb_shinfo(skb)->gso_size;
+       struct sk_buff *nskb;
+
+       if (skb->protocol == htons(ETH_P_IP))
+               ip_hdr(skb)->tot_len = htons(payload_len + sizeof(struct iphdr));
+       else
+               ipv6_hdr(skb)->payload_len = htons(payload_len);
+       udp_hdr(skb)->len = htons(payload_len);
+       if (!remaining)
+               return skb;
+
+       sq->stats->udp_seg_rem++;
+       nskb = alloc_skb(max_t(int, headlen, headlen + remaining - skb->data_len), GFP_ATOMIC);
+       if (unlikely(!nskb)) {
+               sq->stats->dropped++;
+               return NULL;
+       }
+
+       mlx5e_udp_gso_prepare_last_skb(skb, nskb, remaining);
+
+       skb_shinfo(skb)->gso_segs--;
+       pskb_trim(skb, skb->len - remaining);
+       mlx5e_sq_xmit(sq, skb, *wqe, *pi);
+       mlx5e_sq_fetch_wqe(sq, wqe, pi);
+       return nskb;
+}
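
The helper above exists because the hardware requires every UDP GSO segment to carry exactly gso_size bytes of payload: when the total payload does not divide evenly, the trailing remainder is rebuilt into nskb, the original skb is trimmed and transmitted, and the remainder continues down the normal path. The arithmetic in miniature (standalone, made-up numbers):

#include <stdio.h>

int main(void)
{
	int headlen  = 42;		/* ETH + IP + UDP headers */
	int gso_size = 1400;		/* payload bytes per segment */
	int skb_len  = headlen + 4500;	/* total packet length */

	int payload   = skb_len - headlen;
	int remaining = payload % gso_size;	/* 4500 % 1400 = 300 */
	int full_segs = payload / gso_size;	/* 3 full segments */

	printf("%d full segments of %d, trailing segment of %d bytes\n",
	       full_segs, gso_size, remaining);
	return 0;
}
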
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h
new file mode 100644 (file)
index 0000000..ed42699
--- /dev/null
@@ -0,0 +1,14 @@
+
+#ifndef __MLX5E_EN_ACCEL_RX_TX_H__
+#define __MLX5E_EN_ACCEL_RX_TX_H__
+
+#include <linux/skbuff.h>
+#include "en.h"
+
+struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev,
+                                           struct mlx5e_txqsq *sq,
+                                           struct sk_buff *skb,
+                                           struct mlx5e_tx_wqe **wqe,
+                                           u16 *pi);
+
+#endif
index d167845271c33f7e5d568f0b0aa26242cfd98333..eddd7702680bce0284f8518e2f04920116a80e0b 100644 (file)
@@ -110,9 +110,7 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
        u32 caps = mlx5_accel_tls_device_caps(mdev);
        int ret = -ENOMEM;
        void *flow;
-
-       if (direction != TLS_OFFLOAD_CTX_DIR_TX)
-               return -EINVAL;
+       u32 swid;
 
        flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
        if (!flow)
@@ -122,18 +120,23 @@ static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
        if (ret)
                goto free_flow;
 
+       ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
+                                     start_offload_tcp_sn, &swid,
+                                     direction == TLS_OFFLOAD_CTX_DIR_TX);
+       if (ret < 0)
+               goto free_flow;
+
        if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
-               struct mlx5e_tls_offload_context *tx_ctx =
+               struct mlx5e_tls_offload_context_tx *tx_ctx =
                    mlx5e_get_tls_tx_context(tls_ctx);
-               u32 swid;
-
-               ret = mlx5_accel_tls_add_tx_flow(mdev, flow, crypto_info,
-                                                start_offload_tcp_sn, &swid);
-               if (ret < 0)
-                       goto free_flow;
 
                tx_ctx->swid = htonl(swid);
                tx_ctx->expected_seq = start_offload_tcp_sn;
+       } else {
+               struct mlx5e_tls_offload_context_rx *rx_ctx =
+                   mlx5e_get_tls_rx_context(tls_ctx);
+
+               rx_ctx->handle = htonl(swid);
        }
 
        return 0;
@@ -147,30 +150,60 @@ static void mlx5e_tls_del(struct net_device *netdev,
                          enum tls_offload_ctx_dir direction)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
+       unsigned int handle;
 
-       if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
-               u32 swid = ntohl(mlx5e_get_tls_tx_context(tls_ctx)->swid);
+       handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ?
+                      mlx5e_get_tls_tx_context(tls_ctx)->swid :
+                      mlx5e_get_tls_rx_context(tls_ctx)->handle);
 
-               mlx5_accel_tls_del_tx_flow(priv->mdev, swid);
-       } else {
-               netdev_err(netdev, "unsupported direction %d\n", direction);
-       }
+       mlx5_accel_tls_del_flow(priv->mdev, handle,
+                               direction == TLS_OFFLOAD_CTX_DIR_TX);
+}
+
+static void mlx5e_tls_resync_rx(struct net_device *netdev, struct sock *sk,
+                               u32 seq, u64 rcd_sn)
+{
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5e_tls_offload_context_rx *rx_ctx;
+
+       rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
+
+       netdev_info(netdev, "resyncing seq %u rcd %llu\n", seq,
+                   be64_to_cpu(rcd_sn));
+       mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
+       atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);
 }
 
 static const struct tlsdev_ops mlx5e_tls_ops = {
        .tls_dev_add = mlx5e_tls_add,
        .tls_dev_del = mlx5e_tls_del,
+       .tls_dev_resync_rx = mlx5e_tls_resync_rx,
 };
 
 void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
 {
+       u32 caps = mlx5_accel_tls_device_caps(priv->mdev);
        struct net_device *netdev = priv->netdev;
 
        if (!mlx5_accel_is_tls_device(priv->mdev))
                return;
 
-       netdev->features |= NETIF_F_HW_TLS_TX;
-       netdev->hw_features |= NETIF_F_HW_TLS_TX;
+       if (caps & MLX5_ACCEL_TLS_TX) {
+               netdev->features          |= NETIF_F_HW_TLS_TX;
+               netdev->hw_features       |= NETIF_F_HW_TLS_TX;
+       }
+
+       if (caps & MLX5_ACCEL_TLS_RX) {
+               netdev->features          |= NETIF_F_HW_TLS_RX;
+               netdev->hw_features       |= NETIF_F_HW_TLS_RX;
+       }
+
+       if (!(caps & MLX5_ACCEL_TLS_LRO)) {
+               netdev->features          &= ~NETIF_F_LRO;
+               netdev->hw_features       &= ~NETIF_F_LRO;
+       }
+
        netdev->tlsdev_ops = &mlx5e_tls_ops;
 }
 
index b6162178f6211371cb72119e89d7a2a908e3059a..3f5d72163b56103c9c8d8c0b6536330e324d848a 100644 (file)
@@ -43,25 +43,44 @@ struct mlx5e_tls_sw_stats {
        atomic64_t tx_tls_drop_resync_alloc;
        atomic64_t tx_tls_drop_no_sync_data;
        atomic64_t tx_tls_drop_bypass_required;
+       atomic64_t rx_tls_drop_resync_request;
+       atomic64_t rx_tls_resync_request;
+       atomic64_t rx_tls_resync_reply;
+       atomic64_t rx_tls_auth_fail;
 };
 
 struct mlx5e_tls {
        struct mlx5e_tls_sw_stats sw_stats;
 };
 
-struct mlx5e_tls_offload_context {
-       struct tls_offload_context base;
+struct mlx5e_tls_offload_context_tx {
+       struct tls_offload_context_tx base;
        u32 expected_seq;
        __be32 swid;
 };
 
-static inline struct mlx5e_tls_offload_context *
+static inline struct mlx5e_tls_offload_context_tx *
 mlx5e_get_tls_tx_context(struct tls_context *tls_ctx)
 {
-       BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context) >
-                    TLS_OFFLOAD_CONTEXT_SIZE);
-       return container_of(tls_offload_ctx(tls_ctx),
-                           struct mlx5e_tls_offload_context,
+       BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) >
+                    TLS_OFFLOAD_CONTEXT_SIZE_TX);
+       return container_of(tls_offload_ctx_tx(tls_ctx),
+                           struct mlx5e_tls_offload_context_tx,
+                           base);
+}
+
+struct mlx5e_tls_offload_context_rx {
+       struct tls_offload_context_rx base;
+       __be32 handle;
+};
+
+static inline struct mlx5e_tls_offload_context_rx *
+mlx5e_get_tls_rx_context(struct tls_context *tls_ctx)
+{
+       BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) >
+                    TLS_OFFLOAD_CONTEXT_SIZE_RX);
+       return container_of(tls_offload_ctx_rx(tls_ctx),
+                           struct mlx5e_tls_offload_context_rx,
                            base);
 }
 
index 15aef71d19576b251891b962bbf4784c90d4cf27..92d37459850eb6ff2a3a1ebc5bee554fb709c884 100644 (file)
 
 #include "en_accel/tls.h"
 #include "en_accel/tls_rxtx.h"
+#include "accel/accel.h"
+
+#include <net/inet6_hashtables.h>
+#include <linux/ipv6.h>
+
+#define SYNDROME_DECRYPTED  0x30
+#define SYNDROME_RESYNC_REQUEST 0x31
+#define SYNDROME_AUTH_FAILED 0x32
 
 #define SYNDROME_OFFLOAD_REQUIRED 32
 #define SYNDROME_SYNC 33
@@ -44,10 +52,26 @@ struct sync_info {
        skb_frag_t frags[MAX_SKB_FRAGS];
 };
 
-struct mlx5e_tls_metadata {
+struct recv_metadata_content {
+       u8 syndrome;
+       u8 reserved;
+       __be32 sync_seq;
+} __packed;
+
+struct send_metadata_content {
        /* One byte of syndrome followed by 3 bytes of swid */
        __be32 syndrome_swid;
        __be16 first_seq;
+} __packed;
+
+struct mlx5e_tls_metadata {
+       union {
+               /* from fpga to host */
+               struct recv_metadata_content recv;
+               /* from host to fpga */
+               struct send_metadata_content send;
+               unsigned char raw[6];
+       } __packed content;
        /* packet type ID field */
        __be16 ethertype;
 } __packed;
@@ -68,12 +92,13 @@ static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
                2 * ETH_ALEN);
 
        eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
-       pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
+       pet->content.send.syndrome_swid =
+               htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
 
        return 0;
 }
 
-static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context,
+static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context,
                                   u32 tcp_seq, struct sync_info *info)
 {
        int remaining, i = 0, ret = -EINVAL;
@@ -149,7 +174,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
 
        pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
        memcpy(pet, &syndrome, sizeof(syndrome));
-       pet->first_seq = htons(tcp_seq);
+       pet->content.send.first_seq = htons(tcp_seq);
 
        /* MLX5 devices don't care about the checksum partial start, offset
         * and pseudo header
@@ -161,7 +186,7 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
 }
 
 static struct sk_buff *
-mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
+mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
                     struct mlx5e_txqsq *sq, struct sk_buff *skb,
                     struct mlx5e_tx_wqe **wqe,
                     u16 *pi,
@@ -239,7 +264,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
                                        u16 *pi)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       struct mlx5e_tls_offload_context *context;
+       struct mlx5e_tls_offload_context_tx *context;
        struct tls_context *tls_ctx;
        u32 expected_seq;
        int datalen;
@@ -276,3 +301,83 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
 out:
        return skb;
 }
+
+static int tls_update_resync_sn(struct net_device *netdev,
+                               struct sk_buff *skb,
+                               struct mlx5e_tls_metadata *mdata)
+{
+       struct sock *sk = NULL;
+       struct iphdr *iph;
+       struct tcphdr *th;
+       __be32 seq;
+
+       if (mdata->ethertype != htons(ETH_P_IP))
+               return -EINVAL;
+
+       iph = (struct iphdr *)(mdata + 1);
+
+       th = ((void *)iph) + iph->ihl * 4;
+
+       if (iph->version == 4) {
+               sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+                                            iph->saddr, th->source, iph->daddr,
+                                            th->dest, netdev->ifindex);
+#if IS_ENABLED(CONFIG_IPV6)
+       } else {
+               struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
+
+               sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+                                               &ipv6h->saddr, th->source,
+                                               &ipv6h->daddr, th->dest,
+                                               netdev->ifindex, 0);
+#endif
+       }
+       if (!sk || sk->sk_state == TCP_TIME_WAIT) {
+               struct mlx5e_priv *priv = netdev_priv(netdev);
+
+               atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request);
+               goto out;
+       }
+
+       skb->sk = sk;
+       skb->destructor = sock_edemux;
+
+       memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq));
+       tls_offload_rx_resync_request(sk, seq);
+out:
+       return 0;
+}
+
+void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+                            u32 *cqe_bcnt)
+{
+       struct mlx5e_tls_metadata *mdata;
+       struct mlx5e_priv *priv;
+
+       if (!is_metadata_hdr_valid(skb))
+               return;
+
+       /* Use the metadata */
+       mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
+       switch (mdata->content.recv.syndrome) {
+       case SYNDROME_DECRYPTED:
+               skb->decrypted = 1;
+               break;
+       case SYNDROME_RESYNC_REQUEST:
+               tls_update_resync_sn(netdev, skb, mdata);
+               priv = netdev_priv(netdev);
+               atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
+               break;
+       case SYNDROME_AUTH_FAILED:
+               /* Authentication failure will be observed and verified by kTLS */
+               priv = netdev_priv(netdev);
+               atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
+               break;
+       default:
+               /* Unknown syndrome: leave the metadata header for other handlers */
+               return;
+       }
+
+       remove_metadata_hdr(skb);
+       *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
+}
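
The metadata trailer parsed here is six content bytes plus the two-byte magic ethertype, interpreted per direction through the union above. A standalone sketch of decoding the receive flavor from raw bytes (layout per struct recv_metadata_content; the sequence value is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define SYNDROME_DECRYPTED	0x30
#define SYNDROME_RESYNC_REQUEST	0x31
#define SYNDROME_AUTH_FAILED	0x32

struct recv_metadata {
	uint8_t  syndrome;
	uint8_t  reserved;
	uint32_t sync_seq;	/* big endian on the wire */
} __attribute__((packed));

int main(void)
{
	uint8_t wire[6] = { SYNDROME_RESYNC_REQUEST, 0, 0, 0, 0x12, 0x34 };
	struct recv_metadata md;

	memcpy(&md, wire, sizeof(md));
	switch (md.syndrome) {
	case SYNDROME_DECRYPTED:
		puts("record already decrypted by the NIC");
		break;
	case SYNDROME_RESYNC_REQUEST:
		printf("resync requested at seq %u\n", ntohl(md.sync_seq));
		break;
	case SYNDROME_AUTH_FAILED:
		puts("leave the record to software kTLS");
		break;
	}
	return 0;
}
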
index 405dfd302225c4f3737efe79efe0b7c07b9d6871..311667ec71b89b5007370eb75b9cf2e525b131bc 100644 (file)
@@ -45,6 +45,9 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
                                        struct mlx5e_tx_wqe **wqe,
                                        u16 *pi);
 
+void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+                            u32 *cqe_bcnt);
+
 #endif /* CONFIG_MLX5_EN_TLS */
 
 #endif /* __MLX5E_TLS_RXTX_H__ */
index 56c1b6f5593e053d4629b15635bacf1ece9d6a88..712b9766485f4b4f5e89e6e926c53c97e326eec8 100644 (file)
@@ -270,12 +270,9 @@ void mlx5e_update_stats_work(struct work_struct *work)
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
                                               update_stats_work);
+
        mutex_lock(&priv->state_lock);
-       if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-               priv->profile->update_stats(priv);
-               queue_delayed_work(priv->wq, dwork,
-                                  msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
-       }
+       priv->profile->update_stats(priv);
        mutex_unlock(&priv->state_lock);
 }
 
@@ -352,8 +349,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
 {
        int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
 
-       rq->mpwqe.info = kcalloc_node(wq_sz, sizeof(*rq->mpwqe.info),
-                                     GFP_KERNEL, cpu_to_node(c->cpu));
+       rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
+                                                 sizeof(*rq->mpwqe.info)),
+                                      GFP_KERNEL, cpu_to_node(c->cpu));
        if (!rq->mpwqe.info)
                return -ENOMEM;
 
@@ -670,7 +668,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 err_free:
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               kfree(rq->mpwqe.info);
+               kvfree(rq->mpwqe.info);
                mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
                break;
        default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -702,7 +700,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               kfree(rq->mpwqe.info);
+               kvfree(rq->mpwqe.info);
                mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
                break;
        default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -965,15 +963,15 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
 
 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
 {
-       kfree(sq->db.di);
+       kvfree(sq->db.di);
 }
 
 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
 {
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
 
-       sq->db.di = kcalloc_node(wq_sz, sizeof(*sq->db.di),
-                                    GFP_KERNEL, numa);
+       sq->db.di = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.di)),
+                                 GFP_KERNEL, numa);
        if (!sq->db.di) {
                mlx5e_free_xdpsq_db(sq);
                return -ENOMEM;
@@ -1024,15 +1022,16 @@ static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
 
 static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
 {
-       kfree(sq->db.ico_wqe);
+       kvfree(sq->db.ico_wqe);
 }
 
 static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
 {
        u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
 
-       sq->db.ico_wqe = kcalloc_node(wq_sz, sizeof(*sq->db.ico_wqe),
-                                     GFP_KERNEL, numa);
+       sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
+                                                 sizeof(*sq->db.ico_wqe)),
+                                      GFP_KERNEL, numa);
        if (!sq->db.ico_wqe)
                return -ENOMEM;
 
@@ -1077,8 +1076,8 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
 
 static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
 {
-       kfree(sq->db.wqe_info);
-       kfree(sq->db.dma_fifo);
+       kvfree(sq->db.wqe_info);
+       kvfree(sq->db.dma_fifo);
 }
 
 static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
@@ -1086,10 +1085,12 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
        int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
 
-       sq->db.dma_fifo = kcalloc_node(df_sz, sizeof(*sq->db.dma_fifo),
-                                          GFP_KERNEL, numa);
-       sq->db.wqe_info = kcalloc_node(wq_sz, sizeof(*sq->db.wqe_info),
-                                          GFP_KERNEL, numa);
+       sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
+                                                  sizeof(*sq->db.dma_fifo)),
+                                       GFP_KERNEL, numa);
+       sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
+                                                  sizeof(*sq->db.wqe_info)),
+                                       GFP_KERNEL, numa);
        if (!sq->db.dma_fifo || !sq->db.wqe_info) {
                mlx5e_free_txqsq_db(sq);
                return -ENOMEM;
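
The allocation changes in this file all follow one pattern: per-ring arrays move from kcalloc_node() to kvzalloc_node(array_size(...)), so large rings may fall back to vmalloc instead of requiring physically contiguous pages, with kvfree() on the release side. The pattern in isolation (example_db is illustrative):

    struct example_db {
            u64 *entries;
    };

    static int example_alloc_db(struct example_db *db, int wq_sz, int numa)
    {
            /* array_size() saturates to SIZE_MAX on multiply overflow */
            db->entries = kvzalloc_node(array_size(wq_sz, sizeof(*db->entries)),
                                        GFP_KERNEL, numa);
            return db->entries ? 0 : -ENOMEM;
    }

    static void example_free_db(struct example_db *db)
    {
            kvfree(db->entries);    /* handles kmalloc and vmalloc memory alike */
    }
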
@@ -1893,7 +1894,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
        int err;
        int eqn;
 
-       c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+       c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;
 
@@ -1979,7 +1980,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 
 err_napi_del:
        netif_napi_del(&c->napi);
-       kfree(c);
+       kvfree(c);
 
        return err;
 }
@@ -2018,7 +2019,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
        mlx5e_close_cq(&c->icosq.cq);
        netif_napi_del(&c->napi);
 
-       kfree(c);
+       kvfree(c);
 }
 
 #define DEFAULT_FRAG_SIZE (2048)
@@ -2276,7 +2277,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
        chs->num = chs->params.num_channels;
 
        chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
-       cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
+       cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
        if (!chs->c || !cparam)
                goto err_free;
 
@@ -2287,7 +2288,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
                        goto err_close_channels;
        }
 
-       kfree(cparam);
+       kvfree(cparam);
        return 0;
 
 err_close_channels:
@@ -2296,7 +2297,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
 
 err_free:
        kfree(chs->c);
-       kfree(cparam);
+       kvfree(cparam);
        chs->num = 0;
        return err;
 }
@@ -2846,7 +2847,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
        mlx5e_activate_channels(&priv->channels);
        netif_tx_start_all_queues(priv->netdev);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_add_sqs_fwd_rules(priv);
 
        mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2857,7 +2858,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 {
        mlx5e_redirect_rqts_to_drop(priv);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_remove_sqs_fwd_rules(priv);
 
        /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
@@ -3371,7 +3372,7 @@ static int mlx5e_setup_tc_block(struct net_device *dev,
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
-                                            priv, priv);
+                                            priv, priv, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
                                        priv);
@@ -3405,6 +3406,9 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
        struct mlx5e_vport_stats *vstats = &priv->stats.vport;
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
 
+       /* update HW stats in background for next time */
+       queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
+
        if (mlx5e_is_uplink_rep(priv)) {
                stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
                stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
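
Together with the mlx5e_update_stats_work() change at the top of this file, this moves stats collection to a pull model: .ndo_get_stats64 returns the cached counters immediately and queues a zero-delay refresh so the next reader sees fresh values, instead of polling on a fixed interval. A sketch of the idea (example_priv and cached_stats are illustrative):

    static void example_get_stats(struct net_device *dev,
                                  struct rtnl_link_stats64 *stats)
    {
            struct example_priv *priv = netdev_priv(dev);

            /* refresh HW counters in the background for the next caller */
            queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

            /* report the last snapshot without sleeping */
            *stats = priv->cached_stats;
    }
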
@@ -4192,7 +4196,6 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
                return mlx5e_xdp_set(dev, xdp->prog);
        case XDP_QUERY_PROG:
                xdp->prog_id = mlx5e_xdp_query(dev);
-               xdp->prog_attached = !!xdp->prog_id;
                return 0;
        default:
                return -EINVAL;
@@ -4592,12 +4595,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
        netdev->features         |= NETIF_F_HIGHDMA;
        netdev->features         |= NETIF_F_HW_VLAN_STAG_FILTER;
 
+       netdev->features         |= NETIF_F_GSO_UDP_L4;
+       netdev->hw_features      |= NETIF_F_GSO_UDP_L4;
+
        netdev->priv_flags       |= IFF_UNICAST_FLT;
 
        mlx5e_set_netdev_dev_addr(netdev);
 
 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
-       if (MLX5_VPORT_MANAGER(mdev))
+       if (MLX5_ESWITCH_MANAGER(mdev))
                netdev->switchdev_ops = &mlx5e_switchdev_ops;
 #endif
 
@@ -4753,7 +4759,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
        mlx5e_enable_async_events(priv);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_register_vport_reps(priv);
 
        if (netdev->reg_state != NETREG_REGISTERED)
@@ -4788,7 +4794,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 
        queue_work(priv->wq, &priv->set_rx_mode_work);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_unregister_vport_reps(priv);
 
        mlx5e_disable_async_events(priv);
@@ -4972,7 +4978,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
                return NULL;
 
 #ifdef CONFIG_MLX5_ESWITCH
-       if (MLX5_VPORT_MANAGER(mdev)) {
+       if (MLX5_ESWITCH_MANAGER(mdev)) {
                rpriv = mlx5e_alloc_nic_rep_priv(mdev);
                if (!rpriv) {
                        mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
index 57987f6546e8357bdfaeb3e657e0f07fe47d940a..8e3c5b4b90ab9d77e7833a1ce4db7db042b108dc 100644 (file)
@@ -797,7 +797,7 @@ static int mlx5e_rep_setup_tc_block(struct net_device *dev,
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
-                                            priv, priv);
+                                            priv, priv, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
                return 0;
@@ -823,7 +823,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;
 
-       if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+       if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                return false;
 
        rep = rpriv->rep;
@@ -837,8 +837,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
 static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
 {
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
-       struct mlx5_eswitch_rep *rep = rpriv->rep;
+       struct mlx5_eswitch_rep *rep;
+
+       if (!MLX5_ESWITCH_MANAGER(priv->mdev))
+               return false;
 
+       rep = rpriv->rep;
        if (rep && rep->vport != FDB_UPLINK_VPORT)
                return true;
 
@@ -893,6 +897,9 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
 
+       /* update HW stats in background for next time */
+       queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
+
        memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
 }
 
index d3a1dd20e41d4c8b3d68669d1fc40a9b4e1e63e4..1d5295ee863cb47de7067f140f1edbf427123c66 100644 (file)
@@ -44,6 +44,7 @@
 #include "en_rep.h"
 #include "ipoib/ipoib.h"
 #include "en_accel/ipsec_rxtx.h"
+#include "en_accel/tls_rxtx.h"
 #include "lib/clock.h"
 
 static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
@@ -487,7 +488,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 
        sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
        sq->pc += MLX5E_UMR_WQEBBS;
-       mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);
+       mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);
 
        return 0;
 
@@ -601,6 +602,8 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
 
        if (!rq->mpwqe.umr_in_progress)
                mlx5e_alloc_rx_mpwqe(rq, wq->head);
+       else
+               rq->stats->congst_umr += mlx5_wq_ll_missing(wq) > 2;
 
        return false;
 }
@@ -795,6 +798,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
        struct net_device *netdev = rq->netdev;
 
        skb->mac_len = ETH_HLEN;
+
+#ifdef CONFIG_MLX5_EN_TLS
+       mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt);
+#endif
+
        if (lro_num_seg > 1) {
                mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
                skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
@@ -1261,7 +1269,10 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        }
 
        if (unlikely(mpwrq_is_filler_cqe(cqe))) {
-               rq->stats->mpwqe_filler++;
+               struct mlx5e_rq_stats *stats = rq->stats;
+
+               stats->mpwqe_filler_cqes++;
+               stats->mpwqe_filler_strides += cstrides;
                goto mpwrq_cqe_out;
        }
 
@@ -1383,6 +1394,8 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
                } while (!last_wqe);
        } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
+       rq->stats->xdp_tx_cqe += i;
+
        mlx5_cqwq_update_db_record(&cq->wq);
 
        /* ensure cq space is freed before enabling more cqes */
@@ -1534,7 +1547,7 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
                mlx5e_free_rx_wqe(rq, wi);
                goto wq_cyc_pop;
        }
-       skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb);
+       skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
        if (unlikely(!skb)) {
                mlx5e_free_rx_wqe(rq, wi);
                goto wq_cyc_pop;
index 1646859974ce2b8fc76798693a193e8b40d9e158..c0507fada0be86244826e5c9e1000cb618806651 100644 (file)
@@ -44,6 +44,7 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
 
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
@@ -59,6 +60,7 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
@@ -67,10 +69,13 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@@ -80,6 +85,11 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
 };
 
@@ -133,9 +143,11 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_xdp_drop += rq_stats->xdp_drop;
                s->rx_xdp_tx += rq_stats->xdp_tx;
+               s->rx_xdp_tx_cqe  += rq_stats->xdp_tx_cqe;
                s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
                s->rx_wqe_err   += rq_stats->wqe_err;
-               s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
+               s->rx_mpwqe_filler_cqes    += rq_stats->mpwqe_filler_cqes;
+               s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
                s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
                s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
                s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@@ -145,6 +157,11 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
                s->rx_cache_empty += rq_stats->cache_empty;
                s->rx_cache_busy  += rq_stats->cache_busy;
                s->rx_cache_waive += rq_stats->cache_waive;
+               s->rx_congst_umr  += rq_stats->congst_umr;
+               s->ch_events      += ch_stats->events;
+               s->ch_poll        += ch_stats->poll;
+               s->ch_arm         += ch_stats->arm;
+               s->ch_aff_change  += ch_stats->aff_change;
                s->ch_eq_rearm += ch_stats->eq_rearm;
 
                for (j = 0; j < priv->max_opened_tc; j++) {
@@ -157,8 +174,10 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
                        s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
                        s->tx_tso_inner_bytes   += sq_stats->tso_inner_bytes;
                        s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
+                       s->tx_nop               += sq_stats->nop;
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
+                       s->tx_udp_seg_rem       += sq_stats->udp_seg_rem;
                        s->tx_queue_dropped     += sq_stats->dropped;
                        s->tx_cqe_err           += sq_stats->cqe_err;
                        s->tx_recover           += sq_stats->recover;
@@ -170,6 +189,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
                        s->tx_tls_ooo           += sq_stats->tls_ooo;
                        s->tx_tls_resync_bytes  += sq_stats->tls_resync_bytes;
 #endif
+                       s->tx_cqes              += sq_stats->cqes;
                }
        }
 
@@ -1107,12 +1127,14 @@ static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_cqe) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
-       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
@@ -1122,6 +1144,7 @@ static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
 };
 
 static const struct counter_desc sq_stats_desc[] = {
@@ -1140,11 +1163,16 @@ static const struct counter_desc sq_stats_desc[] = {
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
 };
 
 static const struct counter_desc ch_stats_desc[] = {
+       { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
+       { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
+       { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
+       { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
        { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
 };
 
index 643153bb360722375c822f7f530e670c31c7f736..fc3f66003eddd5944be3710596d7d3263f7f44bc 100644 (file)
@@ -61,6 +61,7 @@ struct mlx5e_sw_stats {
        u64 tx_tso_inner_packets;
        u64 tx_tso_inner_bytes;
        u64 tx_added_vlan_packets;
+       u64 tx_nop;
        u64 rx_lro_packets;
        u64 rx_lro_bytes;
        u64 rx_removed_vlan_packets;
@@ -70,6 +71,7 @@ struct mlx5e_sw_stats {
        u64 rx_csum_unnecessary_inner;
        u64 rx_xdp_drop;
        u64 rx_xdp_tx;
+       u64 rx_xdp_tx_cqe;
        u64 rx_xdp_tx_full;
        u64 tx_csum_none;
        u64 tx_csum_partial;
@@ -78,10 +80,13 @@ struct mlx5e_sw_stats {
        u64 tx_queue_dropped;
        u64 tx_xmit_more;
        u64 tx_recover;
+       u64 tx_cqes;
        u64 tx_queue_wake;
+       u64 tx_udp_seg_rem;
        u64 tx_cqe_err;
        u64 rx_wqe_err;
-       u64 rx_mpwqe_filler;
+       u64 rx_mpwqe_filler_cqes;
+       u64 rx_mpwqe_filler_strides;
        u64 rx_buff_alloc_err;
        u64 rx_cqe_compress_blks;
        u64 rx_cqe_compress_pkts;
@@ -91,6 +96,11 @@ struct mlx5e_sw_stats {
        u64 rx_cache_empty;
        u64 rx_cache_busy;
        u64 rx_cache_waive;
+       u64 rx_congst_umr;
+       u64 ch_events;
+       u64 ch_poll;
+       u64 ch_arm;
+       u64 ch_aff_change;
        u64 ch_eq_rearm;
 
 #ifdef CONFIG_MLX5_EN_TLS
@@ -169,9 +179,11 @@ struct mlx5e_rq_stats {
        u64 removed_vlan_packets;
        u64 xdp_drop;
        u64 xdp_tx;
+       u64 xdp_tx_cqe;
        u64 xdp_tx_full;
        u64 wqe_err;
-       u64 mpwqe_filler;
+       u64 mpwqe_filler_cqes;
+       u64 mpwqe_filler_strides;
        u64 buff_alloc_err;
        u64 cqe_compress_blks;
        u64 cqe_compress_pkts;
@@ -181,6 +193,7 @@ struct mlx5e_rq_stats {
        u64 cache_empty;
        u64 cache_busy;
        u64 cache_waive;
+       u64 congst_umr;
 };
 
 struct mlx5e_sq_stats {
@@ -196,6 +209,7 @@ struct mlx5e_sq_stats {
        u64 csum_partial_inner;
        u64 added_vlan_packets;
        u64 nop;
+       u64 udp_seg_rem;
 #ifdef CONFIG_MLX5_EN_TLS
        u64 tls_ooo;
        u64 tls_resync_bytes;
@@ -206,11 +220,16 @@ struct mlx5e_sq_stats {
        u64 dropped;
        u64 recover;
        /* dirtied @completion */
-       u64 wake ____cacheline_aligned_in_smp;
+       u64 cqes ____cacheline_aligned_in_smp;
+       u64 wake;
        u64 cqe_err;
 };
 
 struct mlx5e_ch_stats {
+       u64 events;
+       u64 poll;
+       u64 arm;
+       u64 aff_change;
        u64 eq_rearm;
 };
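
In the mlx5e_sq_stats hunk above, the ____cacheline_aligned_in_smp marker moves onto the new cqes field so that counters dirtied on the xmit path and counters dirtied at completion live on separate cache lines. Reduced to its essence:

    struct example_sq_stats {
            /* dirtied by the xmit path */
            u64 packets;
            u64 bytes;
            /* dirtied at completion; starting a new cache line avoids
             * false sharing when xmit and completion run on different CPUs
             */
            u64 cqes ____cacheline_aligned_in_smp;
            u64 wake;
    };
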
 
index f29deb44bf3b162edefcb0a86af9662789fbde69..9106ea45e3cb7a4f9dac56c4b9ce93e84064331c 100644 (file)
@@ -111,10 +111,11 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
 #endif
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-                      void *accel_priv, select_queue_fallback_t fallback)
+                      struct net_device *sb_dev,
+                      select_queue_fallback_t fallback)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
-       int channel_ix = fallback(dev, skb);
+       int channel_ix = fallback(dev, skb, NULL);
        u16 num_channels;
        int up = 0;
 
@@ -228,7 +229,10 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
                stats->tso_inner_packets++;
                stats->tso_inner_bytes += skb->len - ihs;
        } else {
-               ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+               if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+                       ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
+               else
+                       ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
                stats->tso_packets++;
                stats->tso_bytes += skb->len - ihs;
        }
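
With NETIF_F_GSO_UDP_L4 advertised in en_main.c above, the inline header size computation must distinguish USO from TSO: the UDP header is fixed-size, while TCP's length depends on options. Just that computation, as a sketch:

    static u16 example_gso_ihs(struct sk_buff *skb)
    {
            if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
                    return skb_transport_offset(skb) + sizeof(struct udphdr);

            /* TCP: tcp_hdrlen() includes any options */
            return skb_transport_offset(skb) + tcp_hdrlen(skb);
    }
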
@@ -443,12 +447,11 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
        sq = priv->txq2sq[skb_get_queue_mapping(skb)];
        mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
 
-#ifdef CONFIG_MLX5_ACCEL
        /* might send skbs and update wqe and pi */
        skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
        if (unlikely(!skb))
                return NETDEV_TX_OK;
-#endif
+
        return mlx5e_sq_xmit(sq, skb, wqe, pi);
 }
 
@@ -466,6 +469,7 @@ static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
 
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
+       struct mlx5e_sq_stats *stats;
        struct mlx5e_txqsq *sq;
        struct mlx5_cqe64 *cqe;
        u32 dma_fifo_cc;
@@ -483,6 +487,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
        if (!cqe)
                return false;
 
+       stats = sq->stats;
+
        npkts = 0;
        nbytes = 0;
 
@@ -511,7 +517,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
                                queue_work(cq->channel->priv->wq,
                                           &sq->recover.recover_work);
                        }
-                       sq->stats->cqe_err++;
+                       stats->cqe_err++;
                }
 
                do {
@@ -556,6 +562,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
        } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
+       stats->cqes += i;
+
        mlx5_cqwq_update_db_record(&cq->wq);
 
        /* ensure cq space is freed before enabling more cqes */
@@ -571,7 +579,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
                                   MLX5E_SQ_STOP_ROOM) &&
            !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
                netif_tx_wake_queue(sq->txq);
-               sq->stats->wake++;
+               stats->wake++;
        }
 
        return (i == MLX5E_TX_CQ_POLL_BUDGET);
index 1b17f682693b90b09300fcf92fd7ed74aa29fef5..4e1f99a98d5dd35c835f8af2863dc9cd4d6e7635 100644 (file)
@@ -74,10 +74,13 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 {
        struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
                                               napi);
+       struct mlx5e_ch_stats *ch_stats = c->stats;
        bool busy = false;
        int work_done = 0;
        int i;
 
+       ch_stats->poll++;
+
        for (i = 0; i < c->num_tc; i++)
                busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
 
@@ -94,6 +97,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
        if (busy) {
                if (likely(mlx5e_channel_no_affinity_change(c)))
                        return budget;
+               ch_stats->aff_change++;
                if (budget && work_done == budget)
                        work_done--;
        }
@@ -101,6 +105,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
        if (unlikely(!napi_complete_done(napi, work_done)))
                return work_done;
 
+       ch_stats->arm++;
+
        for (i = 0; i < c->num_tc; i++) {
                mlx5e_handle_tx_dim(&c->sq[i]);
                mlx5e_cq_arm(&c->sq[i].cq);
@@ -118,8 +124,9 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
 {
        struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
 
-       cq->event_ctr++;
        napi_schedule(cq->napi);
+       cq->event_ctr++;
+       cq->channel->stats->events++;
 }
 
 void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
index f63dfbcd29fea1efc2237d6dcecdbdd74259e1a0..b79d74860a304669eb4e05cf03fa0a267213926a 100644 (file)
@@ -1594,17 +1594,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 }
 
 /* Public E-Switch API */
-#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev))
+#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
+
 
 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 {
        int err;
        int i, enabled_events;
 
-       if (!ESW_ALLOWED(esw))
-               return 0;
-
-       if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
+       if (!ESW_ALLOWED(esw) ||
            !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
                esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
                return -EOPNOTSUPP;
@@ -1806,7 +1804,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
        u64 node_guid;
        int err = 0;
 
-       if (!ESW_ALLOWED(esw))
+       if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
                return -EINVAL;
@@ -1883,7 +1881,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 {
        struct mlx5_vport *evport;
 
-       if (!ESW_ALLOWED(esw))
+       if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport))
                return -EINVAL;
index cecd201f0b73ab8a42693a79070c21bcc850d6e4..91f1209886ffdbb37af33ac32369f312296f8bfa 100644 (file)
@@ -1079,8 +1079,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return -EOPNOTSUPP;
 
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
-               return -EOPNOTSUPP;
+       if (!MLX5_ESWITCH_MANAGER(dev))
+               return -EPERM;
 
        if (dev->priv.eswitch->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
index c9736238604ab266d320e380bbcb03e1b4b3cdb9..5cf5f2a9d51fec724f4fac709e29e40f4110d5f7 100644 (file)
@@ -129,6 +129,7 @@ static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
 static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
                                    void *ptr)
 {
+       unsigned long flags;
        int ret;
 
        /* TLS metadata format is 1 byte for syndrome followed
@@ -139,9 +140,9 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
        BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);
 
        idr_preload(GFP_KERNEL);
-       spin_lock_irq(idr_spinlock);
+       spin_lock_irqsave(idr_spinlock, flags);
        ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
-       spin_unlock_irq(idr_spinlock);
+       spin_unlock_irqrestore(idr_spinlock, flags);
        idr_preload_end();
 
        return ret;
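
The switch to spin_lock_irqsave() makes the SWID allocator callable regardless of the caller's IRQ state, while idr_preload() keeps the allocation itself GFP_ATOMIC-safe under the lock. The pattern in isolation:

    static int example_alloc_id(struct idr *idr, spinlock_t *lock, void *ptr)
    {
            unsigned long flags;
            int id;

            idr_preload(GFP_KERNEL);        /* preallocate outside the lock */
            spin_lock_irqsave(lock, flags);
            id = idr_alloc(idr, ptr, 1, 0, GFP_ATOMIC);
            spin_unlock_irqrestore(lock, flags);
            idr_preload_end();

            return id;      /* new id, or a negative errno */
    }
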
@@ -157,6 +158,13 @@ static void mlx5_fpga_tls_release_swid(struct idr *idr,
        spin_unlock_irqrestore(idr_spinlock, flags);
 }
 
+static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
+                                  struct mlx5_fpga_device *fdev,
+                                  struct mlx5_fpga_dma_buf *buf, u8 status)
+{
+       kfree(buf);
+}
+
 struct mlx5_teardown_stream_context {
        struct mlx5_fpga_tls_command_context cmd;
        u32 swid;
@@ -178,9 +186,13 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
                        mlx5_fpga_err(fdev,
                                      "Teardown stream failed with syndrome = %d",
                                      syndrome);
-               else
+               else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
                        mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
-                                                  &fdev->tls->idr_spinlock,
+                                                  &fdev->tls->tx_idr_spinlock,
+                                                  ctx->swid);
+               else
+                       mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
+                                                  &fdev->tls->rx_idr_spinlock,
                                                   ctx->swid);
        }
        mlx5_fpga_tls_put_command_ctx(cmd);
@@ -196,6 +208,40 @@ static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
                 MLX5_GET(tls_flow, flow, direction_sx));
 }
 
+int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+                           u64 rcd_sn)
+{
+       struct mlx5_fpga_dma_buf *buf;
+       int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE;
+       void *flow;
+       void *cmd;
+       int ret;
+
+       buf = kzalloc(size, GFP_ATOMIC);
+       if (!buf)
+               return -ENOMEM;
+
+       cmd = (buf + 1);
+
+       rcu_read_lock();
+       flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
+       rcu_read_unlock();
+       mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+
+       MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
+       MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
+       MLX5_SET(tls_cmd, cmd, tcp_sn, seq);
+       MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX);
+
+       buf->sg[0].data = cmd;
+       buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
+       buf->complete = mlx_tls_kfree_complete;
+
+       ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
+
+       return ret;
+}
+
 static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
                                            void *flow, u32 swid, gfp_t flags)
 {
@@ -223,14 +269,18 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
                               mlx5_fpga_tls_teardown_completion);
 }
 
-void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
-                              gfp_t flags)
+void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+                           gfp_t flags, bool direction_sx)
 {
        struct mlx5_fpga_tls *tls = mdev->fpga->tls;
        void *flow;
 
        rcu_read_lock();
-       flow = idr_find(&tls->tx_idr, swid);
+       if (direction_sx)
+               flow = idr_find(&tls->tx_idr, swid);
+       else
+               flow = idr_find(&tls->rx_idr, swid);
+
        rcu_read_unlock();
 
        if (!flow) {
@@ -289,9 +339,11 @@ mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
                 * the command context because we might not have received
                 * the tx completion yet.
                 */
-               mlx5_fpga_tls_del_tx_flow(fdev->mdev,
-                                         MLX5_GET(tls_cmd, tls_cmd, swid),
-                                         GFP_ATOMIC);
+               mlx5_fpga_tls_del_flow(fdev->mdev,
+                                      MLX5_GET(tls_cmd, tls_cmd, swid),
+                                      GFP_ATOMIC,
+                                      MLX5_GET(tls_cmd, tls_cmd,
+                                               direction_sx));
        }
 
        mlx5_fpga_tls_put_command_ctx(cmd);
@@ -415,8 +467,7 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
        if (err)
                goto error;
 
-       if (!(tls->caps & (MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 |
-                                MLX5_ACCEL_TLS_AES_GCM128))) {
+       if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) {
                err = -ENOTSUPP;
                goto error;
        }
@@ -438,7 +489,9 @@ int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
        INIT_LIST_HEAD(&tls->pending_cmds);
 
        idr_init(&tls->tx_idr);
-       spin_lock_init(&tls->idr_spinlock);
+       idr_init(&tls->rx_idr);
+       spin_lock_init(&tls->tx_idr_spinlock);
+       spin_lock_init(&tls->rx_idr_spinlock);
        fdev->tls = tls;
        return 0;
 
@@ -500,9 +553,9 @@ static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
        return 0;
 }
 
-static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
-                                 struct tls_crypto_info *crypto_info, u32 swid,
-                                 u32 tcp_sn)
+static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+                                  struct tls_crypto_info *crypto_info,
+                                  u32 swid, u32 tcp_sn)
 {
        u32 caps = mlx5_fpga_tls_device_caps(mdev);
        struct mlx5_setup_stream_context *ctx;
@@ -533,30 +586,42 @@ static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
        return ret;
 }
 
-int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
-                             struct tls_crypto_info *crypto_info,
-                             u32 start_offload_tcp_sn, u32 *p_swid)
+int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+                          struct tls_crypto_info *crypto_info,
+                          u32 start_offload_tcp_sn, u32 *p_swid,
+                          bool direction_sx)
 {
        struct mlx5_fpga_tls *tls = mdev->fpga->tls;
        int ret = -ENOMEM;
        u32 swid;
 
-       ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, &tls->idr_spinlock, flow);
+       if (direction_sx)
+               ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr,
+                                              &tls->tx_idr_spinlock, flow);
+       else
+               ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr,
+                                              &tls->rx_idr_spinlock, flow);
+
        if (ret < 0)
                return ret;
 
        swid = ret;
-       MLX5_SET(tls_flow, flow, direction_sx, 1);
+       MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0);
 
-       ret = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
-                                    start_offload_tcp_sn);
+       ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
+                                     start_offload_tcp_sn);
        if (ret && ret != -EINTR)
                goto free_swid;
 
        *p_swid = swid;
        return 0;
 free_swid:
-       mlx5_fpga_tls_release_swid(&tls->tx_idr, &tls->idr_spinlock, swid);
+       if (direction_sx)
+               mlx5_fpga_tls_release_swid(&tls->tx_idr,
+                                          &tls->tx_idr_spinlock, swid);
+       else
+               mlx5_fpga_tls_release_swid(&tls->rx_idr,
+                                          &tls->rx_idr_spinlock, swid);
 
        return ret;
 }
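
The TX-only flow table becomes a pair of per-direction IDRs: lookups run locklessly under RCU, and the two spinlocks serialize only insertion and removal. A sketch of the lookup side (callers must still handle a NULL result, as mlx5_fpga_tls_del_flow() does above):

    static void *example_find_flow(struct mlx5_fpga_tls *tls, u32 swid,
                                   bool direction_sx)
    {
            void *flow;

            rcu_read_lock();
            flow = idr_find(direction_sx ? &tls->tx_idr : &tls->rx_idr, swid);
            rcu_read_unlock();

            return flow;    /* NULL if the stream is already gone */
    }
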
index 800a214e4e490f9c2267b60bf3fdff8a3ff31978..3b2e37bf76febd7aaa241386560b2707a1346e78 100644 (file)
@@ -46,15 +46,18 @@ struct mlx5_fpga_tls {
        struct mlx5_fpga_conn *conn;
 
        struct idr tx_idr;
-       spinlock_t idr_spinlock; /* protects the IDR */
+       struct idr rx_idr;
+       spinlock_t tx_idr_spinlock; /* protects the IDR */
+       spinlock_t rx_idr_spinlock; /* protects the IDR */
 };
 
-int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
-                             struct tls_crypto_info *crypto_info,
-                             u32 start_offload_tcp_sn, u32 *p_swid);
+int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
+                          struct tls_crypto_info *crypto_info,
+                          u32 start_offload_tcp_sn, u32 *p_swid,
+                          bool direction_sx);
 
-void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
-                              gfp_t flags);
+void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
+                           gfp_t flags, bool direction_sx);
 
 bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev);
 int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev);
@@ -65,4 +68,7 @@ static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
        return mdev->fpga->tls->caps;
 }
 
+int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+                           u64 rcd_sn);
+
 #endif /* __MLX5_FPGA_TLS_H__ */
index 49a75d31185ecf25ff93c5f3a9beec6b48be28a1..f1a86cea86a0e24c5128e50f23bbba8a33112b7d 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/mutex.h>
 #include <linux/mlx5/driver.h>
+#include <linux/mlx5/eswitch.h>
 
 #include "mlx5_core.h"
 #include "fs_core.h"
@@ -2652,7 +2653,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
                        goto err;
        }
 
-       if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+       if (MLX5_ESWITCH_MANAGER(dev)) {
                if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
                        err = init_fdb_root_ns(steering);
                        if (err)
index afd9f4fa22f40b70506fafa49f29cf647c22a959..41ad24f0de2cf9d171e586df3b9d167515d3cb03 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
+#include <linux/mlx5/eswitch.h>
 #include <linux/module.h>
 #include "mlx5_core.h"
 #include "../../mlxfw/mlxfw.h"
@@ -159,13 +160,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
        }
 
        if (MLX5_CAP_GEN(dev, vport_group_manager) &&
-           MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+           MLX5_ESWITCH_MANAGER(dev)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
                if (err)
                        return err;
        }
 
-       if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+       if (MLX5_ESWITCH_MANAGER(dev)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
                if (err)
                        return err;
index 7cb67122e8b5f04371651e1c1e2757acb281a36e..98359559c77e4286df95df17651a4b9f2ca8e427 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/etherdevice.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/eswitch.h>
 #include "mlx5_core.h"
 #include "lib/mpfs.h"
 
@@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
        int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
        struct mlx5_mpfs *mpfs;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
@@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
 {
        struct mlx5_mpfs *mpfs = dev->priv.mpfs;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return;
 
        WARN_ON(!hlist_empty(mpfs->hash));
@@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
        u32 index;
        int err;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        mutex_lock(&mpfs->lock);
@@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
        int err = 0;
        u32 index;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        mutex_lock(&mpfs->lock);
index fa9d0760dd36ffda5c2c439f12bbdffab6320ccd..31a9cbd85689b01fc0bfe9e6c221d73cc7c5fe13 100644 (file)
@@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
 static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
                                   int inlen)
 {
-       u32 out[MLX5_ST_SZ_DW(qtct_reg)];
+       u32 out[MLX5_ST_SZ_DW(qetc_reg)];
 
        if (!MLX5_CAP_GEN(mdev, ets))
                return -EOPNOTSUPP;
@@ -713,7 +713,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
 static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
                                     int outlen)
 {
-       u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+       u32 in[MLX5_ST_SZ_DW(qetc_reg)];
 
        if (!MLX5_CAP_GEN(mdev, ets))
                return -EOPNOTSUPP;
index 2a8b529ce6dd176cbc29b9bb4b74cd1d1c48f671..a0674962f02c4d2a35d05c98f84436967703101c 100644 (file)
@@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
                return -EBUSY;
        }
 
+       if (!MLX5_ESWITCH_MANAGER(dev))
+               goto enable_vfs_hca;
+
        err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
        if (err) {
                mlx5_core_warn(dev,
@@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
                return err;
        }
 
+enable_vfs_hca:
        for (vf = 0; vf < num_vfs; vf++) {
                err = mlx5_core_enable_hca(dev, vf + 1);
                if (err) {
@@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
        }
 
 out:
-       mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+       if (MLX5_ESWITCH_MANAGER(dev))
+               mlx5_eswitch_disable_sriov(dev->priv.eswitch);
 
        if (mlx5_wait_for_vf_pages(dev))
                mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
index 719cecb182c6c4eb5579eb1b36601acb6c0d0c5c..7eecd5b07bb1931bf3041b1ae12b0f3f5154405a 100644 (file)
@@ -549,8 +549,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
                return -EINVAL;
        if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                return -EACCES;
-       if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-               return -EOPNOTSUPP;
 
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
index 0b47126815b636246009123d6b6dbf14133db28c..2bd4c3184eba21d866ea8df66699a41145e3ec10 100644 (file)
@@ -229,6 +229,11 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
        return !wq->cur_sz;
 }
 
+static inline int mlx5_wq_ll_missing(struct mlx5_wq_ll *wq)
+{
+       return wq->fbc.sz_m1 - wq->cur_sz;
+}
+
 static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
 {
        return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
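
mlx5_wq_ll_missing() reports how far the linked-list RQ is from full; en_rx.c above turns "UMR still in flight and more than two WQEs missing" into the new rx_congst_umr counter. Because the comparison yields 0 or 1, the counter tracks congestion events rather than missing-WQE totals:

    if (rq->mpwqe.umr_in_progress)
            rq->stats->congst_umr += mlx5_wq_ll_missing(wq) > 2;
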
index f4d9c9975ac3d857f50ef255756ea23a7a11fdb5..82827a8d3d67cac73ac3f6c232e3f750553deddc 100644 (file)
@@ -30,7 +30,7 @@ config MLXSW_CORE_THERMAL
 
 config MLXSW_PCI
        tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
-       depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE
+       depends on PCI && HAS_IOMEM && MLXSW_CORE
        default m
        ---help---
          This is PCI bus implementation for Mellanox Technologies Switch ASICs.
index 0cadcabfe86f10ba5132cad20cb3a3a3ce035b19..981e621ef9c22579e94e6ae802a1875e0a5a50f7 100644 (file)
@@ -15,11 +15,16 @@ mlxsw_switchx2-objs         := switchx2.o
 obj-$(CONFIG_MLXSW_SPECTRUM)   += mlxsw_spectrum.o
 mlxsw_spectrum-objs            := spectrum.o spectrum_buffers.o \
                                   spectrum_switchdev.o spectrum_router.o \
-                                  spectrum_kvdl.o spectrum_acl_tcam.o \
-                                  spectrum_acl.o spectrum_flower.o \
-                                  spectrum_cnt.o spectrum_fid.o \
-                                  spectrum_ipip.o spectrum_acl_flex_actions.o \
-                                  spectrum_mr.o spectrum_mr_tcam.o \
+                                  spectrum1_kvdl.o spectrum_kvdl.o \
+                                  spectrum_acl_tcam.o spectrum_acl_ctcam.o \
+                                  spectrum1_acl_tcam.o \
+                                  spectrum_acl.o \
+                                  spectrum_flower.o spectrum_cnt.o \
+                                  spectrum_fid.o spectrum_ipip.o \
+                                  spectrum_acl_flex_actions.o \
+                                  spectrum_acl_flex_keys.o \
+                                  spectrum1_mr_tcam.o \
+                                  spectrum_mr_tcam.o spectrum_mr.o \
                                   spectrum_qdisc.o spectrum_span.o
 mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB)    += spectrum_dcb.o
 mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
index 3c0d882ba18380ae6d2f63d77f52163a603f1e47..72a6a8a2131e949cef88ebdc39b847167950bbdb 100644 (file)
@@ -351,9 +351,24 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
        block->first_set = mlxsw_afa_set_create(true);
        if (!block->first_set)
                goto err_first_set_create;
-       block->cur_set = block->first_set;
+
+       /* When the ops request a dummy first set, we leave it empty
+        * here and create another, real, set right away.
+        */
+       if (mlxsw_afa->ops->dummy_first_set) {
+               block->cur_set = mlxsw_afa_set_create(false);
+               if (!block->cur_set)
+                       goto err_second_set_create;
+               block->cur_set->prev = block->first_set;
+               block->first_set->next = block->cur_set;
+       } else {
+               block->cur_set = block->first_set;
+       }
+
        return block;
 
+err_second_set_create:
+       mlxsw_afa_set_destroy(block->first_set);
 err_first_set_create:
        kfree(block);
        return NULL;
@@ -415,11 +430,16 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block)
 }
 EXPORT_SYMBOL(mlxsw_afa_block_first_set);
 
-u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block)
+u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block)
 {
-       return block->first_set->kvdl_index;
+       /* First set is never in KVD linear. So the first set
+        * with valid KVD linear index is always the second one.
+        */
+       if (WARN_ON(!block->first_set->next))
+               return 0;
+       return block->first_set->next->kvdl_index;
 }
-EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index);
+EXPORT_SYMBOL(mlxsw_afa_block_first_kvdl_index);
 
 int mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
 {
index 3a155d1043845208e0809f83cee4227fcfe964c1..c18249ac28f702987223b35381a9dbe9fe759e07 100644 (file)
@@ -54,6 +54,7 @@ struct mlxsw_afa_ops {
                          bool ingress, int *p_span_id);
        void (*mirror_del)(void *priv, u8 local_in_port, int span_id,
                           bool ingress);
+       bool dummy_first_set;
 };
 
 struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
@@ -64,7 +65,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa);
 void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_commit(struct mlxsw_afa_block *block);
 char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
-u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);
+u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
 int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
index b32a00972e836762bc8f07d50fb08223571bf109..bf645215f5148b8b75ec13c676b3da7518904a94 100644 (file)
@@ -43,6 +43,7 @@
 struct mlxsw_afk {
        struct list_head key_info_list;
        unsigned int max_blocks;
+       const struct mlxsw_afk_ops *ops;
        const struct mlxsw_afk_block *blocks;
        unsigned int blocks_count;
 };
@@ -69,8 +70,7 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk)
 }
 
 struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
-                                  const struct mlxsw_afk_block *blocks,
-                                  unsigned int blocks_count)
+                                  const struct mlxsw_afk_ops *ops)
 {
        struct mlxsw_afk *mlxsw_afk;
 
@@ -79,8 +79,9 @@ struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
                return NULL;
        INIT_LIST_HEAD(&mlxsw_afk->key_info_list);
        mlxsw_afk->max_blocks = max_blocks;
-       mlxsw_afk->blocks = blocks;
-       mlxsw_afk->blocks_count = blocks_count;
+       mlxsw_afk->ops = ops;
+       mlxsw_afk->blocks = ops->blocks;
+       mlxsw_afk->blocks_count = ops->blocks_count;
        WARN_ON(!mlxsw_afk_blocks_check(mlxsw_afk));
        return mlxsw_afk;
 }
@@ -415,45 +416,8 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
 }
 EXPORT_SYMBOL(mlxsw_afk_values_add_buf);
 
-static void mlxsw_afk_encode_u32(const struct mlxsw_item *storage_item,
-                                const struct mlxsw_item *output_item,
-                                char *storage, char *output_indexed)
-{
-       u32 value;
-
-       value = __mlxsw_item_get32(storage, storage_item, 0);
-       __mlxsw_item_set32(output_indexed, output_item, 0, value);
-}
-
-static void mlxsw_afk_encode_buf(const struct mlxsw_item *storage_item,
-                                const struct mlxsw_item *output_item,
-                                char *storage, char *output_indexed)
-{
-       char *storage_data = __mlxsw_item_data(storage, storage_item, 0);
-       char *output_data = __mlxsw_item_data(output_indexed, output_item, 0);
-       size_t len = output_item->size.bytes;
-
-       memcpy(output_data, storage_data, len);
-}
-
-#define MLXSW_AFK_KEY_BLOCK_SIZE 16
-
-static void mlxsw_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
-                                int block_index, char *storage, char *output)
-{
-       char *output_indexed = output + block_index * MLXSW_AFK_KEY_BLOCK_SIZE;
-       const struct mlxsw_item *storage_item = &elinst->info->item;
-       const struct mlxsw_item *output_item = &elinst->item;
-
-       if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32)
-               mlxsw_afk_encode_u32(storage_item, output_item,
-                                    storage, output_indexed);
-       else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF)
-               mlxsw_afk_encode_buf(storage_item, output_item,
-                                    storage, output_indexed);
-}
-
-void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
+void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
+                     struct mlxsw_afk_key_info *key_info,
                      struct mlxsw_afk_element_values *values,
                      char *key, char *mask)
 {
@@ -466,10 +430,10 @@ void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
                                                       &block_index);
                if (!elinst)
                        continue;
-               mlxsw_afk_encode_one(elinst, block_index,
-                                    values->storage.key, key);
-               mlxsw_afk_encode_one(elinst, block_index,
-                                    values->storage.mask, mask);
+               mlxsw_afk->ops->encode_one(elinst, block_index,
+                                          values->storage.key, key);
+               mlxsw_afk->ops->encode_one(elinst, block_index,
+                                          values->storage.mask, mask);
        }
 }
 EXPORT_SYMBOL(mlxsw_afk_encode);
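
mlxsw_afk_encode() is now parameterized by driver ops: each ASIC family supplies its block table and an encode_one() callback through mlxsw_afk_ops instead of the common code hard-coding one key-block layout. A sketch of how a per-ASIC instance would plug in (example_* names and the block table are illustrative):

    static void example_encode_one(const struct mlxsw_afk_element_inst *elinst,
                                   int block_index, char *storage, char *output)
    {
            /* ASIC-specific key-block layout goes here */
    }

    static const struct mlxsw_afk_ops example_afk_ops = {
            .blocks         = example_afk_blocks,
            .blocks_count   = ARRAY_SIZE(example_afk_blocks),
            .encode_one     = example_encode_one,
    };

    /* created once per ASIC: mlxsw_afk_create(max_blocks, &example_afk_ops) */
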
index 122506daa586070321c079d53c9d9ff6a9037c45..441636cd13d84f0a8cdbaf611757ff217b8fd13d 100644 (file)
 
 enum mlxsw_afk_element {
        MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
-       MLXSW_AFK_ELEMENT_DMAC,
-       MLXSW_AFK_ELEMENT_SMAC,
+       MLXSW_AFK_ELEMENT_DMAC_32_47,
+       MLXSW_AFK_ELEMENT_DMAC_0_31,
+       MLXSW_AFK_ELEMENT_SMAC_32_47,
+       MLXSW_AFK_ELEMENT_SMAC_0_31,
        MLXSW_AFK_ELEMENT_ETHERTYPE,
        MLXSW_AFK_ELEMENT_IP_PROTO,
-       MLXSW_AFK_ELEMENT_SRC_IP4,
-       MLXSW_AFK_ELEMENT_DST_IP4,
-       MLXSW_AFK_ELEMENT_SRC_IP6_HI,
-       MLXSW_AFK_ELEMENT_SRC_IP6_LO,
-       MLXSW_AFK_ELEMENT_DST_IP6_HI,
-       MLXSW_AFK_ELEMENT_DST_IP6_LO,
+       MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+       MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+       MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+       MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+       MLXSW_AFK_ELEMENT_DST_IP_96_127,
+       MLXSW_AFK_ELEMENT_DST_IP_64_95,
+       MLXSW_AFK_ELEMENT_DST_IP_32_63,
+       MLXSW_AFK_ELEMENT_DST_IP_0_31,
        MLXSW_AFK_ELEMENT_DST_L4_PORT,
        MLXSW_AFK_ELEMENT_SRC_L4_PORT,
        MLXSW_AFK_ELEMENT_VID,
@@ -99,9 +103,11 @@ struct mlxsw_afk_element_info {
  * define an internal storage geometry.
  */
 static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
-       MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16),
-       MLXSW_AFK_ELEMENT_INFO_BUF(DMAC, 0x04, 6),
-       MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6),
+       MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 8),
+       MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_32_47, 0x04, 2),
+       MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_0_31, 0x06, 4),
+       MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_32_47, 0x0A, 2),
+       MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_0_31, 0x0C, 4),
        MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
        MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
        MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
@@ -112,12 +118,14 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
        MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
        MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
        MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
-       MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32),
-       MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32),
-       MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8),
-       MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8),
-       MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8),
-       MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8),
+       MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
+       MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
+       MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
+       MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_0_31, 0x2C, 4),
+       MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_96_127, 0x30, 4),
+       MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_64_95, 0x34, 4),
+       MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4),
+       MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4),
 };
 
 #define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
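
Note: since a single logical field is now spread over several bit-range elements, callers add it as multiple buffers. A hedged sketch of filling a destination MAC with the new element names, using the mlxsw_afk_values_add_buf() helper exported above (the wrapper function itself is invented for illustration):

    /* Illustrative: a 6-byte DMAC is carried as two elements, the upper
     * two bytes (bits 32..47) and the lower four bytes (bits 0..31).
     */
    static void example_values_add_dmac(struct mlxsw_afk_element_values *values,
                                        const char dmac[6], const char mask[6])
    {
            mlxsw_afk_values_add_buf(values, MLXSW_AFK_ELEMENT_DMAC_32_47,
                                     &dmac[0], &mask[0], 2);
            mlxsw_afk_values_add_buf(values, MLXSW_AFK_ELEMENT_DMAC_0_31,
                                     &dmac[2], &mask[2], 4);
    }

The 128-bit IP addresses follow the same pattern as four 4-byte elements, which presumably also lets IPv4 addresses reuse the *_IP_0_31 elements instead of the dropped SRC_IP4/DST_IP4 ones.
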
@@ -208,9 +216,15 @@ mlxsw_afk_element_usage_subset(struct mlxsw_afk_element_usage *elusage_small,
 
 struct mlxsw_afk;
 
+struct mlxsw_afk_ops {
+       const struct mlxsw_afk_block *blocks;
+       unsigned int blocks_count;
+       void (*encode_one)(const struct mlxsw_afk_element_inst *elinst,
+                          int block_index, char *storage, char *output);
+};
+
 struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
-                                  const struct mlxsw_afk_block *blocks,
-                                  unsigned int blocks_count);
+                                  const struct mlxsw_afk_ops *ops);
 void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk);
 
 struct mlxsw_afk_key_info;
@@ -243,7 +257,8 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
                              enum mlxsw_afk_element element,
                              const char *key_value, const char *mask_value,
                              unsigned int len);
-void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
+void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
+                     struct mlxsw_afk_key_info *key_info,
                      struct mlxsw_afk_element_values *values,
                      char *key, char *mask);
 
index 1877d9f8a11a2b5d93fc2904fae773bb80fb630b..f76c17308a5124d5c13cfda6f1f9a0e05cb5d9b1 100644 (file)
@@ -2132,14 +2132,18 @@ MLXSW_ITEM32(reg, ptar, op, 0x00, 28, 4);
 
 /* reg_ptar_action_set_type
  * Type of action set to be used on this region.
- * For Spectrum, this is always type 2 - "flexible"
+ * For Spectrum and Spectrum-2, this is always type 2 - "flexible"
  * Access: WO
  */
 MLXSW_ITEM32(reg, ptar, action_set_type, 0x00, 16, 8);
 
+enum mlxsw_reg_ptar_key_type {
+       MLXSW_REG_PTAR_KEY_TYPE_FLEX = 0x50, /* Spectrum */
+       MLXSW_REG_PTAR_KEY_TYPE_FLEX2 = 0x51, /* Spectrum-2 */
+};
+
 /* reg_ptar_key_type
  * TCAM key type for the region.
- * For Spectrum, this is always type 0x50 - "FLEX_KEY"
  * Access: WO
  */
 MLXSW_ITEM32(reg, ptar, key_type, 0x00, 0, 8);
@@ -2182,13 +2186,14 @@ MLXSW_ITEM8_INDEXED(reg, ptar, flexible_key_id, 0x20, 0, 8,
                    MLXSW_REG_PTAR_KEY_ID_LEN, 0x00, false);
 
 static inline void mlxsw_reg_ptar_pack(char *payload, enum mlxsw_reg_ptar_op op,
+                                      enum mlxsw_reg_ptar_key_type key_type,
                                       u16 region_size, u16 region_id,
                                       const char *tcam_region_info)
 {
        MLXSW_REG_ZERO(ptar, payload);
        mlxsw_reg_ptar_op_set(payload, op);
        mlxsw_reg_ptar_action_set_type_set(payload, 2); /* "flexible" */
-       mlxsw_reg_ptar_key_type_set(payload, 0x50); /* "FLEX_KEY" */
+       mlxsw_reg_ptar_key_type_set(payload, key_type);
        mlxsw_reg_ptar_region_size_set(payload, region_size);
        mlxsw_reg_ptar_region_id_set(payload, region_id);
        mlxsw_reg_ptar_tcam_region_info_memcpy_to(payload, tcam_region_info);
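
Note: callers of mlxsw_reg_ptar_pack() now select the key type per ASIC generation instead of relying on the hard-coded 0x50. A sketch, assuming the pre-existing MLXSW_REG_PTAR_OP_ALLOC opcode and MLXSW_REG_PTAR_LEN from this register's definition (the function name is invented):

    static int example_ptar_region_alloc(struct mlxsw_sp *mlxsw_sp,
                                         u16 region_size, u16 region_id,
                                         const char *tcam_region_info)
    {
            char ptar_pl[MLXSW_REG_PTAR_LEN];

            /* key_type comes from the per-ASIC ACL TCAM ops introduced by
             * this patch: FLEX on Spectrum, FLEX2 on Spectrum-2.
             */
            mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
                                mlxsw_sp->acl_tcam_ops->key_type,
                                region_size, region_id, tcam_region_info);
            return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
    }
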
@@ -2397,6 +2402,15 @@ MLXSW_ITEM32(reg, ptce2, op, 0x00, 20, 3);
  */
 MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16);
 
+/* reg_ptce2_priority
+ * Priority of the rule; higher values win. The valid range is
+ * 1..cap_kvd_size-1. Priorities do not have to be unique per rule.
+ * Within a region, a higher-priority rule should be placed at a lower
+ * offset; there is no such limitation between the regions of a
+ * multi-region ACL.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce2, priority, 0x04, 0, 24);
+
 /* reg_ptce2_tcam_region_info
  * Opaque object that represents the TCAM region.
  * Access: Index
@@ -2432,12 +2446,13 @@ MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0,
 static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid,
                                        enum mlxsw_reg_ptce2_op op,
                                        const char *tcam_region_info,
-                                       u16 offset)
+                                       u16 offset, u32 priority)
 {
        MLXSW_REG_ZERO(ptce2, payload);
        mlxsw_reg_ptce2_v_set(payload, valid);
        mlxsw_reg_ptce2_op_set(payload, op);
        mlxsw_reg_ptce2_offset_set(payload, offset);
+       mlxsw_reg_ptce2_priority_set(payload, priority);
        mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info);
 }
 
@@ -3350,6 +3365,7 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
 
 enum mlxsw_reg_ppcnt_grp {
        MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+       MLXSW_REG_PPCNT_RFC_2819_CNT = 0x2,
        MLXSW_REG_PPCNT_EXT_CNT = 0x5,
        MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
        MLXSW_REG_PPCNT_TC_CNT = 0x11,
@@ -3508,6 +3524,68 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
 MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
             MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
 
+/* Ethernet RFC 2819 Counter Group */
+
+/* reg_ppcnt_ether_stats_pkts64octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts64octets,
+            MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts65to127octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts65to127octets,
+            MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts128to255octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts128to255octets,
+            MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts256to511octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts256to511octets,
+            MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts512to1023octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts512to1023octets,
+            MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x78, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts1024to1518octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1024to1518octets,
+            MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x80, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts1519to2047octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts1519to2047octets,
+            MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x88, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts2048to4095octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts2048to4095octets,
+            MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts4096to8191octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts4096to8191octets,
+            MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x98, 0, 64);
+
+/* reg_ppcnt_ether_stats_pkts8192to10239octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets,
+            MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0xA0, 0, 64);
+
 /* Ethernet Extended Counter Group Counters */
 
 /* reg_ppcnt_ecn_marked
@@ -4338,6 +4416,20 @@ MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8);
  */
 MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6);
 
+/* reg_ritr_if_vrrp_id_ipv6
+ * VRRP ID for IPv6
+ * Note: Reserved for RIF types other than VLAN, FID and Sub-port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv6, 0x1C, 8, 8);
+
+/* reg_ritr_if_vrrp_id_ipv4
+ * VRRP ID for IPv4
+ * Note: Reserved for RIF types other than VLAN, FID and Sub-port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv4, 0x1C, 0, 8);
+
 /* VLAN Interface */
 
 /* reg_ritr_vlan_if_vid
index fd9299ccec7212d896846a747a996a35af388403..f672a7b71de7b616346957884e7723888d2d40df 100644 (file)
@@ -42,6 +42,8 @@ enum mlxsw_res_id {
        MLXSW_RES_ID_KVD_SIZE,
        MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
        MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
+       MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE,
+       MLXSW_RES_ID_MAX_KVD_ACTION_SETS,
        MLXSW_RES_ID_MAX_TRAP_GROUPS,
        MLXSW_RES_ID_CQE_V0,
        MLXSW_RES_ID_CQE_V1,
@@ -83,6 +85,8 @@ static u16 mlxsw_res_ids[] = {
        [MLXSW_RES_ID_KVD_SIZE] = 0x1001,
        [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
        [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
+       [MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005,
+       [MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007,
        [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
        [MLXSW_RES_ID_CQE_V0] = 0x2210,
        [MLXSW_RES_ID_CQE_V1] = 0x2211,
index 968b88af2ef5ea93077186ff72d41d95aae60803..62c5f1c5bf62d314a18100e509bbd0e5301a6176 100644 (file)
 #include "spectrum_span.h"
 #include "../mlxfw/mlxfw.h"
 
-#define MLXSW_FWREV_MAJOR 13
-#define MLXSW_FWREV_MINOR 1620
-#define MLXSW_FWREV_SUBMINOR 192
-#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
+#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
 
-#define MLXSW_SP_FW_FILENAME \
-       "mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
-       "." __stringify(MLXSW_FWREV_MINOR) \
-       "." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
+#define MLXSW_SP1_FWREV_MAJOR 13
+#define MLXSW_SP1_FWREV_MINOR 1620
+#define MLXSW_SP1_FWREV_SUBMINOR 192
+
+static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
+       .major = MLXSW_SP1_FWREV_MAJOR,
+       .minor = MLXSW_SP1_FWREV_MINOR,
+       .subminor = MLXSW_SP1_FWREV_SUBMINOR,
+};
+
+#define MLXSW_SP1_FW_FILENAME \
+       "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
+       "." __stringify(MLXSW_SP1_FWREV_MINOR) \
+       "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
 
 static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
 static const char mlxsw_sp_driver_version[] = "1.0";
@@ -338,29 +345,35 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
 {
        const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
+       const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
+       const char *fw_filename = mlxsw_sp->fw_filename;
        const struct firmware *firmware;
        int err;
 
+       /* Don't check if the driver does not require it */
+       if (!req_rev || !fw_filename)
+               return 0;
+
        /* Validate driver & FW are compatible */
-       if (rev->major != MLXSW_FWREV_MAJOR) {
+       if (rev->major != req_rev->major) {
                WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
-                    rev->major, MLXSW_FWREV_MAJOR);
+                    rev->major, req_rev->major);
                return -EINVAL;
        }
-       if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) ==
-           MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR))
+       if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
+           MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor))
                return 0;
 
        dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
                 rev->major, rev->minor, rev->subminor);
        dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
-                MLXSW_SP_FW_FILENAME);
+                fw_filename);
 
-       err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
+       err = request_firmware_direct(&firmware, fw_filename,
                                      mlxsw_sp->bus_info->dev);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
-                       MLXSW_SP_FW_FILENAME);
+                       fw_filename);
                return err;
        }
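
Note: in effect the check only hard-fails on a major-version mismatch; minor versions are compared by branch, where a branch is minor / 100. With the required 13.1620.192, a device already running 13.1699.x sits on the same branch 16 and is accepted as-is, while 13.1420.x (branch 14) triggers a flash of fw_filename. A driver flavour that leaves req_rev and fw_filename unset skips the validation entirely.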
 
@@ -1503,7 +1516,8 @@ static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
 
 static int
 mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
-                                   struct tcf_block *block, bool ingress)
+                                   struct tcf_block *block, bool ingress,
+                                   struct netlink_ext_ack *extack)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_acl_block *acl_block;
@@ -1518,7 +1532,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
                        return -ENOMEM;
                block_cb = __tcf_block_cb_register(block,
                                                   mlxsw_sp_setup_tc_block_cb_flower,
-                                                  mlxsw_sp, acl_block);
+                                                  mlxsw_sp, acl_block, extack);
                if (IS_ERR(block_cb)) {
                        err = PTR_ERR(block_cb);
                        goto err_cb_register;
@@ -1541,7 +1555,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
 
 err_block_bind:
        if (!tcf_block_cb_decref(block_cb)) {
-               __tcf_block_cb_unregister(block_cb);
+               __tcf_block_cb_unregister(block, block_cb);
 err_cb_register:
                mlxsw_sp_acl_block_destroy(acl_block);
        }
@@ -1571,7 +1585,7 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
        err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
                                        mlxsw_sp_port, ingress);
        if (!err && !tcf_block_cb_decref(block_cb)) {
-               __tcf_block_cb_unregister(block_cb);
+               __tcf_block_cb_unregister(block, block_cb);
                mlxsw_sp_acl_block_destroy(acl_block);
        }
 }
@@ -1596,11 +1610,12 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
        switch (f->command) {
        case TC_BLOCK_BIND:
                err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
-                                           mlxsw_sp_port);
+                                           mlxsw_sp_port, f->extack);
                if (err)
                        return err;
                err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
-                                                         f->block, ingress);
+                                                         f->block, ingress,
+                                                         f->extack);
                if (err) {
                        tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
                        return err;
@@ -1873,6 +1888,52 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
 
 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
 
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
+       {
+               .str = "ether_pkts64octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
+       },
+       {
+               .str = "ether_pkts65to127octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
+       },
+       {
+               .str = "ether_pkts128to255octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
+       },
+       {
+               .str = "ether_pkts256to511octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
+       },
+       {
+               .str = "ether_pkts512to1023octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
+       },
+       {
+               .str = "ether_pkts1024to1518octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
+       },
+       {
+               .str = "ether_pkts1519to2047octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
+       },
+       {
+               .str = "ether_pkts2048to4095octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
+       },
+       {
+               .str = "ether_pkts4096to8191octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
+       },
+       {
+               .str = "ether_pkts8192to10239octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
+       },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
+       ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
+
 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
        {
                .str = "rx_octets_prio",
@@ -1964,6 +2025,11 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
+               for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
+                       memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
 
                for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                        mlxsw_sp_port_get_prio_strings(&p, i);
@@ -2003,10 +2069,14 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
                               int *p_len, enum mlxsw_reg_ppcnt_grp grp)
 {
        switch (grp) {
-       case  MLXSW_REG_PPCNT_IEEE_8023_CNT:
+       case MLXSW_REG_PPCNT_IEEE_8023_CNT:
                *p_hw_stats = mlxsw_sp_port_hw_stats;
                *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
                break;
+       case MLXSW_REG_PPCNT_RFC_2819_CNT:
+               *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
+               *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+               break;
        case MLXSW_REG_PPCNT_PRIO_CNT:
                *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
                *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
@@ -2056,6 +2126,11 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
                                  data, data_index);
        data_index = MLXSW_SP_PORT_HW_STATS_LEN;
 
+       /* RFC 2819 Counters */
+       __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
+                                 data, data_index);
+       data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+
        /* Per-Priority Counters */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
@@ -3371,6 +3446,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
        MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
        MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
+       MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
+       MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
        /* PKT Sample trap */
        MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
                  false, SP_IP2ME, DISCARD),
@@ -3619,6 +3696,14 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        int err;
 
+       mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
+       mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
+       mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
+       mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
+       mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
+       mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
+       mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
+
        mlxsw_sp->core = mlxsw_core;
        mlxsw_sp->bus_info = mlxsw_bus_info;
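
Note: the probe path now pins the Spectrum-1 flavour of every abstracted component (firmware requirements, KVDL, flexible actions and keys, multicast-router TCAM, ACL TCAM) in one place; a later ASIC generation would presumably supply its own init that assigns its own ops instances here, and could leave req_rev/fw_filename unset to opt out of the firmware check above.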
 
@@ -3876,7 +3961,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
        if (err)
                return err;
 
-       err = mlxsw_sp_kvdl_resources_register(mlxsw_core);
+       err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
        if  (err)
                return err;
 
@@ -4397,7 +4482,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
                if (!is_vlan_dev(upper_dev) &&
                    !netif_is_lag_master(upper_dev) &&
                    !netif_is_bridge_master(upper_dev) &&
-                   !netif_is_ovs_master(upper_dev)) {
+                   !netif_is_ovs_master(upper_dev) &&
+                   !netif_is_macvlan(upper_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
                        return -EINVAL;
                }
@@ -4423,6 +4509,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
                        NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
                        return -EINVAL;
                }
+               if (netif_is_macvlan(upper_dev) &&
+                   !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
+                       NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+                       return -EOPNOTSUPP;
+               }
                if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
                        return -EINVAL;
@@ -4461,6 +4552,9 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
                                err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
                        else
                                mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
+               } else if (netif_is_macvlan(upper_dev)) {
+                       if (!info->linking)
+                               mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
                }
                break;
        }
@@ -4545,8 +4639,9 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                upper_dev = info->upper_dev;
-               if (!netif_is_bridge_master(upper_dev)) {
-                       NL_SET_ERR_MSG_MOD(extack, "VLAN devices only support bridge and VRF uppers");
+               if (!netif_is_bridge_master(upper_dev) &&
+                   !netif_is_macvlan(upper_dev)) {
+                       NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
                        return -EINVAL;
                }
                if (!info->linking)
@@ -4558,6 +4653,11 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
                        NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
                        return -EINVAL;
                }
+               if (netif_is_macvlan(upper_dev) &&
+                   !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+                       NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+                       return -EOPNOTSUPP;
+               }
                break;
        case NETDEV_CHANGEUPPER:
                upper_dev = info->upper_dev;
@@ -4571,6 +4671,9 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
                                mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
                                                           vlan_dev,
                                                           upper_dev);
+               } else if (netif_is_macvlan(upper_dev)) {
+                       if (!info->linking)
+                               mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
                } else {
                        err = -EINVAL;
                        WARN_ON(1);
@@ -4620,6 +4723,64 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
        return 0;
 }
 
+static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
+                                          unsigned long event, void *ptr)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+       struct netdev_notifier_changeupper_info *info = ptr;
+       struct netlink_ext_ack *extack;
+       struct net_device *upper_dev;
+
+       if (!mlxsw_sp)
+               return 0;
+
+       extack = netdev_notifier_info_to_extack(&info->info);
+
+       switch (event) {
+       case NETDEV_PRECHANGEUPPER:
+               upper_dev = info->upper_dev;
+               if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
+                       NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+                       return -EOPNOTSUPP;
+               }
+               if (!info->linking)
+                       break;
+               if (netif_is_macvlan(upper_dev) &&
+                   !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
+                       NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+                       return -EOPNOTSUPP;
+               }
+               break;
+       case NETDEV_CHANGEUPPER:
+               upper_dev = info->upper_dev;
+               if (info->linking)
+                       break;
+               if (netif_is_macvlan(upper_dev))
+                       mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
+               break;
+       }
+
+       return 0;
+}
+
+static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
+                                           unsigned long event, void *ptr)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
+       struct netdev_notifier_changeupper_info *info = ptr;
+       struct netlink_ext_ack *extack;
+
+       if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
+               return 0;
+
+       extack = netdev_notifier_info_to_extack(&info->info);
+
+       /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
+       NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+
+       return -EOPNOTSUPP;
+}
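
Note: taken together with the new RITR if_vrrp_id_* fields and the IPV4_VRRP/IPV6_VRRP traps added earlier in this patch, the macvlan handling reads as groundwork for virtual-router MACs: a macvlan upper is accepted only on a device that already has a router interface, and unlinking it tears the matching router state down via mlxsw_sp_rif_macvlan_del().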
+
 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
 {
        struct netdev_notifier_changeupper_info *info = ptr;
@@ -4661,6 +4822,10 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
                err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
        else if (is_vlan_dev(dev))
                err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
+       else if (netif_is_bridge_master(dev))
+               err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
+       else if (netif_is_macvlan(dev))
+               err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
 
        return notifier_from_errno(err);
 }
@@ -4737,4 +4902,4 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
 MODULE_DESCRIPTION("Mellanox Spectrum driver");
 MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
-MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);
+MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
index 4a519d8edec8fe0410e5548182a389848011b2d2..8aa717a7d11caf987834e97e6e648ea4edfa1ff1 100644 (file)
@@ -145,6 +145,9 @@ struct mlxsw_sp_acl;
 struct mlxsw_sp_counter_pool;
 struct mlxsw_sp_fid_core;
 struct mlxsw_sp_kvdl;
+struct mlxsw_sp_kvdl_ops;
+struct mlxsw_sp_mr_tcam_ops;
+struct mlxsw_sp_acl_tcam_ops;
 
 struct mlxsw_sp {
        struct mlxsw_sp_port **ports;
@@ -168,6 +171,13 @@ struct mlxsw_sp {
                struct mlxsw_sp_span_entry *entries;
                int entries_count;
        } span;
+       const struct mlxsw_fw_rev *req_rev;
+       const char *fw_filename;
+       const struct mlxsw_sp_kvdl_ops *kvdl_ops;
+       const struct mlxsw_afa_ops *afa_ops;
+       const struct mlxsw_afk_ops *afk_ops;
+       const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops;
+       const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
 };
 
 static inline struct mlxsw_sp_upper *
@@ -407,6 +417,8 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+                             const struct net_device *macvlan_dev);
 int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
                            unsigned long event, void *ptr);
 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
@@ -435,15 +447,59 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
 void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
 
 /* spectrum_kvdl.c */
+enum mlxsw_sp_kvdl_entry_type {
+       MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+       MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+       MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+       MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+};
+
+static inline unsigned int
+mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
+{
+       switch (type) {
+       case MLXSW_SP_KVDL_ENTRY_TYPE_ADJ: /* fall through */
+       case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */
+       case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */
+       case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */
+       default:
+               return 1;
+       }
+}
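
Note: on Spectrum-1 every KVDL entry type occupies a single entry, so the switch above collapses to return 1; threading the type through anyway keeps the helper's signature ready for a generation where the per-type sizes differ.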
+
+struct mlxsw_sp_kvdl_ops {
+       size_t priv_size;
+       int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+       void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
+       int (*alloc)(struct mlxsw_sp *mlxsw_sp, void *priv,
+                    enum mlxsw_sp_kvdl_entry_type type,
+                    unsigned int entry_count, u32 *p_entry_index);
+       void (*free)(struct mlxsw_sp *mlxsw_sp, void *priv,
+                    enum mlxsw_sp_kvdl_entry_type type,
+                    unsigned int entry_count, int entry_index);
+       int (*alloc_size_query)(struct mlxsw_sp *mlxsw_sp, void *priv,
+                               enum mlxsw_sp_kvdl_entry_type type,
+                               unsigned int entry_count,
+                               unsigned int *p_alloc_count);
+       int (*resources_register)(struct mlxsw_sp *mlxsw_sp, void *priv);
+};
+
 int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp);
 void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
-                       u32 *p_entry_index);
-void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
-int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
-                                  unsigned int entry_count,
-                                  unsigned int *p_alloc_size);
-int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
+                       enum mlxsw_sp_kvdl_entry_type type,
+                       unsigned int entry_count, u32 *p_entry_index);
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
+                       enum mlxsw_sp_kvdl_entry_type type,
+                       unsigned int entry_count, int entry_index);
+int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
+                                   enum mlxsw_sp_kvdl_entry_type type,
+                                   unsigned int entry_count,
+                                   unsigned int *p_alloc_count);
+
+/* spectrum1_kvdl.c */
+extern const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops;
+int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
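
Note: a hedged usage sketch of the retyped KVDL API declared above (the function is invented for illustration; real callers pass the type matching what the allocated index will reference):

    static int example_actset_kvdl_use(struct mlxsw_sp *mlxsw_sp)
    {
            u32 kvdl_index;
            int err;

            err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
                                      1, &kvdl_index);
            if (err)
                    return err;
            /* ... program the action set at kvdl_index ... */
            mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
                               1, kvdl_index);
            return 0;
    }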
 
 struct mlxsw_sp_acl_rule_info {
        unsigned int priority;
@@ -452,44 +508,14 @@ struct mlxsw_sp_acl_rule_info {
        unsigned int counter_index;
 };
 
-enum mlxsw_sp_acl_profile {
-       MLXSW_SP_ACL_PROFILE_FLOWER,
-};
-
-struct mlxsw_sp_acl_profile_ops {
-       size_t ruleset_priv_size;
-       int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
-                          void *priv, void *ruleset_priv);
-       void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
-       int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
-                           struct mlxsw_sp_port *mlxsw_sp_port,
-                           bool ingress);
-       void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
-                              struct mlxsw_sp_port *mlxsw_sp_port,
-                              bool ingress);
-       u16 (*ruleset_group_id)(void *ruleset_priv);
-       size_t rule_priv_size;
-       int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
-                       void *ruleset_priv, void *rule_priv,
-                       struct mlxsw_sp_acl_rule_info *rulei);
-       void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
-       int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
-                                bool *activity);
-};
-
-struct mlxsw_sp_acl_ops {
-       size_t priv_size;
-       int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
-       void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
-       const struct mlxsw_sp_acl_profile_ops *
-                       (*profile_ops)(struct mlxsw_sp *mlxsw_sp,
-                                      enum mlxsw_sp_acl_profile profile);
-};
-
 struct mlxsw_sp_acl_block;
 struct mlxsw_sp_acl_ruleset;
 
 /* spectrum_acl.c */
+enum mlxsw_sp_acl_profile {
+       MLXSW_SP_ACL_PROFILE_FLOWER,
+};
+
 struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
 struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
 unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
@@ -582,7 +608,44 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
 void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
 
 /* spectrum_acl_tcam.c */
-extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
+struct mlxsw_sp_acl_tcam;
+struct mlxsw_sp_acl_tcam_region;
+
+struct mlxsw_sp_acl_tcam_ops {
+       enum mlxsw_reg_ptar_key_type key_type;
+       size_t priv_size;
+       int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv,
+                   struct mlxsw_sp_acl_tcam *tcam);
+       void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
+       size_t region_priv_size;
+       int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+                          struct mlxsw_sp_acl_tcam_region *region);
+       void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv);
+       size_t chunk_priv_size;
+       void (*chunk_init)(void *region_priv, void *chunk_priv,
+                          unsigned int priority);
+       void (*chunk_fini)(void *chunk_priv);
+       size_t entry_priv_size;
+       int (*entry_add)(struct mlxsw_sp *mlxsw_sp,
+                        void *region_priv, void *chunk_priv,
+                        void *entry_priv,
+                        struct mlxsw_sp_acl_rule_info *rulei);
+       void (*entry_del)(struct mlxsw_sp *mlxsw_sp,
+                         void *region_priv, void *chunk_priv,
+                         void *entry_priv);
+       int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp,
+                                 void *region_priv, void *entry_priv,
+                                 bool *activity);
+};
+
+/* spectrum1_acl_tcam.c */
+extern const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops;
+
+/* spectrum_acl_flex_actions.c */
+extern const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops;
+
+/* spectrum_acl_flex_keys.c */
+extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
 
 /* spectrum_flower.c */
 int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
@@ -631,4 +694,37 @@ void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
 int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
 void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
 
+/* spectrum_mr.c */
+enum mlxsw_sp_mr_route_prio {
+       MLXSW_SP_MR_ROUTE_PRIO_SG,
+       MLXSW_SP_MR_ROUTE_PRIO_STARG,
+       MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
+       __MLXSW_SP_MR_ROUTE_PRIO_MAX
+};
+
+#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)
+
+struct mlxsw_sp_mr_route_key;
+
+struct mlxsw_sp_mr_tcam_ops {
+       size_t priv_size;
+       int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+       void (*fini)(void *priv);
+       size_t route_priv_size;
+       int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv,
+                           void *route_priv,
+                           struct mlxsw_sp_mr_route_key *key,
+                           struct mlxsw_afa_block *afa_block,
+                           enum mlxsw_sp_mr_route_prio prio);
+       void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
+                             void *route_priv,
+                             struct mlxsw_sp_mr_route_key *key);
+       int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+                           struct mlxsw_sp_mr_route_key *key,
+                           struct mlxsw_afa_block *afa_block);
+};
+
+/* spectrum1_mr_tcam.c */
+extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops;
+
 #endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
new file mode 100644 (file)
index 0000000..04f0c9c
--- /dev/null
@@ -0,0 +1,245 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+struct mlxsw_sp1_acl_tcam_region {
+       struct mlxsw_sp_acl_ctcam_region cregion;
+       struct mlxsw_sp_acl_tcam_region *region;
+       struct {
+               struct mlxsw_sp_acl_ctcam_chunk cchunk;
+               struct mlxsw_sp_acl_ctcam_entry centry;
+               struct mlxsw_sp_acl_rule_info *rulei;
+       } catchall;
+};
+
+struct mlxsw_sp1_acl_tcam_chunk {
+       struct mlxsw_sp_acl_ctcam_chunk cchunk;
+};
+
+struct mlxsw_sp1_acl_tcam_entry {
+       struct mlxsw_sp_acl_ctcam_entry centry;
+};
+
+static int mlxsw_sp1_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
+                                  struct mlxsw_sp_acl_tcam *tcam)
+{
+       return 0;
+}
+
+static void mlxsw_sp1_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+}
+
+static int
+mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
+                                       struct mlxsw_sp1_acl_tcam_region *region)
+{
+       struct mlxsw_sp_acl_rule_info *rulei;
+       int err;
+
+       mlxsw_sp_acl_ctcam_chunk_init(&region->cregion,
+                                     &region->catchall.cchunk,
+                                     MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
+       rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
+       if (IS_ERR(rulei)) {
+               err = PTR_ERR(rulei);
+               goto err_rulei_create;
+       }
+       err = mlxsw_sp_acl_rulei_act_continue(rulei);
+       if (WARN_ON(err))
+               goto err_rulei_act_continue;
+       err = mlxsw_sp_acl_rulei_commit(rulei);
+       if (err)
+               goto err_rulei_commit;
+       err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
+                                          &region->catchall.cchunk,
+                                          &region->catchall.centry,
+                                          rulei, false);
+       if (err)
+               goto err_entry_add;
+       region->catchall.rulei = rulei;
+       return 0;
+
+err_entry_add:
+err_rulei_commit:
+err_rulei_act_continue:
+       mlxsw_sp_acl_rulei_destroy(rulei);
+err_rulei_create:
+       mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
+       return err;
+}
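
Note: the catch-all entry installed here uses a dedicated priority (MLXSW_SP_ACL_TCAM_CATCHALL_PRIO) and a bare "continue" action, so a packet that misses every real rule in the region still hits a well-defined action set, presumably avoiding undefined lookup results on Spectrum-1.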
+
+static void
+mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
+                                       struct mlxsw_sp1_acl_tcam_region *region)
+{
+       struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;
+
+       mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
+                                    &region->catchall.cchunk,
+                                    &region->catchall.centry);
+       mlxsw_sp_acl_rulei_destroy(rulei);
+       mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
+                              struct mlxsw_sp_acl_tcam_region *_region)
+{
+       struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+       int err;
+
+       err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &region->cregion,
+                                            _region);
+       if (err)
+               return err;
+       err = mlxsw_sp1_acl_ctcam_region_catchall_add(mlxsw_sp, region);
+       if (err)
+               goto err_catchall_add;
+       region->region = _region;
+       return 0;
+
+err_catchall_add:
+       mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
+       return err;
+}
+
+static void
+mlxsw_sp1_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
+{
+       struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+
+       mlxsw_sp1_acl_ctcam_region_catchall_del(mlxsw_sp, region);
+       mlxsw_sp_acl_ctcam_region_fini(&region->cregion);
+}
+
+static void mlxsw_sp1_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
+                                         unsigned int priority)
+{
+       struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+       struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+
+       mlxsw_sp_acl_ctcam_chunk_init(&region->cregion, &chunk->cchunk,
+                                     priority);
+}
+
+static void mlxsw_sp1_acl_tcam_chunk_fini(void *chunk_priv)
+{
+       struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+
+       mlxsw_sp_acl_ctcam_chunk_fini(&chunk->cchunk);
+}
+
+static int mlxsw_sp1_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+                                       void *region_priv, void *chunk_priv,
+                                       void *entry_priv,
+                                       struct mlxsw_sp_acl_rule_info *rulei)
+{
+       struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+       struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+       struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+
+       return mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &region->cregion,
+                                           &chunk->cchunk, &entry->centry,
+                                           rulei, false);
+}
+
+static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+                                        void *region_priv, void *chunk_priv,
+                                        void *entry_priv)
+{
+       struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+       struct mlxsw_sp1_acl_tcam_chunk *chunk = chunk_priv;
+       struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+
+       mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
+                                    &chunk->cchunk, &entry->centry);
+}
+
+static int
+mlxsw_sp1_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+                                            struct mlxsw_sp_acl_tcam_region *_region,
+                                            unsigned int offset,
+                                            bool *activity)
+{
+       char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+       int err;
+
+       mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
+                            _region->tcam_region_info, offset, 0);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+       if (err)
+               return err;
+       *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
+       return 0;
+}
+
+static int
+mlxsw_sp1_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
+                                     void *region_priv, void *entry_priv,
+                                     bool *activity)
+{
+       struct mlxsw_sp1_acl_tcam_region *region = region_priv;
+       struct mlxsw_sp1_acl_tcam_entry *entry = entry_priv;
+       unsigned int offset;
+
+       offset = mlxsw_sp_acl_ctcam_entry_offset(&entry->centry);
+       return mlxsw_sp1_acl_tcam_region_entry_activity_get(mlxsw_sp,
+                                                           region->region,
+                                                           offset, activity);
+}
+
+const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops = {
+       .key_type               = MLXSW_REG_PTAR_KEY_TYPE_FLEX,
+       .priv_size              = 0,
+       .init                   = mlxsw_sp1_acl_tcam_init,
+       .fini                   = mlxsw_sp1_acl_tcam_fini,
+       .region_priv_size       = sizeof(struct mlxsw_sp1_acl_tcam_region),
+       .region_init            = mlxsw_sp1_acl_tcam_region_init,
+       .region_fini            = mlxsw_sp1_acl_tcam_region_fini,
+       .chunk_priv_size        = sizeof(struct mlxsw_sp1_acl_tcam_chunk),
+       .chunk_init             = mlxsw_sp1_acl_tcam_chunk_init,
+       .chunk_fini             = mlxsw_sp1_acl_tcam_chunk_fini,
+       .entry_priv_size        = sizeof(struct mlxsw_sp1_acl_tcam_entry),
+       .entry_add              = mlxsw_sp1_acl_tcam_entry_add,
+       .entry_del              = mlxsw_sp1_acl_tcam_entry_del,
+       .entry_activity_get     = mlxsw_sp1_acl_tcam_entry_activity_get,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
new file mode 100644 (file)
index 0000000..0d45838
--- /dev/null
@@ -0,0 +1,459 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum.h"
+
+#define MLXSW_SP1_KVDL_SINGLE_BASE 0
+#define MLXSW_SP1_KVDL_SINGLE_SIZE 16384
+#define MLXSW_SP1_KVDL_SINGLE_END \
+       (MLXSW_SP1_KVDL_SINGLE_SIZE + MLXSW_SP1_KVDL_SINGLE_BASE - 1)
+
+#define MLXSW_SP1_KVDL_CHUNKS_BASE \
+       (MLXSW_SP1_KVDL_SINGLE_BASE + MLXSW_SP1_KVDL_SINGLE_SIZE)
+#define MLXSW_SP1_KVDL_CHUNKS_SIZE 49152
+#define MLXSW_SP1_KVDL_CHUNKS_END \
+       (MLXSW_SP1_KVDL_CHUNKS_SIZE + MLXSW_SP1_KVDL_CHUNKS_BASE - 1)
+
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE \
+       (MLXSW_SP1_KVDL_CHUNKS_BASE + MLXSW_SP1_KVDL_CHUNKS_SIZE)
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE \
+       (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE)
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_END \
+       (MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE - 1)
+
+#define MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE 1
+#define MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE 32
+#define MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
+
+struct mlxsw_sp1_kvdl_part_info {
+       unsigned int part_index;
+       unsigned int start_index;
+       unsigned int end_index;
+       unsigned int alloc_size;
+       enum mlxsw_sp_resource_id resource_id;
+};
+
+enum mlxsw_sp1_kvdl_part_id {
+       MLXSW_SP1_KVDL_PART_ID_SINGLE,
+       MLXSW_SP1_KVDL_PART_ID_CHUNKS,
+       MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS,
+};
+
+#define MLXSW_SP1_KVDL_PART_INFO(id)                           \
+[MLXSW_SP1_KVDL_PART_ID_##id] = {                              \
+       .start_index = MLXSW_SP1_KVDL_##id##_BASE,              \
+       .end_index = MLXSW_SP1_KVDL_##id##_END,                 \
+       .alloc_size = MLXSW_SP1_KVDL_##id##_ALLOC_SIZE,         \
+       .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id,       \
+}
+
+static const struct mlxsw_sp1_kvdl_part_info mlxsw_sp1_kvdl_parts_info[] = {
+       MLXSW_SP1_KVDL_PART_INFO(SINGLE),
+       MLXSW_SP1_KVDL_PART_INFO(CHUNKS),
+       MLXSW_SP1_KVDL_PART_INFO(LARGE_CHUNKS),
+};
+
+#define MLXSW_SP1_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp1_kvdl_parts_info)
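
Note: with the defines above, the default geometry works out to SINGLE covering linear indices 0..16383 with 1-entry allocations, CHUNKS covering 16384..65535 in units of 32, and LARGE_CHUNKS covering 65536 up to MLXSW_SP_KVD_LINEAR_SIZE - 1 in units of 512; devlink-resized parts override these bounds at init time, as mlxsw_sp1_kvdl_part_init() below shows.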
+
+struct mlxsw_sp1_kvdl_part {
+       struct mlxsw_sp1_kvdl_part_info info;
+       unsigned long usage[0]; /* Entries */
+};
+
+struct mlxsw_sp1_kvdl {
+       struct mlxsw_sp1_kvdl_part *parts[MLXSW_SP1_KVDL_PARTS_INFO_LEN];
+};
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_alloc_size_part(struct mlxsw_sp1_kvdl *kvdl,
+                              unsigned int alloc_size)
+{
+       struct mlxsw_sp1_kvdl_part *part, *min_part = NULL;
+       int i;
+
+       for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+               part = kvdl->parts[i];
+               if (alloc_size <= part->info.alloc_size &&
+                   (!min_part ||
+                    part->info.alloc_size <= min_part->info.alloc_size))
+                       min_part = part;
+       }
+
+       return min_part ?: ERR_PTR(-ENOBUFS);
+}
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_index_part(struct mlxsw_sp1_kvdl *kvdl, u32 kvdl_index)
+{
+       struct mlxsw_sp1_kvdl_part *part;
+       int i;
+
+       for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+               part = kvdl->parts[i];
+               if (kvdl_index >= part->info.start_index &&
+                   kvdl_index <= part->info.end_index)
+                       return part;
+       }
+
+       return ERR_PTR(-EINVAL);
+}
+
+static u32
+mlxsw_sp1_kvdl_to_kvdl_index(const struct mlxsw_sp1_kvdl_part_info *info,
+                            unsigned int entry_index)
+{
+       return info->start_index + entry_index * info->alloc_size;
+}
+
+static unsigned int
+mlxsw_sp1_kvdl_to_entry_index(const struct mlxsw_sp1_kvdl_part_info *info,
+                             u32 kvdl_index)
+{
+       return (kvdl_index - info->start_index) / info->alloc_size;
+}
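
Note: a quick worked example of the index translation: in the CHUNKS part (start_index 16384, alloc_size 32), entry_index 2 maps to kvdl_index 16384 + 2 * 32 = 16448, and the reverse division recovers entry 2; all indices within one allocation unit collapse to the same usage bit.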
+
+static int mlxsw_sp1_kvdl_part_alloc(struct mlxsw_sp1_kvdl_part *part,
+                                    u32 *p_kvdl_index)
+{
+       const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+       unsigned int entry_index, nr_entries;
+
+       nr_entries = (info->end_index - info->start_index + 1) /
+                    info->alloc_size;
+       entry_index = find_first_zero_bit(part->usage, nr_entries);
+       if (entry_index == nr_entries)
+               return -ENOBUFS;
+       __set_bit(entry_index, part->usage);
+
+       *p_kvdl_index = mlxsw_sp1_kvdl_to_kvdl_index(info, entry_index);
+
+       return 0;
+}
+
+static void mlxsw_sp1_kvdl_part_free(struct mlxsw_sp1_kvdl_part *part,
+                                    u32 kvdl_index)
+{
+       const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+       unsigned int entry_index;
+
+       entry_index = mlxsw_sp1_kvdl_to_entry_index(info, kvdl_index);
+       __clear_bit(entry_index, part->usage);
+}
+
+static int mlxsw_sp1_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv,
+                               enum mlxsw_sp_kvdl_entry_type type,
+                               unsigned int entry_count,
+                               u32 *p_entry_index)
+{
+       struct mlxsw_sp1_kvdl *kvdl = priv;
+       struct mlxsw_sp1_kvdl_part *part;
+
+       /* Find the partition with the smallest allocation unit that can
+        * still hold the requested number of entries.
+        */
+       part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count);
+       if (IS_ERR(part))
+               return PTR_ERR(part);
+
+       return mlxsw_sp1_kvdl_part_alloc(part, p_entry_index);
+}
+
+static void mlxsw_sp1_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv,
+                               enum mlxsw_sp_kvdl_entry_type type,
+                               unsigned int entry_count, int entry_index)
+{
+       struct mlxsw_sp1_kvdl *kvdl = priv;
+       struct mlxsw_sp1_kvdl_part *part;
+
+       part = mlxsw_sp1_kvdl_index_part(kvdl, entry_index);
+       if (IS_ERR(part))
+               return;
+       mlxsw_sp1_kvdl_part_free(part, entry_index);
+}
+
+static int mlxsw_sp1_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
+                                          void *priv,
+                                          enum mlxsw_sp_kvdl_entry_type type,
+                                          unsigned int entry_count,
+                                          unsigned int *p_alloc_size)
+{
+       struct mlxsw_sp1_kvdl *kvdl = priv;
+       struct mlxsw_sp1_kvdl_part *part;
+
+       part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count);
+       if (IS_ERR(part))
+               return PTR_ERR(part);
+
+       *p_alloc_size = part->info.alloc_size;
+
+       return 0;
+}
+
+static void mlxsw_sp1_kvdl_part_update(struct mlxsw_sp1_kvdl_part *part,
+                                      struct mlxsw_sp1_kvdl_part *part_prev,
+                                      unsigned int size)
+{
+       if (!part_prev) {
+               part->info.end_index = size - 1;
+       } else {
+               part->info.start_index = part_prev->info.end_index + 1;
+               part->info.end_index = part->info.start_index + size - 1;
+       }
+}
+
+static struct mlxsw_sp1_kvdl_part *
+mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
+                        const struct mlxsw_sp1_kvdl_part_info *info,
+                        struct mlxsw_sp1_kvdl_part *part_prev)
+{
+       struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+       struct mlxsw_sp1_kvdl_part *part;
+       bool need_update = true;
+       unsigned int nr_entries;
+       size_t usage_size;
+       u64 resource_size;
+       int err;
+
+       err = devlink_resource_size_get(devlink, info->resource_id,
+                                       &resource_size);
+       if (err) {
+               need_update = false;
+               resource_size = info->end_index - info->start_index + 1;
+       }
+
+       nr_entries = div_u64(resource_size, info->alloc_size);
+       usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
+       part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
+       if (!part)
+               return ERR_PTR(-ENOMEM);
+
+       memcpy(&part->info, info, sizeof(part->info));
+
+       if (need_update)
+               mlxsw_sp1_kvdl_part_update(part, part_prev, resource_size);
+       return part;
+}
+
+static void mlxsw_sp1_kvdl_part_fini(struct mlxsw_sp1_kvdl_part *part)
+{
+       kfree(part);
+}
+
+static int mlxsw_sp1_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp,
+                                    struct mlxsw_sp1_kvdl *kvdl)
+{
+       const struct mlxsw_sp1_kvdl_part_info *info;
+       struct mlxsw_sp1_kvdl_part *part_prev = NULL;
+       int err, i;
+
+       for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
+               info = &mlxsw_sp1_kvdl_parts_info[i];
+               kvdl->parts[i] = mlxsw_sp1_kvdl_part_init(mlxsw_sp, info,
+                                                         part_prev);
+               if (IS_ERR(kvdl->parts[i])) {
+                       err = PTR_ERR(kvdl->parts[i]);
+                       goto err_kvdl_part_init;
+               }
+               part_prev = kvdl->parts[i];
+       }
+       return 0;
+
+err_kvdl_part_init:
+       for (i--; i >= 0; i--)
+               mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]);
+       return err;
+}
+
+static void mlxsw_sp1_kvdl_parts_fini(struct mlxsw_sp1_kvdl *kvdl)
+{
+       int i;
+
+       for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++)
+               mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]);
+}
+
+static u64 mlxsw_sp1_kvdl_part_occ(struct mlxsw_sp1_kvdl_part *part)
+{
+       const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
+       unsigned int nr_entries;
+       int bit = -1;
+       u64 occ = 0;
+
+       nr_entries = (info->end_index -
+                     info->start_index + 1) /
+                     info->alloc_size;
+       while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
+               < nr_entries)
+               occ += info->alloc_size;
+       return occ;
+}
+
+static u64 mlxsw_sp1_kvdl_occ_get(void *priv)
+{
+       const struct mlxsw_sp1_kvdl *kvdl = priv;
+       u64 occ = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++)
+               occ += mlxsw_sp1_kvdl_part_occ(kvdl->parts[i]);
+
+       return occ;
+}
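
Occupancy is derived rather than tracked: every set bit in a part's usage bitmap stands for one allocation of alloc_size entries, so the loop above is equivalent to popcount(usage) * alloc_size. A quick check with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	unsigned long usage = 0x7;	/* 3 allocations in flight */
	unsigned int alloc_size = 32;

	printf("occ = %d entries\n",
	       __builtin_popcountl(usage) * alloc_size);	/* 96 */
	return 0;
}
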
+
+static u64 mlxsw_sp1_kvdl_single_occ_get(void *priv)
+{
+       const struct mlxsw_sp1_kvdl *kvdl = priv;
+       struct mlxsw_sp1_kvdl_part *part;
+
+       part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_SINGLE];
+       return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static u64 mlxsw_sp1_kvdl_chunks_occ_get(void *priv)
+{
+       const struct mlxsw_sp1_kvdl *kvdl = priv;
+       struct mlxsw_sp1_kvdl_part *part;
+
+       part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_CHUNKS];
+       return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static u64 mlxsw_sp1_kvdl_large_chunks_occ_get(void *priv)
+{
+       const struct mlxsw_sp1_kvdl *kvdl = priv;
+       struct mlxsw_sp1_kvdl_part *part;
+
+       part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS];
+       return mlxsw_sp1_kvdl_part_occ(part);
+}
+
+static int mlxsw_sp1_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+       struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+       struct mlxsw_sp1_kvdl *kvdl = priv;
+       int err;
+
+       err = mlxsw_sp1_kvdl_parts_init(mlxsw_sp, kvdl);
+       if (err)
+               return err;
+       devlink_resource_occ_get_register(devlink,
+                                         MLXSW_SP_RESOURCE_KVD_LINEAR,
+                                         mlxsw_sp1_kvdl_occ_get,
+                                         kvdl);
+       devlink_resource_occ_get_register(devlink,
+                                         MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+                                         mlxsw_sp1_kvdl_single_occ_get,
+                                         kvdl);
+       devlink_resource_occ_get_register(devlink,
+                                         MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+                                         mlxsw_sp1_kvdl_chunks_occ_get,
+                                         kvdl);
+       devlink_resource_occ_get_register(devlink,
+                                         MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+                                         mlxsw_sp1_kvdl_large_chunks_occ_get,
+                                         kvdl);
+       return 0;
+}
+
+static void mlxsw_sp1_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+       struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+       struct mlxsw_sp1_kvdl *kvdl = priv;
+
+       devlink_resource_occ_get_unregister(devlink,
+                                           MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
+       devlink_resource_occ_get_unregister(devlink,
+                                           MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
+       devlink_resource_occ_get_unregister(devlink,
+                                           MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
+       devlink_resource_occ_get_unregister(devlink,
+                                           MLXSW_SP_RESOURCE_KVD_LINEAR);
+       mlxsw_sp1_kvdl_parts_fini(kvdl);
+}
+
+const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops = {
+       .priv_size = sizeof(struct mlxsw_sp1_kvdl),
+       .init = mlxsw_sp1_kvdl_init,
+       .fini = mlxsw_sp1_kvdl_fini,
+       .alloc = mlxsw_sp1_kvdl_alloc,
+       .free = mlxsw_sp1_kvdl_free,
+       .alloc_size_query = mlxsw_sp1_kvdl_alloc_size_query,
+};
+
+int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
+{
+       struct devlink *devlink = priv_to_devlink(mlxsw_core);
+       static struct devlink_resource_size_params size_params;
+       u32 kvdl_max_size;
+       int err;
+
+       kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+                       MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
+                       MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
+
+       devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+                                         MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE,
+                                         DEVLINK_RESOURCE_UNIT_ENTRY);
+       err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
+                                       MLXSW_SP1_KVDL_SINGLE_SIZE,
+                                       MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+                                       MLXSW_SP_RESOURCE_KVD_LINEAR,
+                                       &size_params);
+       if (err)
+               return err;
+
+       devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+                                         MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE,
+                                         DEVLINK_RESOURCE_UNIT_ENTRY);
+       err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
+                                       MLXSW_SP1_KVDL_CHUNKS_SIZE,
+                                       MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+                                       MLXSW_SP_RESOURCE_KVD_LINEAR,
+                                       &size_params);
+       if (err)
+               return err;
+
+       devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+                                         MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
+                                         DEVLINK_RESOURCE_UNIT_ENTRY);
+       err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
+                                       MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE,
+                                       MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+                                       MLXSW_SP_RESOURCE_KVD_LINEAR,
+                                       &size_params);
+       return err;
+}
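
The ceiling handed to devlink is whatever remains of the whole KVD once the minimum hash-single and hash-double areas are carved out; the three sub-resources (singles, chunks, large chunks) are then registered as children of KVD_LINEAR so their sizes can be repartitioned at runtime. The subtraction, with made-up example sizes rather than real Spectrum defaults:

#include <stdio.h>

int main(void)
{
	unsigned int kvd_size = 512 * 1024;	/* total KVD entries */
	unsigned int single_min = 64 * 1024;	/* reserved for hash single */
	unsigned int double_min = 32 * 1024;	/* reserved for hash double */

	printf("kvdl_max_size = %u\n",
	       kvd_size - single_min - double_min);	/* 425984 */
	return 0;
}
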
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c
new file mode 100644
index 0000000..fc649fe
--- /dev/null
@@ -0,0 +1,374 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "spectrum.h"
+#include "core_acl_flex_actions.h"
+#include "spectrum_mr.h"
+
+struct mlxsw_sp1_mr_tcam_region {
+       struct mlxsw_sp *mlxsw_sp;
+       enum mlxsw_reg_rtar_key_type rtar_key_type;
+       struct parman *parman;
+       struct parman_prio *parman_prios;
+};
+
+struct mlxsw_sp1_mr_tcam {
+       struct mlxsw_sp1_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
+};
+
+struct mlxsw_sp1_mr_tcam_route {
+       struct parman_item parman_item;
+       struct parman_prio *parman_prio;
+};
+
+static int mlxsw_sp1_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
+                                          struct parman_item *parman_item,
+                                          struct mlxsw_sp_mr_route_key *key,
+                                          struct mlxsw_afa_block *afa_block)
+{
+       char rmft2_pl[MLXSW_REG_RMFT2_LEN];
+
+       switch (key->proto) {
+       case MLXSW_SP_L3_PROTO_IPV4:
+               mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
+                                         key->vrid,
+                                         MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
+                                         ntohl(key->group.addr4),
+                                         ntohl(key->group_mask.addr4),
+                                         ntohl(key->source.addr4),
+                                         ntohl(key->source_mask.addr4),
+                                         mlxsw_afa_block_first_set(afa_block));
+               break;
+       case MLXSW_SP_L3_PROTO_IPV6:
+               mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
+                                         key->vrid,
+                                         MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
+                                         key->group.addr6,
+                                         key->group_mask.addr6,
+                                         key->source.addr6,
+                                         key->source_mask.addr6,
+                                         mlxsw_afa_block_first_set(afa_block));
+       }
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
+}
+
+static int mlxsw_sp1_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp,
+                                         struct parman_item *parman_item,
+                                         struct mlxsw_sp_mr_route_key *key)
+{
+       struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
+       char rmft2_pl[MLXSW_REG_RMFT2_LEN];
+
+       switch (key->proto) {
+       case MLXSW_SP_L3_PROTO_IPV4:
+               mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
+                                         key->vrid, 0, 0, 0, 0, 0, 0, NULL);
+               break;
+       case MLXSW_SP_L3_PROTO_IPV6:
+               mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
+                                         key->vrid, 0, 0, zero_addr, zero_addr,
+                                         zero_addr, zero_addr, NULL);
+               break;
+       }
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
+}
+
+static struct mlxsw_sp1_mr_tcam_region *
+mlxsw_sp1_mr_tcam_protocol_region(struct mlxsw_sp1_mr_tcam *mr_tcam,
+                                 enum mlxsw_sp_l3proto proto)
+{
+       return &mr_tcam->tcam_regions[proto];
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_parman_item_add(struct mlxsw_sp1_mr_tcam *mr_tcam,
+                                       struct mlxsw_sp1_mr_tcam_route *route,
+                                       struct mlxsw_sp_mr_route_key *key,
+                                       enum mlxsw_sp_mr_route_prio prio)
+{
+       struct mlxsw_sp1_mr_tcam_region *tcam_region;
+       int err;
+
+       tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto);
+       err = parman_item_add(tcam_region->parman,
+                             &tcam_region->parman_prios[prio],
+                             &route->parman_item);
+       if (err)
+               return err;
+
+       route->parman_prio = &tcam_region->parman_prios[prio];
+       return 0;
+}
+
+static void
+mlxsw_sp1_mr_tcam_route_parman_item_remove(struct mlxsw_sp1_mr_tcam *mr_tcam,
+                                          struct mlxsw_sp1_mr_tcam_route *route,
+                                          struct mlxsw_sp_mr_route_key *key)
+{
+       struct mlxsw_sp1_mr_tcam_region *tcam_region;
+
+       tcam_region = mlxsw_sp1_mr_tcam_protocol_region(mr_tcam, key->proto);
+       parman_item_remove(tcam_region->parman,
+                          route->parman_prio, &route->parman_item);
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
+                              void *route_priv,
+                              struct mlxsw_sp_mr_route_key *key,
+                              struct mlxsw_afa_block *afa_block,
+                              enum mlxsw_sp_mr_route_prio prio)
+{
+       struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+       struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+       int err;
+
+       err = mlxsw_sp1_mr_tcam_route_parman_item_add(mr_tcam, route,
+                                                     key, prio);
+       if (err)
+               return err;
+
+       err = mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+                                             key, afa_block);
+       if (err)
+               goto err_route_replace;
+       return 0;
+
+err_route_replace:
+       mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key);
+       return err;
+}
+
+static void
+mlxsw_sp1_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv,
+                               void *route_priv,
+                               struct mlxsw_sp_mr_route_key *key)
+{
+       struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+       struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+
+       mlxsw_sp1_mr_tcam_route_remove(mlxsw_sp, &route->parman_item, key);
+       mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key);
+}
+
+static int
+mlxsw_sp1_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp,
+                              void *route_priv,
+                              struct mlxsw_sp_mr_route_key *key,
+                              struct mlxsw_afa_block *afa_block)
+{
+       struct mlxsw_sp1_mr_tcam_route *route = route_priv;
+
+       return mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+                                              key, afa_block);
+}
+
+#define MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT 16
+#define MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP 16
+
+static int
+mlxsw_sp1_mr_tcam_region_alloc(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+       struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+       char rtar_pl[MLXSW_REG_RTAR_LEN];
+
+       mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
+                           mr_tcam_region->rtar_key_type,
+                           MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static void
+mlxsw_sp1_mr_tcam_region_free(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+       struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+       char rtar_pl[MLXSW_REG_RTAR_LEN];
+
+       mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
+                           mr_tcam_region->rtar_key_type, 0);
+       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static int mlxsw_sp1_mr_tcam_region_parman_resize(void *priv,
+                                                 unsigned long new_count)
+{
+       struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv;
+       struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+       char rtar_pl[MLXSW_REG_RTAR_LEN];
+       u64 max_tcam_rules;
+
+       max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
+       if (new_count > max_tcam_rules)
+               return -EINVAL;
+       mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
+                           mr_tcam_region->rtar_key_type, new_count);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static void mlxsw_sp1_mr_tcam_region_parman_move(void *priv,
+                                                unsigned long from_index,
+                                                unsigned long to_index,
+                                                unsigned long count)
+{
+       struct mlxsw_sp1_mr_tcam_region *mr_tcam_region = priv;
+       struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+       char rrcr_pl[MLXSW_REG_RRCR_LEN];
+
+       mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
+                           from_index, count,
+                           mr_tcam_region->rtar_key_type, to_index);
+       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
+}
+
+static const struct parman_ops mlxsw_sp1_mr_tcam_region_parman_ops = {
+       .base_count     = MLXSW_SP1_MR_TCAM_REGION_BASE_COUNT,
+       .resize_step    = MLXSW_SP1_MR_TCAM_REGION_RESIZE_STEP,
+       .resize         = mlxsw_sp1_mr_tcam_region_parman_resize,
+       .move           = mlxsw_sp1_mr_tcam_region_parman_move,
+       .algo           = PARMAN_ALGO_TYPE_LSORT,
+};
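
parman (the kernel's priority-array manager, lib/parman.c) owns the index space and calls back into the driver: resize() must provision new_count slots, growing in resize_step increments, and move() must relocate count items so that, under the LSORT algorithm, lower-priority items stay at lower indices. A userspace mock of that contract, with a plain array standing in for the TCAM (illustrative only, not driver code):

#include <stdlib.h>
#include <string.h>

struct fake_tcam {
	unsigned long *rules;
	unsigned long count;
};

static int fake_resize(void *priv, unsigned long new_count)
{
	struct fake_tcam *t = priv;
	unsigned long *rules = realloc(t->rules, new_count * sizeof(*rules));

	if (!rules && new_count)
		return -1;	/* the driver returns -EINVAL past its cap */
	t->rules = rules;
	t->count = new_count;
	return 0;
}

static void fake_move(void *priv, unsigned long from, unsigned long to,
		      unsigned long count)
{
	struct fake_tcam *t = priv;

	/* ranges may overlap, hence memmove (RRCR does this in hardware) */
	memmove(&t->rules[to], &t->rules[from], count * sizeof(*t->rules));
}

int main(void)
{
	struct fake_tcam t = { 0 };

	if (fake_resize(&t, 16))
		return 1;
	t.rules[0] = 0xa;
	t.rules[1] = 0xb;
	fake_move(&t, 0, 2, 2);	/* shift both rules two slots down */
	free(t.rules);
	return 0;
}
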
+
+static int
+mlxsw_sp1_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
+                             struct mlxsw_sp1_mr_tcam_region *mr_tcam_region,
+                             enum mlxsw_reg_rtar_key_type rtar_key_type)
+{
+       struct parman_prio *parman_prios;
+       struct parman *parman;
+       int err;
+       int i;
+
+       mr_tcam_region->rtar_key_type = rtar_key_type;
+       mr_tcam_region->mlxsw_sp = mlxsw_sp;
+
+       err = mlxsw_sp1_mr_tcam_region_alloc(mr_tcam_region);
+       if (err)
+               return err;
+
+       parman = parman_create(&mlxsw_sp1_mr_tcam_region_parman_ops,
+                              mr_tcam_region);
+       if (!parman) {
+               err = -ENOMEM;
+               goto err_parman_create;
+       }
+       mr_tcam_region->parman = parman;
+
+       parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
+                                    sizeof(*parman_prios), GFP_KERNEL);
+       if (!parman_prios) {
+               err = -ENOMEM;
+               goto err_parman_prios_alloc;
+       }
+       mr_tcam_region->parman_prios = parman_prios;
+
+       for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
+               parman_prio_init(mr_tcam_region->parman,
+                                &mr_tcam_region->parman_prios[i], i);
+       return 0;
+
+err_parman_prios_alloc:
+       parman_destroy(parman);
+err_parman_create:
+       mlxsw_sp1_mr_tcam_region_free(mr_tcam_region);
+       return err;
+}
+
+static void
+mlxsw_sp1_mr_tcam_region_fini(struct mlxsw_sp1_mr_tcam_region *mr_tcam_region)
+{
+       int i;
+
+       for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
+               parman_prio_fini(&mr_tcam_region->parman_prios[i]);
+       kfree(mr_tcam_region->parman_prios);
+       parman_destroy(mr_tcam_region->parman);
+       mlxsw_sp1_mr_tcam_region_free(mr_tcam_region);
+}
+
+static int mlxsw_sp1_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+       struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+       struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
+       u32 rtar_key;
+       int err;
+
+       if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
+               return -EIO;
+
+       rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
+       err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp,
+                                           &region[MLXSW_SP_L3_PROTO_IPV4],
+                                           rtar_key);
+       if (err)
+               return err;
+
+       rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
+       err = mlxsw_sp1_mr_tcam_region_init(mlxsw_sp,
+                                           &region[MLXSW_SP_L3_PROTO_IPV6],
+                                           rtar_key);
+       if (err)
+               goto err_ipv6_region_init;
+
+       return 0;
+
+err_ipv6_region_init:
+       mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+       return err;
+}
+
+static void mlxsw_sp1_mr_tcam_fini(void *priv)
+{
+       struct mlxsw_sp1_mr_tcam *mr_tcam = priv;
+       struct mlxsw_sp1_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
+
+       mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
+       mlxsw_sp1_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+}
+
+const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops = {
+       .priv_size = sizeof(struct mlxsw_sp1_mr_tcam),
+       .init = mlxsw_sp1_mr_tcam_init,
+       .fini = mlxsw_sp1_mr_tcam_fini,
+       .route_priv_size = sizeof(struct mlxsw_sp1_mr_tcam_route),
+       .route_create = mlxsw_sp1_mr_tcam_route_create,
+       .route_destroy = mlxsw_sp1_mr_tcam_route_destroy,
+       .route_update = mlxsw_sp1_mr_tcam_route_update,
+};
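
This ops table is the seam the series builds toward: the machine-independent multicast routing code allocates route_priv_size bytes of backend state per route and dispatches through these pointers, so a Spectrum-2 implementation can later slot in without touching the core. A stripped-down sketch of the pattern, with names invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct backend_ops {
	size_t priv_size;
	int (*init)(void *priv);
};

static int sp1_init(void *priv)
{
	printf("sp1 backend init, priv at %p\n", priv);
	return 0;
}

static const struct backend_ops sp1_ops = {
	.priv_size = 64,	/* ASIC-specific state, opaque to the core */
	.init = sp1_init,
};

int main(void)
{
	const struct backend_ops *ops = &sp1_ops;	/* chosen per ASIC */
	void *priv = calloc(1, ops->priv_size);

	if (!priv)
		return 1;
	ops->init(priv);
	free(priv);
	return 0;
}
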
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 79b1fa27a9a439301a544f621367f8b925cdf52d..217621d79e26f1005b7f11422bde6bc7dc752126 100644
 #include "spectrum.h"
 #include "core_acl_flex_keys.h"
 #include "core_acl_flex_actions.h"
-#include "spectrum_acl_flex_keys.h"
+#include "spectrum_acl_tcam.h"
 
 struct mlxsw_sp_acl {
        struct mlxsw_sp *mlxsw_sp;
        struct mlxsw_afk *afk;
        struct mlxsw_sp_fid *dummy_fid;
-       const struct mlxsw_sp_acl_ops *ops;
        struct rhashtable ruleset_ht;
        struct list_head rules;
        struct {
@@ -62,8 +61,7 @@ struct mlxsw_sp_acl {
                unsigned long interval; /* ms */
 #define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
        } rule_activity_update;
-       unsigned long priv[0];
-       /* priv has to be always the last item */
+       struct mlxsw_sp_acl_tcam tcam;
 };
 
 struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
@@ -339,7 +337,7 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
        if (err)
                goto err_rhashtable_init;
 
-       err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
+       err = ops->ruleset_add(mlxsw_sp, &acl->tcam, ruleset->priv);
        if (err)
                goto err_ops_ruleset_add;
 
@@ -409,7 +407,7 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
        struct mlxsw_sp_acl_ruleset *ruleset;
 
-       ops = acl->ops->profile_ops(mlxsw_sp, profile);
+       ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
        if (!ops)
                return ERR_PTR(-EINVAL);
        ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
@@ -427,7 +425,7 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
        struct mlxsw_sp_acl_ruleset *ruleset;
 
-       ops = acl->ops->profile_ops(mlxsw_sp, profile);
+       ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
        if (!ops)
                return ERR_PTR(-EINVAL);
 
@@ -487,7 +485,7 @@ int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
 void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
                                 unsigned int priority)
 {
-       rulei->priority = priority;
+       rulei->priority = priority >> 16;
 }
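
The new shift accounts for how TC encodes filter priorities: tp->prio carries the user-visible preference in its upper 16 bits, with the lower 16 always zero by the time it reaches a classifier, so the driver stores the plain number. Worked example:

#include <assert.h>

int main(void)
{
	unsigned int user_prio = 10;		/* tc filter ... pref 10 */
	unsigned int tp_prio = user_prio << 16;	/* as seen in tp->prio */

	assert((tp_prio >> 16) == user_prio);
	return 0;
}
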
 
 void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
@@ -634,7 +632,8 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
        int err;
 
        mlxsw_sp_acl_ruleset_ref_inc(ruleset);
-       rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
+       rule = kzalloc(sizeof(*rule) + ops->rule_priv_size(mlxsw_sp),
+                      GFP_KERNEL);
        if (!rule) {
                err = -ENOMEM;
                goto err_alloc;
@@ -825,20 +824,20 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
 
 int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
 {
-       const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
        struct mlxsw_sp_fid *fid;
        struct mlxsw_sp_acl *acl;
+       size_t alloc_size;
        int err;
 
-       acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
+       alloc_size = sizeof(*acl) + mlxsw_sp_acl_tcam_priv_size(mlxsw_sp);
+       acl = kzalloc(alloc_size, GFP_KERNEL);
        if (!acl)
                return -ENOMEM;
        mlxsw_sp->acl = acl;
        acl->mlxsw_sp = mlxsw_sp;
        acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                       ACL_FLEX_KEYS),
-                                   mlxsw_sp_afk_blocks,
-                                   MLXSW_SP_AFK_BLOCKS_COUNT);
+                                   mlxsw_sp->afk_ops);
        if (!acl->afk) {
                err = -ENOMEM;
                goto err_afk_create;
@@ -857,12 +856,10 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
        acl->dummy_fid = fid;
 
        INIT_LIST_HEAD(&acl->rules);
-       err = acl_ops->init(mlxsw_sp, acl->priv);
+       err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
        if (err)
                goto err_acl_ops_init;
 
-       acl->ops = acl_ops;
-
        /* Create the delayed work for the rule activity_update */
        INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
                          mlxsw_sp_acl_rul_activity_update_work);
@@ -884,10 +881,9 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
 void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
 {
        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
-       const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;
 
        cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
-       acl_ops->fini(mlxsw_sp, acl->priv);
+       mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
        WARN_ON(!list_empty(&acl->rules));
        mlxsw_sp_fid_put(acl->dummy_fid);
        rhashtable_destroy(&acl->ruleset_ht);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
new file mode 100644
index 0000000..ef0d4c0
--- /dev/null
@@ -0,0 +1,215 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "core.h"
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+static int
+mlxsw_sp_acl_ctcam_region_resize(struct mlxsw_sp *mlxsw_sp,
+                                struct mlxsw_sp_acl_tcam_region *region,
+                                u16 new_size)
+{
+       char ptar_pl[MLXSW_REG_PTAR_LEN];
+
+       mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
+                           region->key_type, new_size, region->id,
+                           region->tcam_region_info);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
+}
+
+static void
+mlxsw_sp_acl_ctcam_region_move(struct mlxsw_sp *mlxsw_sp,
+                              struct mlxsw_sp_acl_tcam_region *region,
+                              u16 src_offset, u16 dst_offset, u16 size)
+{
+       char prcr_pl[MLXSW_REG_PRCR_LEN];
+
+       mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
+                           region->tcam_region_info, src_offset,
+                           region->tcam_region_info, dst_offset, size);
+       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
+}
+
+static int
+mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+                                      struct mlxsw_sp_acl_tcam_region *region,
+                                      unsigned int offset,
+                                      struct mlxsw_sp_acl_rule_info *rulei,
+                                      bool fillup_priority)
+{
+       struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+       char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+       char *act_set;
+       u32 priority;
+       char *mask;
+       char *key;
+       int err;
+
+       err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority,
+                                            fillup_priority);
+       if (err)
+               return err;
+
+       mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+                            region->tcam_region_info, offset, priority);
+       key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
+       mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
+       mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask);
+
+       /* Only the first action set belongs here, the rest is in KVD */
+       act_set = mlxsw_afa_block_first_set(rulei->act_block);
+       mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+}
+
+static void
+mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
+                                      struct mlxsw_sp_acl_tcam_region *region,
+                                      unsigned int offset)
+{
+       char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+
+       mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+                            region->tcam_region_info, offset, 0);
+       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+}
+
+static int mlxsw_sp_acl_ctcam_region_parman_resize(void *priv,
+                                                  unsigned long new_count)
+{
+       struct mlxsw_sp_acl_ctcam_region *cregion = priv;
+       struct mlxsw_sp_acl_tcam_region *region = cregion->region;
+       struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+       u64 max_tcam_rules;
+
+       max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
+       if (new_count > max_tcam_rules)
+               return -EINVAL;
+       return mlxsw_sp_acl_ctcam_region_resize(mlxsw_sp, region, new_count);
+}
+
+static void mlxsw_sp_acl_ctcam_region_parman_move(void *priv,
+                                                 unsigned long from_index,
+                                                 unsigned long to_index,
+                                                 unsigned long count)
+{
+       struct mlxsw_sp_acl_ctcam_region *cregion = priv;
+       struct mlxsw_sp_acl_tcam_region *region = cregion->region;
+       struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+
+       mlxsw_sp_acl_ctcam_region_move(mlxsw_sp, region,
+                                      from_index, to_index, count);
+}
+
+static const struct parman_ops mlxsw_sp_acl_ctcam_region_parman_ops = {
+       .base_count     = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
+       .resize_step    = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
+       .resize         = mlxsw_sp_acl_ctcam_region_parman_resize,
+       .move           = mlxsw_sp_acl_ctcam_region_parman_move,
+       .algo           = PARMAN_ALGO_TYPE_LSORT,
+};
+
+int mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
+                                  struct mlxsw_sp_acl_ctcam_region *cregion,
+                                  struct mlxsw_sp_acl_tcam_region *region)
+{
+       cregion->region = region;
+       cregion->parman = parman_create(&mlxsw_sp_acl_ctcam_region_parman_ops,
+                                       cregion);
+       if (!cregion->parman)
+               return -ENOMEM;
+       return 0;
+}
+
+void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion)
+{
+       parman_destroy(cregion->parman);
+}
+
+void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
+                                  struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+                                  unsigned int priority)
+{
+       parman_prio_init(cregion->parman, &cchunk->parman_prio, priority);
+}
+
+void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk)
+{
+       parman_prio_fini(&cchunk->parman_prio);
+}
+
+int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+                                struct mlxsw_sp_acl_ctcam_region *cregion,
+                                struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+                                struct mlxsw_sp_acl_ctcam_entry *centry,
+                                struct mlxsw_sp_acl_rule_info *rulei,
+                                bool fillup_priority)
+{
+       int err;
+
+       err = parman_item_add(cregion->parman, &cchunk->parman_prio,
+                             &centry->parman_item);
+       if (err)
+               return err;
+
+       err = mlxsw_sp_acl_ctcam_region_entry_insert(mlxsw_sp, cregion->region,
+                                                    centry->parman_item.index,
+                                                    rulei, fillup_priority);
+       if (err)
+               goto err_rule_insert;
+       return 0;
+
+err_rule_insert:
+       parman_item_remove(cregion->parman, &cchunk->parman_prio,
+                          &centry->parman_item);
+       return err;
+}
+
+void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+                                 struct mlxsw_sp_acl_ctcam_region *cregion,
+                                 struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+                                 struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+       mlxsw_sp_acl_ctcam_region_entry_remove(mlxsw_sp, cregion->region,
+                                              centry->parman_item.index);
+       parman_item_remove(cregion->parman, &cchunk->parman_prio,
+                          &centry->parman_item);
+}
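
Both entry paths follow the same two-phase shape: reserve an index in the software allocator (parman), program the hardware at that index, and roll the reservation back if programming fails; removal runs the phases in reverse. The pattern in the abstract, with stub functions standing in for parman and the register writes:

#include <stdio.h>

static int slot_reserve(int *slot) { *slot = 7; return 0; }	/* stub */
static void slot_release(int slot) { (void)slot; }		/* stub */
static int hw_program(int slot) { return slot >= 0 ? 0 : -1; }	/* stub */

static int entry_add(void)
{
	int slot, err;

	err = slot_reserve(&slot);
	if (err)
		return err;

	err = hw_program(slot);
	if (err)
		goto err_hw_program;	/* mirrors err_rule_insert above */
	return 0;

err_hw_program:
	slot_release(slot);
	return err;
}

int main(void)
{
	printf("entry_add() = %d\n", entry_add());
	return 0;
}
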
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index 510ce48d87f7470fff02d524fe13416af2ae3c5f..6a7c3406b72494913bfd5ee8e84dc6c3ce134258 100644
@@ -37,8 +37,6 @@
 #include "core_acl_flex_actions.h"
 #include "spectrum_span.h"
 
-#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1
-
 static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
                                     char *enc_actions, bool is_first)
 {
@@ -53,8 +51,8 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
        if (is_first)
                return 0;
 
-       err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE,
-                                 &kvdl_index);
+       err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+                                 1, &kvdl_index);
        if (err)
                return err;
        mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
@@ -65,7 +63,8 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
        return 0;
 
 err_pefa_write:
-       mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+       mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+                          1, kvdl_index);
        return err;
 }
 
@@ -76,7 +75,8 @@ static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
 
        if (is_first)
                return;
-       mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+       mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
+                          1, kvdl_index);
 }
 
 static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
@@ -87,7 +87,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
        u32 kvdl_index;
        int err;
 
-       err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
+       err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+                                 1, &kvdl_index);
        if (err)
                return err;
        mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
@@ -98,7 +99,8 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
        return 0;
 
 err_ppbs_write:
-       mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+       mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+                          1, kvdl_index);
        return err;
 }
 
@@ -106,7 +108,8 @@ static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
 {
        struct mlxsw_sp *mlxsw_sp = priv;
 
-       mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+       mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
+                          1, kvdl_index);
 }
 
 static int
@@ -154,7 +157,7 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
        mlxsw_sp_span_mirror_del(in_port, span_id, type, false);
 }
 
-static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
+const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
        .kvdl_set_add           = mlxsw_sp_act_kvdl_set_add,
        .kvdl_set_del           = mlxsw_sp_act_kvdl_set_del,
        .kvdl_fwd_entry_add     = mlxsw_sp_act_kvdl_fwd_entry_add,
@@ -169,7 +172,7 @@ int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp)
 {
        mlxsw_sp->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                            ACL_ACTIONS_PER_SET),
-                                        &mlxsw_sp_act_afa_ops, mlxsw_sp);
+                                        mlxsw_sp->afa_ops, mlxsw_sp);
        return PTR_ERR_OR_ZERO(mlxsw_sp->afa);
 }
 
similarity index 59%
rename from drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
rename to drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index fb80318284546d45fd558a9b69884a0c0428e122..80f22b7c21da7d2131337d7b187f7dff0f329804 100644
@@ -1,7 +1,7 @@
 /*
- * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
-#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
-
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "spectrum.h"
+#include "item.h"
 #include "core_acl_flex_keys.h"
 
 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
-       MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6),
+       MLXSW_AFK_ELEMENT_INST_BUF(DMAC_32_47, 0x00, 2),
+       MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x02, 4),
        MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
        MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
-       MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+       MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
 };
 
 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
-       MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6),
+       MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x00, 2),
+       MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x02, 4),
        MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3),
        MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12),
-       MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+       MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
 };
 
 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
-       MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x02, 6),
+       MLXSW_AFK_ELEMENT_INST_BUF(SMAC_32_47, 0x02, 2),
+       MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
        MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16),
 };
 
 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
-       MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32),
+       MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
        MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
-       MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+       MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
 };
 
 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
-       MLXSW_AFK_ELEMENT_INST_U32(DST_IP4, 0x00, 0, 32),
+       MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4),
        MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
-       MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+       MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 8),
 };
 
 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = {
-       MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32),
+       MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
        MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2),
        MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8),
        MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6),
@@ -84,27 +88,31 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
 };
 
 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
-       MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_LO, 0x00, 8),
+       MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x00, 4),
+       MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x04, 4),
 };
 
 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
-       MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_HI, 0x00, 8),
+       MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x00, 4),
+       MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
        MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
 };
 
 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
-       MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_LO, 0x00, 8),
+       MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_32_63, 0x00, 4),
+       MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x04, 4),
 };
 
 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
-       MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_HI, 0x00, 8),
+       MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_96_127, 0x00, 4),
+       MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_64_95, 0x04, 4),
 };
 
 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
        MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
 };
 
-static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = {
+static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = {
        MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac),
        MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac),
        MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex),
@@ -119,6 +127,48 @@ static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = {
        MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type),
 };
 
-#define MLXSW_SP_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp_afk_blocks)
-
-#endif
+static void mlxsw_sp1_afk_encode_u32(const struct mlxsw_item *storage_item,
+                                    const struct mlxsw_item *output_item,
+                                    char *storage, char *output_indexed)
+{
+       u32 value;
+
+       value = __mlxsw_item_get32(storage, storage_item, 0);
+       __mlxsw_item_set32(output_indexed, output_item, 0, value);
+}
+
+static void mlxsw_sp1_afk_encode_buf(const struct mlxsw_item *storage_item,
+                                    const struct mlxsw_item *output_item,
+                                    char *storage, char *output_indexed)
+{
+       char *storage_data = __mlxsw_item_data(storage, storage_item, 0);
+       char *output_data = __mlxsw_item_data(output_indexed, output_item, 0);
+       size_t len = output_item->size.bytes;
+
+       memcpy(output_data, storage_data, len);
+}
+
+#define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16
+
+static void
+mlxsw_sp1_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
+                        int block_index, char *storage, char *output)
+{
+       unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
+       char *output_indexed = output + offset;
+       const struct mlxsw_item *storage_item = &elinst->info->item;
+       const struct mlxsw_item *output_item = &elinst->item;
+
+       if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32)
+               mlxsw_sp1_afk_encode_u32(storage_item, output_item,
+                                        storage, output_indexed);
+       else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF)
+               mlxsw_sp1_afk_encode_buf(storage_item, output_item,
+                                        storage, output_indexed);
+}
+
+const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
+       .blocks         = mlxsw_sp1_afk_blocks,
+       .blocks_count   = ARRAY_SIZE(mlxsw_sp1_afk_blocks),
+       .encode_one     = mlxsw_sp1_afk_encode_one,
+};
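
encode_one() places each element instance into a fixed 16-byte window of the final key, selected by the block index; within the window, u32 elements are re-packed bit-field style and buffer elements are copied verbatim. A userspace sketch of the buffer case, with invented offsets:

#include <stdio.h>
#include <string.h>

#define KEY_BLOCK_SIZE 16

static void encode_buf(const char *storage, size_t s_off,
		       char *output, int block_index, size_t o_off,
		       size_t len)
{
	memcpy(output + block_index * KEY_BLOCK_SIZE + o_off,
	       storage + s_off, len);
}

int main(void)
{
	char storage[32] = "\xde\xad\xbe\xef";
	char key[64] = { 0 };

	encode_buf(storage, 0, key, 2, 4, 4);	/* byte 2*16 + 4 = 36 */
	printf("key[36] = %02x\n", (unsigned char)key[36]);	/* de */
	return 0;
}
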
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index ad1b548e3cace26f149ec9b9ac7eea1e493f8b6e..53fe51a8d720891389a4e9493bafab8a98b139b8 100644
@@ -1,7 +1,7 @@
 /*
  * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
 #include <linux/list.h>
 #include <linux/rhashtable.h>
 #include <linux/netdevice.h>
-#include <linux/parman.h>
 
 #include "reg.h"
 #include "core.h"
 #include "resources.h"
 #include "spectrum.h"
+#include "spectrum_acl_tcam.h"
 #include "core_acl_flex_keys.h"
 
-struct mlxsw_sp_acl_tcam {
-       unsigned long *used_regions; /* bit array */
-       unsigned int max_regions;
-       unsigned long *used_groups;  /* bit array */
-       unsigned int max_groups;
-       unsigned int max_group_size;
-};
+size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+       const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+       return ops->priv_size;
+}
 
-static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+                          struct mlxsw_sp_acl_tcam *tcam)
 {
-       struct mlxsw_sp_acl_tcam *tcam = priv;
+       const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        u64 max_tcam_regions;
        u64 max_regions;
        u64 max_groups;
@@ -88,21 +88,53 @@ static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
        tcam->max_groups = max_groups;
        tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                 ACL_MAX_GROUP_SIZE);
+
+       err = ops->init(mlxsw_sp, tcam->priv, tcam);
+       if (err)
+               goto err_tcam_init;
+
        return 0;
 
+err_tcam_init:
+       kfree(tcam->used_groups);
 err_alloc_used_groups:
        kfree(tcam->used_regions);
        return err;
 }
 
-static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+                           struct mlxsw_sp_acl_tcam *tcam)
 {
-       struct mlxsw_sp_acl_tcam *tcam = priv;
+       const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
 
+       ops->fini(mlxsw_sp, tcam->priv);
        kfree(tcam->used_groups);
        kfree(tcam->used_regions);
 }
 
+int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
+                                  struct mlxsw_sp_acl_rule_info *rulei,
+                                  u32 *priority, bool fillup_priority)
+{
+       u64 max_priority;
+
+       if (!fillup_priority) {
+               *priority = 0;
+               return 0;
+       }
+
+       if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
+               return -EIO;
+
+       max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE);
+       if (rulei->priority > max_priority)
+               return -EINVAL;
+
+       /* Unlike in TC, in HW, higher number means higher priority. */
+       *priority = max_priority - rulei->priority;
+       return 0;
+}
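
The inversion reconciles two conventions: in TC a lower number means a more important filter, while the Spectrum TCAM gives precedence to the higher numeric priority, so the helper mirrors one range onto the other within [0, max_priority]. With a hypothetical ceiling in place of KVD_SIZE:

#include <assert.h>

int main(void)
{
	unsigned long long max_priority = 1000;	/* illustrative only */
	unsigned int tc_a = 1, tc_b = 2;	/* a beats b in TC */

	assert(max_priority - tc_a > max_priority - tc_b);	/* a still wins */
	return 0;
}
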
+
 static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
                                           u16 *p_id)
 {
@@ -159,35 +191,21 @@ struct mlxsw_sp_acl_tcam_group {
        unsigned int patterns_count;
 };
 
-struct mlxsw_sp_acl_tcam_region {
-       struct list_head list; /* Member of a TCAM group */
-       struct list_head chunk_list; /* List of chunks under this region */
-       struct parman *parman;
-       struct mlxsw_sp *mlxsw_sp;
-       struct mlxsw_sp_acl_tcam_group *group;
-       u16 id; /* ACL ID and region ID - they are same */
-       char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
-       struct mlxsw_afk_key_info *key_info;
-       struct {
-               struct parman_prio parman_prio;
-               struct parman_item parman_item;
-               struct mlxsw_sp_acl_rule_info *rulei;
-       } catchall;
-};
-
 struct mlxsw_sp_acl_tcam_chunk {
        struct list_head list; /* Member of a TCAM region */
        struct rhash_head ht_node; /* Member of a chunk HT */
        unsigned int priority; /* Priority within the region and group */
-       struct parman_prio parman_prio;
        struct mlxsw_sp_acl_tcam_group *group;
        struct mlxsw_sp_acl_tcam_region *region;
        unsigned int ref_count;
+       unsigned long priv[0];
+       /* priv has to be always the last item */
 };
 
 struct mlxsw_sp_acl_tcam_entry {
-       struct parman_item parman_item;
        struct mlxsw_sp_acl_tcam_chunk *chunk;
+       unsigned long priv[0];
+       /* priv has to be always the last item */
 };
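
The priv[0] members use the kernel's trailing-storage idiom: the generic code allocates sizeof(struct) plus the backend-reported priv size in a single kzalloc(), and the backend treats priv as its own scratch area. The same idea in standard C99, with a flexible array member in place of the [0] form:

#include <stdlib.h>
#include <string.h>

struct entry {
	int chunk_id;		/* generic part */
	unsigned long priv[];	/* backend part; must stay last */
};

static struct entry *entry_alloc(size_t priv_size)
{
	return calloc(1, sizeof(struct entry) + priv_size);
}

int main(void)
{
	struct entry *e = entry_alloc(64);	/* 64 is arbitrary here */

	if (!e)
		return 1;
	memset(e->priv, 0xff, 64);	/* backend-owned bytes */
	free(e);
	return 0;
}
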
 
 static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
@@ -441,9 +459,6 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
        memcpy(out, elusage, sizeof(*out));
 }
 
-#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
-#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
-
 static int
 mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_tcam_region *region)
@@ -455,6 +470,7 @@ mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
        int err;
 
        mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
+                           region->key_type,
                            MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
                            region->id, region->tcam_region_info);
        encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
@@ -477,23 +493,12 @@ mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
 {
        char ptar_pl[MLXSW_REG_PTAR_LEN];
 
-       mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id,
+       mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
+                           region->key_type, 0, region->id,
                            region->tcam_region_info);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
 }
 
-static int
-mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp,
-                               struct mlxsw_sp_acl_tcam_region *region,
-                               u16 new_size)
-{
-       char ptar_pl[MLXSW_REG_PTAR_LEN];
-
-       mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
-                           new_size, region->id, region->tcam_region_info);
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
-}
-
 static int
 mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam_region *region)
@@ -516,193 +521,22 @@ mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
 }
 
-static int
-mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
-                                     struct mlxsw_sp_acl_tcam_region *region,
-                                     unsigned int offset,
-                                     struct mlxsw_sp_acl_rule_info *rulei)
-{
-       char ptce2_pl[MLXSW_REG_PTCE2_LEN];
-       char *act_set;
-       char *mask;
-       char *key;
-
-       mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
-                            region->tcam_region_info, offset);
-       key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
-       mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
-       mlxsw_afk_encode(region->key_info, &rulei->values, key, mask);
-
-       /* Only the first action set belongs here, the rest is in KVD */
-       act_set = mlxsw_afa_block_first_set(rulei->act_block);
-       mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
-
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
-}
-
-static void
-mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
-                                     struct mlxsw_sp_acl_tcam_region *region,
-                                     unsigned int offset)
-{
-       char ptce2_pl[MLXSW_REG_PTCE2_LEN];
-
-       mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
-                            region->tcam_region_info, offset);
-       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
-}
-
-static int
-mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
-                                           struct mlxsw_sp_acl_tcam_region *region,
-                                           unsigned int offset,
-                                           bool *activity)
-{
-       char ptce2_pl[MLXSW_REG_PTCE2_LEN];
-       int err;
-
-       mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
-                            region->tcam_region_info, offset);
-       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
-       if (err)
-               return err;
-       *activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
-       return 0;
-}
-
-#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
-
-static int
-mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
-                                     struct mlxsw_sp_acl_tcam_region *region)
-{
-       struct parman_prio *parman_prio = &region->catchall.parman_prio;
-       struct parman_item *parman_item = &region->catchall.parman_item;
-       struct mlxsw_sp_acl_rule_info *rulei;
-       int err;
-
-       parman_prio_init(region->parman, parman_prio,
-                        MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
-       err = parman_item_add(region->parman, parman_prio, parman_item);
-       if (err)
-               goto err_parman_item_add;
-
-       rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
-       if (IS_ERR(rulei)) {
-               err = PTR_ERR(rulei);
-               goto err_rulei_create;
-       }
-
-       err = mlxsw_sp_acl_rulei_act_continue(rulei);
-       if (WARN_ON(err))
-               goto err_rulei_act_continue;
-
-       err = mlxsw_sp_acl_rulei_commit(rulei);
-       if (err)
-               goto err_rulei_commit;
-
-       err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
-                                                   parman_item->index, rulei);
-       region->catchall.rulei = rulei;
-       if (err)
-               goto err_rule_insert;
-
-       return 0;
-
-err_rule_insert:
-err_rulei_commit:
-err_rulei_act_continue:
-       mlxsw_sp_acl_rulei_destroy(rulei);
-err_rulei_create:
-       parman_item_remove(region->parman, parman_prio, parman_item);
-err_parman_item_add:
-       parman_prio_fini(parman_prio);
-       return err;
-}
-
-static void
-mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
-                                     struct mlxsw_sp_acl_tcam_region *region)
-{
-       struct parman_prio *parman_prio = &region->catchall.parman_prio;
-       struct parman_item *parman_item = &region->catchall.parman_item;
-       struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;
-
-       mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
-                                             parman_item->index);
-       mlxsw_sp_acl_rulei_destroy(rulei);
-       parman_item_remove(region->parman, parman_prio, parman_item);
-       parman_prio_fini(parman_prio);
-}
-
-static void
-mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp,
-                             struct mlxsw_sp_acl_tcam_region *region,
-                             u16 src_offset, u16 dst_offset, u16 size)
-{
-       char prcr_pl[MLXSW_REG_PRCR_LEN];
-
-       mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
-                           region->tcam_region_info, src_offset,
-                           region->tcam_region_info, dst_offset, size);
-       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
-}
-
-static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv,
-                                                 unsigned long new_count)
-{
-       struct mlxsw_sp_acl_tcam_region *region = priv;
-       struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
-       u64 max_tcam_rules;
-
-       max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
-       if (new_count > max_tcam_rules)
-               return -EINVAL;
-       return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count);
-}
-
-static void mlxsw_sp_acl_tcam_region_parman_move(void *priv,
-                                                unsigned long from_index,
-                                                unsigned long to_index,
-                                                unsigned long count)
-{
-       struct mlxsw_sp_acl_tcam_region *region = priv;
-       struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
-
-       mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region,
-                                     from_index, to_index, count);
-}
-
-static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = {
-       .base_count     = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
-       .resize_step    = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
-       .resize         = mlxsw_sp_acl_tcam_region_parman_resize,
-       .move           = mlxsw_sp_acl_tcam_region_parman_move,
-       .algo           = PARMAN_ALGO_TYPE_LSORT,
-};
-
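
For context on what is being removed here: parman (lib/parman.c, the "priority array manager") keeps items ordered by priority inside a linear table and calls back into its user to resize the table and move blocks of entries. The same callbacks reappear in the new common C-TCAM file; a sketch of the minimal consumer pattern that was in use (not buildable standalone, callback bodies elided):

#include <linux/parman.h>

static int my_resize(void *priv, unsigned long new_count)
{
	/* grow/shrink the backing hardware region to new_count entries */
	return 0;
}

static void my_move(void *priv, unsigned long from_index,
		    unsigned long to_index, unsigned long count)
{
	/* relocate count entries within the backing region */
}

static const struct parman_ops my_parman_ops = {
	.base_count	= 16,		/* initial table size */
	.resize_step	= 16,		/* grow in steps of 16 */
	.resize		= my_resize,
	.move		= my_move,
	.algo		= PARMAN_ALGO_TYPE_LSORT,	/* linear sort */
};
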
 static struct mlxsw_sp_acl_tcam_region *
 mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam *tcam,
                                struct mlxsw_afk_element_usage *elusage)
 {
+       const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
        struct mlxsw_sp_acl_tcam_region *region;
        int err;
 
-       region = kzalloc(sizeof(*region), GFP_KERNEL);
+       region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
        if (!region)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&region->chunk_list);
        region->mlxsw_sp = mlxsw_sp;
 
-       region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops,
-                                      region);
-       if (!region->parman) {
-               err = -ENOMEM;
-               goto err_parman_create;
-       }
-
        region->key_info = mlxsw_afk_key_info_get(afk, elusage);
        if (IS_ERR(region->key_info)) {
                err = PTR_ERR(region->key_info);
@@ -713,6 +547,7 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
        if (err)
                goto err_region_id_get;
 
+       region->key_type = ops->key_type;
        err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
        if (err)
                goto err_tcam_region_alloc;
@@ -721,13 +556,13 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
        if (err)
                goto err_tcam_region_enable;
 
-       err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region);
+       err = ops->region_init(mlxsw_sp, region->priv, region);
        if (err)
-               goto err_tcam_region_catchall_add;
+               goto err_tcam_region_init;
 
        return region;
 
-err_tcam_region_catchall_add:
+err_tcam_region_init:
        mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
 err_tcam_region_enable:
        mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
@@ -736,8 +571,6 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
 err_region_id_get:
        mlxsw_afk_key_info_put(region->key_info);
 err_key_info_get:
-       parman_destroy(region->parman);
-err_parman_create:
        kfree(region);
        return ERR_PTR(err);
 }
@@ -746,12 +579,13 @@ static void
 mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_region *region)
 {
-       mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region);
+       const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+       ops->region_fini(mlxsw_sp, region->priv);
        mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
        mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
        mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
        mlxsw_afk_key_info_put(region->key_info);
-       parman_destroy(region->parman);
        kfree(region);
 }
 
@@ -826,13 +660,14 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
                               unsigned int priority,
                               struct mlxsw_afk_element_usage *elusage)
 {
+       const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_chunk *chunk;
        int err;
 
        if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
                return ERR_PTR(-EINVAL);
 
-       chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+       chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
        if (!chunk)
                return ERR_PTR(-ENOMEM);
        chunk->priority = priority;
@@ -844,7 +679,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
        if (err)
                goto err_chunk_assoc;
 
-       parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority);
+       ops->chunk_init(chunk->region->priv, chunk->priv, priority);
 
        err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
                                     mlxsw_sp_acl_tcam_chunk_ht_params);
@@ -854,7 +689,7 @@ mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
        return chunk;
 
 err_rhashtable_insert:
-       parman_prio_fini(&chunk->parman_prio);
+       ops->chunk_fini(chunk->priv);
        mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
 err_chunk_assoc:
        kfree(chunk);
@@ -865,11 +700,12 @@ static void
 mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam_chunk *chunk)
 {
+       const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_group *group = chunk->group;
 
        rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
                               mlxsw_sp_acl_tcam_chunk_ht_params);
-       parman_prio_fini(&chunk->parman_prio);
+       ops->chunk_fini(chunk->priv);
        mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
        kfree(chunk);
 }
@@ -903,11 +739,19 @@ static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
        mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
 }
 
+static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+       const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+       return ops->entry_priv_size;
+}
+
 static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
                                       struct mlxsw_sp_acl_tcam_group *group,
                                       struct mlxsw_sp_acl_tcam_entry *entry,
                                       struct mlxsw_sp_acl_rule_info *rulei)
 {
+       const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_chunk *chunk;
        struct mlxsw_sp_acl_tcam_region *region;
        int err;
@@ -918,24 +762,16 @@ static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
                return PTR_ERR(chunk);
 
        region = chunk->region;
-       err = parman_item_add(region->parman, &chunk->parman_prio,
-                             &entry->parman_item);
-       if (err)
-               goto err_parman_item_add;
 
-       err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
-                                                   entry->parman_item.index,
-                                                   rulei);
+       err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv,
+                            entry->priv, rulei);
        if (err)
-               goto err_rule_insert;
+               goto err_entry_add;
        entry->chunk = chunk;
 
        return 0;
 
-err_rule_insert:
-       parman_item_remove(region->parman, &chunk->parman_prio,
-                          &entry->parman_item);
-err_parman_item_add:
+err_entry_add:
        mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
        return err;
 }
@@ -943,13 +779,11 @@ static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
 static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_entry *entry)
 {
+       const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
        struct mlxsw_sp_acl_tcam_region *region = chunk->region;
 
-       mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
-                                             entry->parman_item.index);
-       parman_item_remove(region->parman, &chunk->parman_prio,
-                          &entry->parman_item);
+       ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv);
        mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
 }
 
@@ -958,22 +792,24 @@ mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_tcam_entry *entry,
                                     bool *activity)
 {
+       const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
        struct mlxsw_sp_acl_tcam_region *region = chunk->region;
 
-       return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
-                                                          entry->parman_item.index,
-                                                          activity);
+       return ops->entry_activity_get(mlxsw_sp, region->priv,
+                                      entry->priv, activity);
 }
 
 static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
        MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
-       MLXSW_AFK_ELEMENT_DMAC,
-       MLXSW_AFK_ELEMENT_SMAC,
+       MLXSW_AFK_ELEMENT_DMAC_32_47,
+       MLXSW_AFK_ELEMENT_DMAC_0_31,
+       MLXSW_AFK_ELEMENT_SMAC_32_47,
+       MLXSW_AFK_ELEMENT_SMAC_0_31,
        MLXSW_AFK_ELEMENT_ETHERTYPE,
        MLXSW_AFK_ELEMENT_IP_PROTO,
-       MLXSW_AFK_ELEMENT_SRC_IP4,
-       MLXSW_AFK_ELEMENT_DST_IP4,
+       MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+       MLXSW_AFK_ELEMENT_DST_IP_0_31,
        MLXSW_AFK_ELEMENT_DST_L4_PORT,
        MLXSW_AFK_ELEMENT_SRC_L4_PORT,
        MLXSW_AFK_ELEMENT_VID,
@@ -987,10 +823,14 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
 static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
        MLXSW_AFK_ELEMENT_ETHERTYPE,
        MLXSW_AFK_ELEMENT_IP_PROTO,
-       MLXSW_AFK_ELEMENT_SRC_IP6_HI,
-       MLXSW_AFK_ELEMENT_SRC_IP6_LO,
-       MLXSW_AFK_ELEMENT_DST_IP6_HI,
-       MLXSW_AFK_ELEMENT_DST_IP6_LO,
+       MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+       MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+       MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+       MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+       MLXSW_AFK_ELEMENT_DST_IP_96_127,
+       MLXSW_AFK_ELEMENT_DST_IP_64_95,
+       MLXSW_AFK_ELEMENT_DST_IP_32_63,
+       MLXSW_AFK_ELEMENT_DST_IP_0_31,
        MLXSW_AFK_ELEMENT_DST_L4_PORT,
        MLXSW_AFK_ELEMENT_SRC_L4_PORT,
 };
@@ -1019,10 +859,10 @@ struct mlxsw_sp_acl_tcam_flower_rule {
 
 static int
 mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
-                                    void *priv, void *ruleset_priv)
+                                    struct mlxsw_sp_acl_tcam *tcam,
+                                    void *ruleset_priv)
 {
        struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
-       struct mlxsw_sp_acl_tcam *tcam = priv;
 
        return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
                                           mlxsw_sp_acl_tcam_patterns,
@@ -1070,6 +910,12 @@ mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
        return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
 }
 
+static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+       return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) +
+              mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
+}
+
 static int
 mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
                                  void *ruleset_priv, void *rule_priv,
@@ -1107,7 +953,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
        .ruleset_bind           = mlxsw_sp_acl_tcam_flower_ruleset_bind,
        .ruleset_unbind         = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
        .ruleset_group_id       = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
-       .rule_priv_size         = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
+       .rule_priv_size         = mlxsw_sp_acl_tcam_flower_rule_priv_size,
        .rule_add               = mlxsw_sp_acl_tcam_flower_rule_add,
        .rule_del               = mlxsw_sp_acl_tcam_flower_rule_del,
        .rule_activity_get      = mlxsw_sp_acl_tcam_flower_rule_activity_get,
@@ -1118,7 +964,7 @@ mlxsw_sp_acl_tcam_profile_ops_arr[] = {
        [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
 };
 
-static const struct mlxsw_sp_acl_profile_ops *
+const struct mlxsw_sp_acl_profile_ops *
 mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
                              enum mlxsw_sp_acl_profile profile)
 {
@@ -1131,10 +977,3 @@ mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
                return NULL;
        return ops;
 }
-
-const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = {
-       .priv_size              = sizeof(struct mlxsw_sp_acl_tcam),
-       .init                   = mlxsw_sp_acl_tcam_init,
-       .fini                   = mlxsw_sp_acl_tcam_fini,
-       .profile_ops            = mlxsw_sp_acl_tcam_profile_ops,
-};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
new file mode 100644
index 0000000..cef7697
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_ACL_TCAM_H
+#define _MLXSW_SPECTRUM_ACL_TCAM_H
+
+#include <linux/list.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "spectrum.h"
+#include "core_acl_flex_keys.h"
+
+struct mlxsw_sp_acl_tcam {
+       unsigned long *used_regions; /* bit array */
+       unsigned int max_regions;
+       unsigned long *used_groups;  /* bit array */
+       unsigned int max_groups;
+       unsigned int max_group_size;
+       unsigned long priv[0];
+       /* priv has to be always the last item */
+};
+
+size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+                          struct mlxsw_sp_acl_tcam *tcam);
+void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+                           struct mlxsw_sp_acl_tcam *tcam);
+int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
+                                  struct mlxsw_sp_acl_rule_info *rulei,
+                                  u32 *priority, bool fillup_priority);
+
+struct mlxsw_sp_acl_profile_ops {
+       size_t ruleset_priv_size;
+       int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
+                          struct mlxsw_sp_acl_tcam *tcam, void *ruleset_priv);
+       void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+       int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+                           struct mlxsw_sp_port *mlxsw_sp_port,
+                           bool ingress);
+       void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+                              struct mlxsw_sp_port *mlxsw_sp_port,
+                              bool ingress);
+       u16 (*ruleset_group_id)(void *ruleset_priv);
+       size_t (*rule_priv_size)(struct mlxsw_sp *mlxsw_sp);
+       int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
+                       void *ruleset_priv, void *rule_priv,
+                       struct mlxsw_sp_acl_rule_info *rulei);
+       void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
+       int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
+                                bool *activity);
+};
+
+const struct mlxsw_sp_acl_profile_ops *
+mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
+                             enum mlxsw_sp_acl_profile profile);
+
+#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
+#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
+
+#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
+
+struct mlxsw_sp_acl_tcam_group;
+
+struct mlxsw_sp_acl_tcam_region {
+       struct list_head list; /* Member of a TCAM group */
+       struct list_head chunk_list; /* List of chunks under this region */
+       struct mlxsw_sp_acl_tcam_group *group;
+       enum mlxsw_reg_ptar_key_type key_type;
+       u16 id; /* ACL ID and region ID - they are same */
+       char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
+       struct mlxsw_afk_key_info *key_info;
+       struct mlxsw_sp *mlxsw_sp;
+       unsigned long priv[0];
+       /* priv has to be always the last item */
+};
+
+struct mlxsw_sp_acl_ctcam_region {
+       struct parman *parman;
+       struct mlxsw_sp_acl_tcam_region *region;
+};
+
+struct mlxsw_sp_acl_ctcam_chunk {
+       struct parman_prio parman_prio;
+};
+
+struct mlxsw_sp_acl_ctcam_entry {
+       struct parman_item parman_item;
+};
+
+int mlxsw_sp_acl_ctcam_region_init(struct mlxsw_sp *mlxsw_sp,
+                                  struct mlxsw_sp_acl_ctcam_region *cregion,
+                                  struct mlxsw_sp_acl_tcam_region *region);
+void mlxsw_sp_acl_ctcam_region_fini(struct mlxsw_sp_acl_ctcam_region *cregion);
+void mlxsw_sp_acl_ctcam_chunk_init(struct mlxsw_sp_acl_ctcam_region *cregion,
+                                  struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+                                  unsigned int priority);
+void mlxsw_sp_acl_ctcam_chunk_fini(struct mlxsw_sp_acl_ctcam_chunk *cchunk);
+int mlxsw_sp_acl_ctcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+                                struct mlxsw_sp_acl_ctcam_region *cregion,
+                                struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+                                struct mlxsw_sp_acl_ctcam_entry *centry,
+                                struct mlxsw_sp_acl_rule_info *rulei,
+                                bool fillup_priority);
+void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+                                 struct mlxsw_sp_acl_ctcam_region *cregion,
+                                 struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+                                 struct mlxsw_sp_acl_ctcam_entry *centry);
+static inline unsigned int
+mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry)
+{
+       return centry->parman_item.index;
+}
+
+#endif
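
A note on how these exports compose: a TCAM flavour embeds struct mlxsw_sp_acl_ctcam_region inside its region priv and forwards its region_init/region_fini ops to the ctcam helpers. A hedged sketch of that wiring (the mlxsw_sp1_* names are hypothetical here; error handling trimmed):

#include "spectrum_acl_tcam.h"

struct mlxsw_sp1_acl_tcam_region {
	struct mlxsw_sp_acl_ctcam_region cregion;
};

static int
mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp1_acl_tcam_region *_region = region_priv;

	/* All parman bookkeeping now happens behind this call. */
	return mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &_region->cregion,
					      region);
}

static void mlxsw_sp1_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp,
					   void *region_priv)
{
	struct mlxsw_sp1_acl_tcam_region *_region = region_priv;

	mlxsw_sp_acl_ctcam_region_fini(&_region->cregion);
}
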
index 89dbf569dff50c0db7d97d3b4e80e8bd7cf494d6..201761a3539e73def28e1b642cf820abe08eda12 100644
@@ -144,10 +144,12 @@ static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
                                          FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                          f->mask);
 
-       mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4,
-                                      ntohl(key->src), ntohl(mask->src));
-       mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4,
-                                      ntohl(key->dst), ntohl(mask->dst));
+       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+                                      (char *) &key->src,
+                                      (char *) &mask->src, 4);
+       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+                                      (char *) &key->dst,
+                                      (char *) &mask->dst, 4);
 }
 
 static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
@@ -161,24 +163,31 @@ static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
                skb_flow_dissector_target(f->dissector,
                                          FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                          f->mask);
-       size_t addr_half_size = sizeof(key->src) / 2;
-
-       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI,
-                                      &key->src.s6_addr[0],
-                                      &mask->src.s6_addr[0],
-                                      addr_half_size);
-       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO,
-                                      &key->src.s6_addr[addr_half_size],
-                                      &mask->src.s6_addr[addr_half_size],
-                                      addr_half_size);
-       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI,
-                                      &key->dst.s6_addr[0],
-                                      &mask->dst.s6_addr[0],
-                                      addr_half_size);
-       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO,
-                                      &key->dst.s6_addr[addr_half_size],
-                                      &mask->dst.s6_addr[addr_half_size],
-                                      addr_half_size);
+
+       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+                                      &key->src.s6_addr[0x0],
+                                      &mask->src.s6_addr[0x0], 4);
+       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+                                      &key->src.s6_addr[0x4],
+                                      &mask->src.s6_addr[0x4], 4);
+       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+                                      &key->src.s6_addr[0x8],
+                                      &mask->src.s6_addr[0x8], 4);
+       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+                                      &key->src.s6_addr[0xC],
+                                      &mask->src.s6_addr[0xC], 4);
+       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
+                                      &key->dst.s6_addr[0x0],
+                                      &mask->dst.s6_addr[0x0], 4);
+       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
+                                      &key->dst.s6_addr[0x4],
+                                      &mask->dst.s6_addr[0x4], 4);
+       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
+                                      &key->dst.s6_addr[0x8],
+                                      &mask->dst.s6_addr[0x8], 4);
+       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+                                      &key->dst.s6_addr[0xC],
+                                      &mask->dst.s6_addr[0xC], 4);
 }
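
The eight calls above slice each 128-bit address into four 32-bit key blocks: byte offsets 0x0, 0x4, 0x8 and 0xC of s6_addr carry bits 96-127 down to 0-31 respectively. A small userspace sketch of the same slicing (the address is illustrative):

#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	struct in6_addr a;
	int i;

	inet_pton(AF_INET6, "2001:db8::1", &a);
	/* Walk the address as the parser does: 4-byte chunks, most
	 * significant bits (96-127) first.
	 */
	for (i = 0; i < 16; i += 4) {
		int lo = 96 - (i / 4) * 32;

		printf("bits %3d-%3d: %02x%02x%02x%02x\n", lo, lo + 31,
		       a.s6_addr[i], a.s6_addr[i + 1],
		       a.s6_addr[i + 2], a.s6_addr[i + 3]);
	}
	return 0;
}
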
 
 static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
@@ -340,13 +349,17 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
                                                  f->mask);
 
                mlxsw_sp_acl_rulei_keymask_buf(rulei,
-                                              MLXSW_AFK_ELEMENT_DMAC,
-                                              key->dst, mask->dst,
-                                              sizeof(key->dst));
+                                              MLXSW_AFK_ELEMENT_DMAC_32_47,
+                                              key->dst, mask->dst, 2);
+               mlxsw_sp_acl_rulei_keymask_buf(rulei,
+                                              MLXSW_AFK_ELEMENT_DMAC_0_31,
+                                              key->dst + 2, mask->dst + 2, 4);
+               mlxsw_sp_acl_rulei_keymask_buf(rulei,
+                                              MLXSW_AFK_ELEMENT_SMAC_32_47,
+                                              key->src, mask->src, 2);
                mlxsw_sp_acl_rulei_keymask_buf(rulei,
-                                              MLXSW_AFK_ELEMENT_SMAC,
-                                              key->src, mask->src,
-                                              sizeof(key->src));
+                                              MLXSW_AFK_ELEMENT_SMAC_0_31,
+                                              key->src + 2, mask->src + 2, 4);
        }
 
        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
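
The MAC handling in this hunk follows the same scheme as the IP addresses: the 48-bit MAC splits into a 16-bit top half (bytes 0-1, the _32_47 element) and a 32-bit bottom half (bytes 2-5, the _0_31 element), matching the (key, mask, 2) and (key + 2, mask + 2, 4) buffer calls above. A minimal sketch (the MAC value is illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* bytes 0-1 -> *MAC_32_47 (length 2) */
	printf("bits 32-47: %02x%02x\n", mac[0], mac[1]);
	/* bytes 2-5 -> *MAC_0_31 (length 4) */
	printf("bits  0-31: %02x%02x%02x%02x\n",
	       mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
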
index fe4327f547d23b1caa5138cfe1544f05af2511d3..fd557585514d50def8762283dd3afa86c10d0194 100644
@@ -1,7 +1,7 @@
 /*
  * drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2018 Jiri Pirko <jiri@mellanox.com>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  */
 
 #include <linux/kernel.h>
-#include <linux/bitops.h>
+#include <linux/slab.h>
 
 #include "spectrum.h"
 
-#define MLXSW_SP_KVDL_SINGLE_BASE 0
-#define MLXSW_SP_KVDL_SINGLE_SIZE 16384
-#define MLXSW_SP_KVDL_SINGLE_END \
-       (MLXSW_SP_KVDL_SINGLE_SIZE + MLXSW_SP_KVDL_SINGLE_BASE - 1)
-
-#define MLXSW_SP_KVDL_CHUNKS_BASE \
-       (MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE)
-#define MLXSW_SP_KVDL_CHUNKS_SIZE 49152
-#define MLXSW_SP_KVDL_CHUNKS_END \
-       (MLXSW_SP_KVDL_CHUNKS_SIZE + MLXSW_SP_KVDL_CHUNKS_BASE - 1)
-
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_BASE \
-       (MLXSW_SP_KVDL_CHUNKS_BASE + MLXSW_SP_KVDL_CHUNKS_SIZE)
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE \
-       (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_LARGE_CHUNKS_BASE)
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_END \
-       (MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1)
-
-#define MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE 1
-#define MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE 32
-#define MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
-
-struct mlxsw_sp_kvdl_part_info {
-       unsigned int part_index;
-       unsigned int start_index;
-       unsigned int end_index;
-       unsigned int alloc_size;
-       enum mlxsw_sp_resource_id resource_id;
-};
-
-enum mlxsw_sp_kvdl_part_id {
-       MLXSW_SP_KVDL_PART_ID_SINGLE,
-       MLXSW_SP_KVDL_PART_ID_CHUNKS,
-       MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS,
-};
-
-#define MLXSW_SP_KVDL_PART_INFO(id)                            \
-[MLXSW_SP_KVDL_PART_ID_##id] = {                               \
-       .start_index = MLXSW_SP_KVDL_##id##_BASE,               \
-       .end_index = MLXSW_SP_KVDL_##id##_END,                  \
-       .alloc_size = MLXSW_SP_KVDL_##id##_ALLOC_SIZE,          \
-       .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id,       \
-}
-
-static const struct mlxsw_sp_kvdl_part_info mlxsw_sp_kvdl_parts_info[] = {
-       MLXSW_SP_KVDL_PART_INFO(SINGLE),
-       MLXSW_SP_KVDL_PART_INFO(CHUNKS),
-       MLXSW_SP_KVDL_PART_INFO(LARGE_CHUNKS),
-};
-
-#define MLXSW_SP_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp_kvdl_parts_info)
-
-struct mlxsw_sp_kvdl_part {
-       struct mlxsw_sp_kvdl_part_info info;
-       unsigned long usage[0]; /* Entries */
-};
-
 struct mlxsw_sp_kvdl {
-       struct mlxsw_sp_kvdl_part *parts[MLXSW_SP_KVDL_PARTS_INFO_LEN];
+       const struct mlxsw_sp_kvdl_ops *kvdl_ops;
+       unsigned long priv[0];
+       /* priv has to be always the last item */
 };
 
-static struct mlxsw_sp_kvdl_part *
-mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl,
-                             unsigned int alloc_size)
-{
-       struct mlxsw_sp_kvdl_part *part, *min_part = NULL;
-       int i;
-
-       for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
-               part = kvdl->parts[i];
-               if (alloc_size <= part->info.alloc_size &&
-                   (!min_part ||
-                    part->info.alloc_size <= min_part->info.alloc_size))
-                       min_part = part;
-       }
-
-       return min_part ?: ERR_PTR(-ENOBUFS);
-}
-
-static struct mlxsw_sp_kvdl_part *
-mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index)
-{
-       struct mlxsw_sp_kvdl_part *part;
-       int i;
-
-       for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
-               part = kvdl->parts[i];
-               if (kvdl_index >= part->info.start_index &&
-                   kvdl_index <= part->info.end_index)
-                       return part;
-       }
-
-       return ERR_PTR(-EINVAL);
-}
-
-static u32
-mlxsw_sp_entry_index_kvdl_index(const struct mlxsw_sp_kvdl_part_info *info,
-                               unsigned int entry_index)
-{
-       return info->start_index + entry_index * info->alloc_size;
-}
-
-static unsigned int
-mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info,
-                               u32 kvdl_index)
-{
-       return (kvdl_index - info->start_index) / info->alloc_size;
-}
-
-static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part,
-                                   u32 *p_kvdl_index)
-{
-       const struct mlxsw_sp_kvdl_part_info *info = &part->info;
-       unsigned int entry_index, nr_entries;
-
-       nr_entries = (info->end_index - info->start_index + 1) /
-                    info->alloc_size;
-       entry_index = find_first_zero_bit(part->usage, nr_entries);
-       if (entry_index == nr_entries)
-               return -ENOBUFS;
-       __set_bit(entry_index, part->usage);
-
-       *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(info, entry_index);
-
-       return 0;
-}
-
-static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part,
-                                   u32 kvdl_index)
-{
-       const struct mlxsw_sp_kvdl_part_info *info = &part->info;
-       unsigned int entry_index;
-
-       entry_index = mlxsw_sp_kvdl_index_entry_index(info, kvdl_index);
-       __clear_bit(entry_index, part->usage);
-}
-
-int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
-                       u32 *p_entry_index)
-{
-       struct mlxsw_sp_kvdl_part *part;
-
-       /* Find partition with smallest allocation size satisfying the
-        * requested size.
-        */
-       part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
-       if (IS_ERR(part))
-               return PTR_ERR(part);
-
-       return mlxsw_sp_kvdl_part_alloc(part, p_entry_index);
-}
-
-void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index)
-{
-       struct mlxsw_sp_kvdl_part *part;
-
-       part = mlxsw_sp_kvdl_index_part(mlxsw_sp->kvdl, entry_index);
-       if (IS_ERR(part))
-               return;
-       mlxsw_sp_kvdl_part_free(part, entry_index);
-}
-
-int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
-                                  unsigned int entry_count,
-                                  unsigned int *p_alloc_size)
-{
-       struct mlxsw_sp_kvdl_part *part;
-
-       part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
-       if (IS_ERR(part))
-               return PTR_ERR(part);
-
-       *p_alloc_size = part->info.alloc_size;
-
-       return 0;
-}
-
-static void mlxsw_sp_kvdl_part_update(struct mlxsw_sp_kvdl_part *part,
-                                     struct mlxsw_sp_kvdl_part *part_prev,
-                                     unsigned int size)
-{
-
-       if (!part_prev) {
-               part->info.end_index = size - 1;
-       } else {
-               part->info.start_index = part_prev->info.end_index + 1;
-               part->info.end_index = part->info.start_index + size - 1;
-       }
-}
-
-static struct mlxsw_sp_kvdl_part *
-mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
-                       const struct mlxsw_sp_kvdl_part_info *info,
-                       struct mlxsw_sp_kvdl_part *part_prev)
+int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
 {
-       struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
-       struct mlxsw_sp_kvdl_part *part;
-       bool need_update = true;
-       unsigned int nr_entries;
-       size_t usage_size;
-       u64 resource_size;
+       const struct mlxsw_sp_kvdl_ops *kvdl_ops = mlxsw_sp->kvdl_ops;
+       struct mlxsw_sp_kvdl *kvdl;
        int err;
 
-       err = devlink_resource_size_get(devlink, info->resource_id,
-                                       &resource_size);
-       if (err) {
-               need_update = false;
-               resource_size = info->end_index - info->start_index + 1;
-       }
-
-       nr_entries = div_u64(resource_size, info->alloc_size);
-       usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
-       part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
-       if (!part)
-               return ERR_PTR(-ENOMEM);
-
-       memcpy(&part->info, info, sizeof(part->info));
-
-       if (need_update)
-               mlxsw_sp_kvdl_part_update(part, part_prev, resource_size);
-       return part;
-}
-
-static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp_kvdl_part *part)
-{
-       kfree(part);
-}
-
-static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp)
-{
-       struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
-       const struct mlxsw_sp_kvdl_part_info *info;
-       struct mlxsw_sp_kvdl_part *part_prev = NULL;
-       int err, i;
+       kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl) + kvdl_ops->priv_size,
+                      GFP_KERNEL);
+       if (!kvdl)
+               return -ENOMEM;
+       kvdl->kvdl_ops = kvdl_ops;
+       mlxsw_sp->kvdl = kvdl;
 
-       for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
-               info = &mlxsw_sp_kvdl_parts_info[i];
-               kvdl->parts[i] = mlxsw_sp_kvdl_part_init(mlxsw_sp, info,
-                                                        part_prev);
-               if (IS_ERR(kvdl->parts[i])) {
-                       err = PTR_ERR(kvdl->parts[i]);
-                       goto err_kvdl_part_init;
-               }
-               part_prev = kvdl->parts[i];
-       }
+       err = kvdl_ops->init(mlxsw_sp, kvdl->priv);
+       if (err)
+               goto err_init;
        return 0;
 
-err_kvdl_part_init:
-       for (i--; i >= 0; i--)
-               mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
+err_init:
+       kfree(kvdl);
        return err;
 }
 
-static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
+void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
 {
        struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
-       int i;
-
-       for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
-               mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
-}
-
-static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
-{
-       const struct mlxsw_sp_kvdl_part_info *info = &part->info;
-       unsigned int nr_entries;
-       int bit = -1;
-       u64 occ = 0;
-
-       nr_entries = (info->end_index -
-                     info->start_index + 1) /
-                     info->alloc_size;
-       while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
-               < nr_entries)
-               occ += info->alloc_size;
-       return occ;
-}
-
-static u64 mlxsw_sp_kvdl_occ_get(void *priv)
-{
-       const struct mlxsw_sp *mlxsw_sp = priv;
-       u64 occ = 0;
-       int i;
-
-       for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
-               occ += mlxsw_sp_kvdl_part_occ(mlxsw_sp->kvdl->parts[i]);
-
-       return occ;
-}
-
-static u64 mlxsw_sp_kvdl_single_occ_get(void *priv)
-{
-       const struct mlxsw_sp *mlxsw_sp = priv;
-       struct mlxsw_sp_kvdl_part *part;
-
-       part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE];
-       return mlxsw_sp_kvdl_part_occ(part);
-}
-
-static u64 mlxsw_sp_kvdl_chunks_occ_get(void *priv)
-{
-       const struct mlxsw_sp *mlxsw_sp = priv;
-       struct mlxsw_sp_kvdl_part *part;
-
-       part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS];
-       return mlxsw_sp_kvdl_part_occ(part);
-}
-
-static u64 mlxsw_sp_kvdl_large_chunks_occ_get(void *priv)
-{
-       const struct mlxsw_sp *mlxsw_sp = priv;
-       struct mlxsw_sp_kvdl_part *part;
 
-       part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS];
-       return mlxsw_sp_kvdl_part_occ(part);
+       kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv);
+       kfree(kvdl);
 }
 
-int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
+                       enum mlxsw_sp_kvdl_entry_type type,
+                       unsigned int entry_count, u32 *p_entry_index)
 {
-       struct devlink *devlink = priv_to_devlink(mlxsw_core);
-       static struct devlink_resource_size_params size_params;
-       u32 kvdl_max_size;
-       int err;
-
-       kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
-                       MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
-                       MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
-
-       devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
-                                         MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE,
-                                         DEVLINK_RESOURCE_UNIT_ENTRY);
-       err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
-                                       MLXSW_SP_KVDL_SINGLE_SIZE,
-                                       MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
-                                       MLXSW_SP_RESOURCE_KVD_LINEAR,
-                                       &size_params);
-       if (err)
-               return err;
-
-       devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
-                                         MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE,
-                                         DEVLINK_RESOURCE_UNIT_ENTRY);
-       err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
-                                       MLXSW_SP_KVDL_CHUNKS_SIZE,
-                                       MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
-                                       MLXSW_SP_RESOURCE_KVD_LINEAR,
-                                       &size_params);
-       if (err)
-               return err;
+       struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
 
-       devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
-                                         MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
-                                         DEVLINK_RESOURCE_UNIT_ENTRY);
-       err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
-                                       MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE,
-                                       MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
-                                       MLXSW_SP_RESOURCE_KVD_LINEAR,
-                                       &size_params);
-       return err;
+       return kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
+                                    entry_count, p_entry_index);
 }
 
-int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
+                       enum mlxsw_sp_kvdl_entry_type type,
+                       unsigned int entry_count, int entry_index)
 {
-       struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
-       struct mlxsw_sp_kvdl *kvdl;
-       int err;
-
-       kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl), GFP_KERNEL);
-       if (!kvdl)
-               return -ENOMEM;
-       mlxsw_sp->kvdl = kvdl;
-
-       err = mlxsw_sp_kvdl_parts_init(mlxsw_sp);
-       if (err)
-               goto err_kvdl_parts_init;
-
-       devlink_resource_occ_get_register(devlink,
-                                         MLXSW_SP_RESOURCE_KVD_LINEAR,
-                                         mlxsw_sp_kvdl_occ_get,
-                                         mlxsw_sp);
-       devlink_resource_occ_get_register(devlink,
-                                         MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
-                                         mlxsw_sp_kvdl_single_occ_get,
-                                         mlxsw_sp);
-       devlink_resource_occ_get_register(devlink,
-                                         MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
-                                         mlxsw_sp_kvdl_chunks_occ_get,
-                                         mlxsw_sp);
-       devlink_resource_occ_get_register(devlink,
-                                         MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
-                                         mlxsw_sp_kvdl_large_chunks_occ_get,
-                                         mlxsw_sp);
-
-       return 0;
+       struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
 
-err_kvdl_parts_init:
-       kfree(mlxsw_sp->kvdl);
-       return err;
+       kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type,
+                            entry_count, entry_index);
 }
 
-void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
+int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
+                                   enum mlxsw_sp_kvdl_entry_type type,
+                                   unsigned int entry_count,
+                                   unsigned int *p_alloc_count)
 {
-       struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+       struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
 
-       devlink_resource_occ_get_unregister(devlink,
-                                           MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
-       devlink_resource_occ_get_unregister(devlink,
-                                           MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
-       devlink_resource_occ_get_unregister(devlink,
-                                           MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
-       devlink_resource_occ_get_unregister(devlink,
-                                           MLXSW_SP_RESOURCE_KVD_LINEAR);
-       mlxsw_sp_kvdl_parts_fini(mlxsw_sp);
-       kfree(mlxsw_sp->kvdl);
+       return kvdl->kvdl_ops->alloc_size_query(mlxsw_sp, kvdl->priv, type,
+                                               entry_count, p_alloc_count);
 }
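
After this rewrite spectrum_kvdl.c is a thin dispatcher: every entry point funnels through mlxsw_sp->kvdl_ops, letting different ASIC generations supply their own linear-KVD layout. The shape of that ops table, as implied by the call sites in this file (the authoritative definition lives elsewhere and may differ in detail):

struct mlxsw_sp_kvdl_ops {
	size_t priv_size;
	int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
	void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
	int (*alloc)(struct mlxsw_sp *mlxsw_sp, void *priv,
		     enum mlxsw_sp_kvdl_entry_type type,
		     unsigned int entry_count, u32 *p_entry_index);
	void (*free)(struct mlxsw_sp *mlxsw_sp, void *priv,
		     enum mlxsw_sp_kvdl_entry_type type,
		     unsigned int entry_count, int entry_index);
	int (*alloc_size_query)(struct mlxsw_sp *mlxsw_sp, void *priv,
				enum mlxsw_sp_kvdl_entry_type type,
				unsigned int entry_count,
				unsigned int *p_alloc_count);
};
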
index a82539609d492733332c2a6a18eed0952e4615c0..98dcaf78365c2cf8ad5539c1c11809d9e8e089e1 100644
@@ -1075,6 +1075,6 @@ void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
 
        cancel_delayed_work_sync(&mr->stats_update_dw);
-       mr->mr_ops->fini(mr->priv);
+       mr->mr_ops->fini(mlxsw_sp, mr->priv);
        kfree(mr);
 }
index 7c864a86811d5321a8f822b438c56d43cdfb8342..c92fa90dca31e4c49f839e96051aef8cc9fd63b7 100644
@@ -46,15 +46,6 @@ enum mlxsw_sp_mr_route_action {
        MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD,
 };
 
-enum mlxsw_sp_mr_route_prio {
-       MLXSW_SP_MR_ROUTE_PRIO_SG,
-       MLXSW_SP_MR_ROUTE_PRIO_STARG,
-       MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
-       __MLXSW_SP_MR_ROUTE_PRIO_MAX
-};
-
-#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)
-
 struct mlxsw_sp_mr_route_key {
        int vrid;
        enum mlxsw_sp_l3proto proto;
@@ -101,7 +92,7 @@ struct mlxsw_sp_mr_ops {
                              u16 erif_index);
        void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
                              void *route_priv);
-       void (*fini)(void *priv);
+       void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
 };
 
 struct mlxsw_sp_mr;
index 4f4c0d31188364a9f9821a5dbcd6653622ac3461..e9c9f1f45b9da0afaf8fe367293259ba49fe9345 100644
@@ -1,7 +1,8 @@
 /*
  * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
  * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ * Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -35,7 +36,6 @@
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/netdevice.h>
-#include <linux/parman.h>
 
 #include "spectrum_mr_tcam.h"
 #include "reg.h"
 #include "core_acl_flex_actions.h"
 #include "spectrum_mr.h"
 
-struct mlxsw_sp_mr_tcam_region {
-       struct mlxsw_sp *mlxsw_sp;
-       enum mlxsw_reg_rtar_key_type rtar_key_type;
-       struct parman *parman;
-       struct parman_prio *parman_prios;
-};
-
 struct mlxsw_sp_mr_tcam {
-       struct mlxsw_sp_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
+       void *priv;
 };
 
 /* This struct maps to one RIGR2 register entry */
@@ -84,8 +77,6 @@ mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
        INIT_LIST_HEAD(&erif_list->erif_sublists);
 }
 
-#define MLXSW_SP_KVDL_RIGR2_SIZE 1
-
 static struct mlxsw_sp_mr_erif_sublist *
 mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_mr_tcam_erif_list *erif_list)
@@ -96,8 +87,8 @@ mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
        erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
        if (!erif_sublist)
                return ERR_PTR(-ENOMEM);
-       err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE,
-                                 &erif_sublist->rigr2_kvdl_index);
+       err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+                                 1, &erif_sublist->rigr2_kvdl_index);
        if (err) {
                kfree(erif_sublist);
                return ERR_PTR(err);
@@ -112,7 +103,8 @@ mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_mr_erif_sublist *erif_sublist)
 {
        list_del(&erif_sublist->list);
-       mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index);
+       mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+                          1, erif_sublist->rigr2_kvdl_index);
        kfree(erif_sublist);
 }
 
@@ -221,12 +213,11 @@ struct mlxsw_sp_mr_tcam_route {
        struct mlxsw_sp_mr_tcam_erif_list erif_list;
        struct mlxsw_afa_block *afa_block;
        u32 counter_index;
-       struct parman_item parman_item;
-       struct parman_prio *parman_prio;
        enum mlxsw_sp_mr_route_action action;
        struct mlxsw_sp_mr_route_key key;
        u16 irif_index;
        u16 min_mtu;
+       void *priv;
 };
 
 static struct mlxsw_afa_block *
@@ -297,60 +288,6 @@ mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
        mlxsw_afa_block_destroy(afa_block);
 }
 
-static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
-                                         struct parman_item *parman_item,
-                                         struct mlxsw_sp_mr_route_key *key,
-                                         struct mlxsw_afa_block *afa_block)
-{
-       char rmft2_pl[MLXSW_REG_RMFT2_LEN];
-
-       switch (key->proto) {
-       case MLXSW_SP_L3_PROTO_IPV4:
-               mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
-                                         key->vrid,
-                                         MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
-                                         ntohl(key->group.addr4),
-                                         ntohl(key->group_mask.addr4),
-                                         ntohl(key->source.addr4),
-                                         ntohl(key->source_mask.addr4),
-                                         mlxsw_afa_block_first_set(afa_block));
-               break;
-       case MLXSW_SP_L3_PROTO_IPV6:
-               mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
-                                         key->vrid,
-                                         MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
-                                         key->group.addr6,
-                                         key->group_mask.addr6,
-                                         key->source.addr6,
-                                         key->source_mask.addr6,
-                                         mlxsw_afa_block_first_set(afa_block));
-       }
-
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
-}
-
-static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
-                                        struct mlxsw_sp_mr_route_key *key,
-                                        struct parman_item *parman_item)
-{
-       struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
-       char rmft2_pl[MLXSW_REG_RMFT2_LEN];
-
-       switch (key->proto) {
-       case MLXSW_SP_L3_PROTO_IPV4:
-               mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
-                                         vrid, 0, 0, 0, 0, 0, 0, NULL);
-               break;
-       case MLXSW_SP_L3_PROTO_IPV6:
-               mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
-                                         vrid, 0, 0, zero_addr, zero_addr,
-                                         zero_addr, zero_addr, NULL);
-               break;
-       }
-
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
-}
-
 static int
 mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_mr_tcam_erif_list *erif_list,
@@ -370,51 +307,12 @@ mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
        return 0;
 }
 
-static struct mlxsw_sp_mr_tcam_region *
-mlxsw_sp_mr_tcam_protocol_region(struct mlxsw_sp_mr_tcam *mr_tcam,
-                                enum mlxsw_sp_l3proto proto)
-{
-       return &mr_tcam->tcam_regions[proto];
-}
-
-static int
-mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
-                                      struct mlxsw_sp_mr_tcam_route *route,
-                                      enum mlxsw_sp_mr_route_prio prio)
-{
-       struct mlxsw_sp_mr_tcam_region *tcam_region;
-       int err;
-
-       tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
-                                                      route->key.proto);
-       err = parman_item_add(tcam_region->parman,
-                             &tcam_region->parman_prios[prio],
-                             &route->parman_item);
-       if (err)
-               return err;
-
-       route->parman_prio = &tcam_region->parman_prios[prio];
-       return 0;
-}
-
-static void
-mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
-                                         struct mlxsw_sp_mr_tcam_route *route)
-{
-       struct mlxsw_sp_mr_tcam_region *tcam_region;
-
-       tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
-                                                      route->key.proto);
-
-       parman_item_remove(tcam_region->parman,
-                          route->parman_prio, &route->parman_item);
-}
-
 static int
 mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
                              void *route_priv,
                              struct mlxsw_sp_mr_route_params *route_params)
 {
+       const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
        struct mlxsw_sp_mr_tcam_route *route = route_priv;
        struct mlxsw_sp_mr_tcam *mr_tcam = priv;
        int err;
@@ -448,22 +346,23 @@ mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
                goto err_afa_block_create;
        }
 
-       /* Allocate place in the TCAM */
-       err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route,
-                                                    route_params->prio);
-       if (err)
-               goto err_parman_item_add;
+       route->priv = kzalloc(ops->route_priv_size, GFP_KERNEL);
+       if (!route->priv) {
+               err = -ENOMEM;
+               goto err_route_priv_alloc;
+       }
 
        /* Write the route to the TCAM */
-       err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
-                                            &route->key, route->afa_block);
+       err = ops->route_create(mlxsw_sp, mr_tcam->priv, route->priv,
+                               &route->key, route->afa_block,
+                               route_params->prio);
        if (err)
-               goto err_route_replace;
+               goto err_route_create;
        return 0;
 
-err_route_replace:
-       mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
-err_parman_item_add:
+err_route_create:
+       kfree(route->priv);
+err_route_priv_alloc:
        mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
 err_afa_block_create:
        mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
@@ -476,12 +375,12 @@ mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
 static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
                                           void *priv, void *route_priv)
 {
+       const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
        struct mlxsw_sp_mr_tcam_route *route = route_priv;
        struct mlxsw_sp_mr_tcam *mr_tcam = priv;
 
-       mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
-                                     &route->key, &route->parman_item);
-       mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
+       ops->route_destroy(mlxsw_sp, mr_tcam->priv, route->priv, &route->key);
+       kfree(route->priv);
        mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
        mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
        mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
@@ -502,6 +401,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
                                     void *route_priv,
                                     enum mlxsw_sp_mr_route_action route_action)
 {
+       const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
        struct mlxsw_sp_mr_tcam_route *route = route_priv;
        struct mlxsw_afa_block *afa_block;
        int err;
@@ -516,8 +416,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
                return PTR_ERR(afa_block);
 
        /* Update the TCAM route entry */
-       err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
-                                            &route->key, afa_block);
+       err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
        if (err)
                goto err;
 
@@ -534,6 +433,7 @@ mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
 static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
                                                 void *route_priv, u16 min_mtu)
 {
+       const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
        struct mlxsw_sp_mr_tcam_route *route = route_priv;
        struct mlxsw_afa_block *afa_block;
        int err;
@@ -549,8 +449,7 @@ static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
                return PTR_ERR(afa_block);
 
        /* Update the TCAM route entry */
-       err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
-                                            &route->key, afa_block);
+       err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
        if (err)
                goto err;
 
@@ -596,6 +495,7 @@ static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
 static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
                                           void *route_priv, u16 erif_index)
 {
+       const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
        struct mlxsw_sp_mr_tcam_route *route = route_priv;
        struct mlxsw_sp_mr_erif_sublist *erif_sublist;
        struct mlxsw_sp_mr_tcam_erif_list erif_list;
@@ -630,8 +530,7 @@ static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
        }
 
        /* Update the TCAM route entry */
-       err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
-                                            &route->key, afa_block);
+       err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
        if (err)
                goto err_route_write;
 
@@ -653,6 +552,7 @@ static int
 mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
                              struct mlxsw_sp_mr_route_info *route_info)
 {
+       const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
        struct mlxsw_sp_mr_tcam_route *route = route_priv;
        struct mlxsw_sp_mr_tcam_erif_list erif_list;
        struct mlxsw_afa_block *afa_block;
@@ -677,8 +577,7 @@ mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
        }
 
        /* Update the TCAM route entry */
-       err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
-                                            &route->key, afa_block);
+       err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block);
        if (err)
                goto err_route_write;
 
@@ -699,167 +598,36 @@ mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
        return err;
 }
 
-#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16
-#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16
-
-static int
-mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
-{
-       struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
-       char rtar_pl[MLXSW_REG_RTAR_LEN];
-
-       mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
-                           mr_tcam_region->rtar_key_type,
-                           MLXSW_SP_MR_TCAM_REGION_BASE_COUNT);
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
-}
-
-static void
-mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
-{
-       struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
-       char rtar_pl[MLXSW_REG_RTAR_LEN];
-
-       mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
-                           mr_tcam_region->rtar_key_type, 0);
-       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
-}
-
-static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv,
-                                                unsigned long new_count)
-{
-       struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
-       struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
-       char rtar_pl[MLXSW_REG_RTAR_LEN];
-       u64 max_tcam_rules;
-
-       max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
-       if (new_count > max_tcam_rules)
-               return -EINVAL;
-       mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
-                           mr_tcam_region->rtar_key_type, new_count);
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
-}
-
-static void mlxsw_sp_mr_tcam_region_parman_move(void *priv,
-                                               unsigned long from_index,
-                                               unsigned long to_index,
-                                               unsigned long count)
-{
-       struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
-       struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
-       char rrcr_pl[MLXSW_REG_RRCR_LEN];
-
-       mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
-                           from_index, count,
-                           mr_tcam_region->rtar_key_type, to_index);
-       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
-}
-
-static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = {
-       .base_count     = MLXSW_SP_MR_TCAM_REGION_BASE_COUNT,
-       .resize_step    = MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP,
-       .resize         = mlxsw_sp_mr_tcam_region_parman_resize,
-       .move           = mlxsw_sp_mr_tcam_region_parman_move,
-       .algo           = PARMAN_ALGO_TYPE_LSORT,
-};
-
-static int
-mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
-                            struct mlxsw_sp_mr_tcam_region *mr_tcam_region,
-                            enum mlxsw_reg_rtar_key_type rtar_key_type)
-{
-       struct parman_prio *parman_prios;
-       struct parman *parman;
-       int err;
-       int i;
-
-       mr_tcam_region->rtar_key_type = rtar_key_type;
-       mr_tcam_region->mlxsw_sp = mlxsw_sp;
-
-       err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region);
-       if (err)
-               return err;
-
-       parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops,
-                              mr_tcam_region);
-       if (!parman) {
-               err = -ENOMEM;
-               goto err_parman_create;
-       }
-       mr_tcam_region->parman = parman;
-
-       parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
-                                    sizeof(*parman_prios), GFP_KERNEL);
-       if (!parman_prios) {
-               err = -ENOMEM;
-               goto err_parman_prios_alloc;
-       }
-       mr_tcam_region->parman_prios = parman_prios;
-
-       for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
-               parman_prio_init(mr_tcam_region->parman,
-                                &mr_tcam_region->parman_prios[i], i);
-       return 0;
-
-err_parman_prios_alloc:
-       parman_destroy(parman);
-err_parman_create:
-       mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
-       return err;
-}
-
-static void
-mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
-{
-       int i;
-
-       for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
-               parman_prio_fini(&mr_tcam_region->parman_prios[i]);
-       kfree(mr_tcam_region->parman_prios);
-       parman_destroy(mr_tcam_region->parman);
-       mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
-}
-
 static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
 {
+       const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
        struct mlxsw_sp_mr_tcam *mr_tcam = priv;
-       struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
-       u32 rtar_key;
        int err;
 
-       if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
-           !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
+       if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES))
                return -EIO;
 
-       rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
-       err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
-                                          &region[MLXSW_SP_L3_PROTO_IPV4],
-                                          rtar_key);
-       if (err)
-               return err;
+       mr_tcam->priv = kzalloc(ops->priv_size, GFP_KERNEL);
+       if (!mr_tcam->priv)
+               return -ENOMEM;
 
-       rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
-       err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
-                                          &region[MLXSW_SP_L3_PROTO_IPV6],
-                                          rtar_key);
+       err = ops->init(mlxsw_sp, mr_tcam->priv);
        if (err)
-               goto err_ipv6_region_init;
-
+               goto err_init;
        return 0;
 
-err_ipv6_region_init:
-       mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+err_init:
+       kfree(mr_tcam->priv);
        return err;
 }
 
-static void mlxsw_sp_mr_tcam_fini(void *priv)
+static void mlxsw_sp_mr_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
 {
+       const struct mlxsw_sp_mr_tcam_ops *ops = mlxsw_sp->mr_tcam_ops;
        struct mlxsw_sp_mr_tcam *mr_tcam = priv;
-       struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
 
-       mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
-       mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+       ops->fini(mr_tcam->priv);
+       kfree(mr_tcam->priv);
 }
 
 const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
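
Taken together, the hunks above strip all parman and RTAR region handling out of this file and route it through mlxsw_sp->mr_tcam_ops instead: the backend owns an opaque blob of ops->priv_size bytes, and every route carries its own route->priv of ops->route_priv_size bytes. A hedged reconstruction of that interface, with each member inferred from the call sites in this file (member order and exact parameter names are assumptions):

	struct mlxsw_sp_mr_tcam_ops {
		size_t priv_size;
		int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
		void (*fini)(void *priv);
		size_t route_priv_size;
		int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv,
				    void *route_priv,
				    struct mlxsw_sp_mr_route_key *key,
				    struct mlxsw_afa_block *afa_block,
				    enum mlxsw_sp_mr_route_prio prio);
		void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
				      void *route_priv,
				      struct mlxsw_sp_mr_route_key *key);
		int (*route_update)(struct mlxsw_sp *mlxsw_sp,
				    void *route_priv,
				    struct mlxsw_sp_mr_route_key *key,
				    struct mlxsw_afa_block *afa_block);
	};

The payoff is that a different ASIC generation can plug in its own multicast TCAM backend without this file knowing whether routes live in a parman-managed region or elsewhere.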
index 6aaaf3d9ba31d9538d9307caa0450a848bf6b091..e51c8dc52f376c4cd5a93b7dda8fb45f8071a3eb 100644 (file)
@@ -48,6 +48,7 @@
 #include <linux/route.h>
 #include <linux/gcd.h>
 #include <linux/random.h>
+#include <linux/if_macvlan.h>
 #include <net/netevent.h>
 #include <net/neighbour.h>
 #include <net/arp.h>
@@ -60,6 +61,7 @@
 #include <net/ndisc.h>
 #include <net/ipv6.h>
 #include <net/fib_notifier.h>
+#include <net/switchdev.h>
 
 #include "spectrum.h"
 #include "core.h"
@@ -163,7 +165,9 @@ struct mlxsw_sp_rif_ops {
                      const struct mlxsw_sp_rif_params *params);
        int (*configure)(struct mlxsw_sp_rif *rif);
        void (*deconfigure)(struct mlxsw_sp_rif *rif);
-       struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
+       struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
+                                        struct netlink_ext_ack *extack);
+       void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
 };
 
 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
@@ -342,10 +346,6 @@ static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
        mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
 }
 
-static struct mlxsw_sp_rif *
-mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
-                        const struct net_device *dev);
-
 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
 
 struct mlxsw_sp_prefix_usage {
@@ -1109,7 +1109,8 @@ mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
        u32 tunnel_index;
        int err;
 
-       err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
+       err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+                                 1, &tunnel_index);
        if (err)
                return err;
 
@@ -1125,7 +1126,8 @@ static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
        /* Unlink this node from the IPIP entry that it's the decap entry of. */
        fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
        fib_entry->decap.ipip_entry = NULL;
-       mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
+       mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+                          1, fib_entry->decap.tunnel_index);
 }
 
 static struct mlxsw_sp_fib_node *
@@ -3165,8 +3167,9 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
         * by the device and make sure the request can be satisfied.
         */
        mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
-       err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
-                                            &alloc_size);
+       err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
+                                             MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+                                             *p_adj_grp_size, &alloc_size);
        if (err)
                return err;
        /* It is possible the allocation results in more allocated
@@ -3278,7 +3281,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
                /* No valid allocation size available. */
                goto set_trap;
 
-       err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
+       err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+                                 ecmp_size, &adj_index);
        if (err) {
                /* We ran out of KVD linear space, just set the
                 * trap and let everything flow through kernel.
@@ -3313,7 +3317,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
 
        err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
                                             old_adj_index, old_ecmp_size);
-       mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
+       mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+                          old_ecmp_size, old_adj_index);
        if (err) {
                dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
                goto set_trap;
@@ -3335,7 +3340,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
        if (err)
                dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
        if (old_adj_index_valid)
-               mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
+               mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
+                                  nh_grp->ecmp_size, nh_grp->adj_index);
 }
 
 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
@@ -5967,7 +5973,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
        return NOTIFY_DONE;
 }
 
-static struct mlxsw_sp_rif *
+struct mlxsw_sp_rif *
 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
                         const struct net_device *dev)
 {
@@ -6024,6 +6030,12 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
                    !list_empty(&inet6_dev->addr_list))
                        addr_list_empty = false;
 
+               /* macvlans do not have a RIF, but rather piggyback on the
+                * RIF of their lower device.
+                */
+               if (netif_is_macvlan(dev) && addr_list_empty)
+                       return true;
+
                if (rif && addr_list_empty &&
                    !netif_is_l3_slave(rif->dev))
                        return true;
@@ -6125,6 +6137,11 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
        return rif->dev;
 }
 
+struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif)
+{
+       return rif->fid;
+}
+
 static struct mlxsw_sp_rif *
 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
                    const struct mlxsw_sp_rif_params *params,
@@ -6162,7 +6179,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
        rif->ops = ops;
 
        if (ops->fid_get) {
-               fid = ops->fid_get(rif);
+               fid = ops->fid_get(rif, extack);
                if (IS_ERR(fid)) {
                        err = PTR_ERR(fid);
                        goto err_fid_get;
@@ -6267,7 +6284,7 @@ mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
        }
 
        /* FID was already created, just take a reference */
-       fid = rif->ops->fid_get(rif);
+       fid = rif->ops->fid_get(rif, extack);
        err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
        if (err)
                goto err_fid_port_vid_map;
@@ -6432,6 +6449,123 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
        return 0;
 }
 
+static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
+{
+       u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
+       u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+
+       return ether_addr_equal_masked(mac, vrrp4, mask);
+}
+
+static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
+{
+       u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
+       u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+
+       return ether_addr_equal_masked(mac, vrrp6, mask);
+}
+
+static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
+                               const u8 *mac, bool adding)
+{
+       char ritr_pl[MLXSW_REG_RITR_LEN];
+       u8 vrrp_id = adding ? mac[5] : 0;
+       int err;
+
+       if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
+           !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
+               return 0;
+
+       mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+       if (err)
+               return err;
+
+       if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
+               mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
+       else
+               mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
+                                   const struct net_device *macvlan_dev,
+                                   struct netlink_ext_ack *extack)
+{
+       struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
+       struct mlxsw_sp_rif *rif;
+       int err;
+
+       rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
+       if (!rif) {
+               NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+               return -EOPNOTSUPP;
+       }
+
+       err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+                                 mlxsw_sp_fid_index(rif->fid), true);
+       if (err)
+               return err;
+
+       err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
+                                  macvlan_dev->dev_addr, true);
+       if (err)
+               goto err_rif_vrrp_add;
+
+       /* Make sure the bridge driver does not have this MAC pointing at
+        * some other port.
+        */
+       if (rif->ops->fdb_del)
+               rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
+
+       return 0;
+
+err_rif_vrrp_add:
+       mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+                           mlxsw_sp_fid_index(rif->fid), false);
+       return err;
+}
+
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+                             const struct net_device *macvlan_dev)
+{
+       struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
+       struct mlxsw_sp_rif *rif;
+
+       rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
+       /* If we do not have a RIF, then we already took care of
+        * removing the macvlan's MAC during RIF deletion.
+        */
+       if (!rif)
+               return;
+       mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
+                            false);
+       mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
+                           mlxsw_sp_fid_index(rif->fid), false);
+}
+
+static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev,
+                                          unsigned long event,
+                                          struct netlink_ext_ack *extack)
+{
+       struct mlxsw_sp *mlxsw_sp;
+
+       mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
+       if (!mlxsw_sp)
+               return 0;
+
+       switch (event) {
+       case NETDEV_UP:
+               return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
+       case NETDEV_DOWN:
+               mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+               break;
+       }
+
+       return 0;
+}
+
 static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
                                     unsigned long event,
                                     struct netlink_ext_ack *extack)
@@ -6444,6 +6578,8 @@ static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
                return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
        else if (is_vlan_dev(dev))
                return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
+       else if (netif_is_macvlan(dev))
+               return mlxsw_sp_inetaddr_macvlan_event(dev, event, extack);
        else
                return 0;
 }
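
The two helpers above match the IANA-reserved VRRP virtual MAC prefixes 00:00:5e:00:01:{VRID} (IPv4) and 00:00:5e:00:02:{VRID} (IPv6): the mask zeroes the final octet so only the 40-bit prefix is compared, and the surviving byte, mac[5], is the VRID that gets programmed into the RITR register. A small userspace model of that masked comparison (the re-implementation of the kernel's ether_addr_equal_masked() below is illustrative only):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ETH_ALEN 6

	/* Illustrative stand-in for the kernel's ether_addr_equal_masked(). */
	static bool masked_equal(const uint8_t *a, const uint8_t *b,
				 const uint8_t *mask)
	{
		for (int i = 0; i < ETH_ALEN; i++)
			if ((a[i] ^ b[i]) & mask[i])
				return false;
		return true;
	}

	int main(void)
	{
		const uint8_t vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
		const uint8_t mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
		const uint8_t mac[ETH_ALEN]   = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x2a };

		if (masked_equal(mac, vrrp4, mask))
			printf("VRRPv4 virtual MAC, VRID %u\n", mac[5]); /* VRID 42 */
		return 0;
	}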
@@ -6684,7 +6820,10 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
        int err = 0;
 
-       if (!mlxsw_sp)
+       /* We do not create a RIF for a macvlan, but only use it to
+        * direct more MAC addresses to the router.
+        */
+       if (!mlxsw_sp || netif_is_macvlan(l3_dev))
                return 0;
 
        switch (event) {
@@ -6705,6 +6844,27 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
        return err;
 }
 
+static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data)
+{
+       struct mlxsw_sp_rif *rif = data;
+
+       if (!netif_is_macvlan(dev))
+               return 0;
+
+       return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
+                                  mlxsw_sp_fid_index(rif->fid), false);
+}
+
+static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
+{
+       if (!netif_is_macvlan_port(rif->dev))
+               return 0;
+
+       netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
+       return netdev_walk_all_upper_dev_rcu(rif->dev,
+                                            __mlxsw_sp_rif_macvlan_flush, rif);
+}
+
 static struct mlxsw_sp_rif_subport *
 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
 {
@@ -6771,11 +6931,13 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
        mlxsw_sp_fid_rif_set(fid, NULL);
        mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
                            mlxsw_sp_fid_index(fid), false);
+       mlxsw_sp_rif_macvlan_flush(rif);
        mlxsw_sp_rif_subport_op(rif, false);
 }
 
 static struct mlxsw_sp_fid *
-mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
+mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
+                            struct netlink_ext_ack *extack)
 {
        return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
 }
@@ -6857,6 +7019,7 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
        mlxsw_sp_fid_rif_set(fid, NULL);
        mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
                            mlxsw_sp_fid_index(fid), false);
+       mlxsw_sp_rif_macvlan_flush(rif);
        mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
                               mlxsw_sp_router_port(mlxsw_sp), false);
        mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
@@ -6865,19 +7028,49 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
 }
 
 static struct mlxsw_sp_fid *
-mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
+mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
+                         struct netlink_ext_ack *extack)
 {
-       u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
+       u16 vid;
+       int err;
+
+       if (is_vlan_dev(rif->dev)) {
+               vid = vlan_dev_vlan_id(rif->dev);
+       } else {
+               err = br_vlan_get_pvid(rif->dev, &vid);
+               if (err < 0 || !vid) {
+                       NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
+                       return ERR_PTR(-EINVAL);
+               }
+       }
 
        return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
 }
 
+static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
+{
+       u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+       struct switchdev_notifier_fdb_info info;
+       struct net_device *br_dev;
+       struct net_device *dev;
+
+       br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
+       dev = br_fdb_find_port(br_dev, mac, vid);
+       if (!dev)
+               return;
+
+       info.addr = mac;
+       info.vid = vid;
+       call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
+}
+
 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
        .type                   = MLXSW_SP_RIF_TYPE_VLAN,
        .rif_size               = sizeof(struct mlxsw_sp_rif),
        .configure              = mlxsw_sp_rif_vlan_configure,
        .deconfigure            = mlxsw_sp_rif_vlan_deconfigure,
        .fid_get                = mlxsw_sp_rif_vlan_fid_get,
+       .fdb_del                = mlxsw_sp_rif_vlan_fdb_del,
 };
 
 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
@@ -6929,6 +7122,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
        mlxsw_sp_fid_rif_set(fid, NULL);
        mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
                            mlxsw_sp_fid_index(fid), false);
+       mlxsw_sp_rif_macvlan_flush(rif);
        mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
                               mlxsw_sp_router_port(mlxsw_sp), false);
        mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
@@ -6937,17 +7131,33 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
 }
 
 static struct mlxsw_sp_fid *
-mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
+mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
+                        struct netlink_ext_ack *extack)
 {
        return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
 }
 
+static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
+{
+       struct switchdev_notifier_fdb_info info;
+       struct net_device *dev;
+
+       dev = br_fdb_find_port(rif->dev, mac, 0);
+       if (!dev)
+               return;
+
+       info.addr = mac;
+       info.vid = 0;
+       call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
+}
+
 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
        .type                   = MLXSW_SP_RIF_TYPE_FID,
        .rif_size               = sizeof(struct mlxsw_sp_rif),
        .configure              = mlxsw_sp_rif_fid_configure,
        .deconfigure            = mlxsw_sp_rif_fid_deconfigure,
        .fid_get                = mlxsw_sp_rif_fid_fid_get,
+       .fdb_del                = mlxsw_sp_rif_fid_fdb_del,
 };
 
 static struct mlxsw_sp_rif_ipip_lb *
index a01edcf567971f860f59f9b8339d75b8aba786ec..52e25695625c6eb815b2dd4c23ae5e85417180cf 100644 (file)
@@ -66,6 +66,8 @@ struct mlxsw_sp_neigh_entry;
 struct mlxsw_sp_nexthop;
 struct mlxsw_sp_ipip_entry;
 
+struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+                                             const struct net_device *dev);
 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
                                           u16 rif_index);
 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
@@ -75,6 +77,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
 u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
+struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
 int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_rif *rif,
                                   enum mlxsw_sp_rif_counter_dir dir,
index 3d187d88cc7c5c9025cc5f445cc3fea588ca9a5d..e42d640cddab811fbeff2111309b8d2ad47bb280 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/list.h>
 #include <net/arp.h>
 #include <net/gre.h>
+#include <net/lag.h>
 #include <net/ndisc.h>
 #include <net/ip6_tunnel.h>
 
@@ -254,7 +255,9 @@ mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
        struct list_head *iter;
 
        netdev_for_each_lower_dev(lag_dev, dev, iter)
-               if ((dev->flags & IFF_UP) && mlxsw_sp_port_dev_check(dev))
+               if (netif_carrier_ok(dev) &&
+                   net_lag_port_dev_txable(dev) &&
+                   mlxsw_sp_port_dev_check(dev))
                        return dev;
 
        return NULL;
index eea5666a86b2ae341524710e678d4caf5776a18b..da94e1eb9e1693bfed9d0cb73827013c1fbea9db 100644 (file)
@@ -1135,6 +1135,39 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
        return err;
 }
 
+static int
+mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
+                               const struct net_device *br_dev,
+                               const struct switchdev_obj_port_vlan *vlan)
+{
+       struct mlxsw_sp_rif *rif;
+       struct mlxsw_sp_fid *fid;
+       u16 pvid;
+       u16 vid;
+
+       rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
+       if (!rif)
+               return 0;
+       fid = mlxsw_sp_rif_fid(rif);
+       pvid = mlxsw_sp_fid_8021q_vid(fid);
+
+       for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+               if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
+                       if (vid != pvid) {
+                               netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
+                               return -EBUSY;
+                       }
+               } else {
+                       if (vid == pvid) {
+                               netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
+                               return -EBUSY;
+                       }
+               }
+       }
+
+       return 0;
+}
+
 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
                                   const struct switchdev_obj_port_vlan *vlan,
                                   struct switchdev_trans *trans)
@@ -1146,8 +1179,18 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
        struct mlxsw_sp_bridge_port *bridge_port;
        u16 vid;
 
-       if (netif_is_bridge_master(orig_dev))
-               return -EOPNOTSUPP;
+       if (netif_is_bridge_master(orig_dev)) {
+               int err = 0;
+
+               if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
+                   br_vlan_enabled(orig_dev) &&
+                   switchdev_trans_ph_prepare(trans))
+                       err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
+                                                             orig_dev, vlan);
+               if (!err)
+                       err = -EOPNOTSUPP;
+               return err;
+       }
 
        if (switchdev_trans_ph_prepare(trans))
                return 0;
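
The prepare-phase check added above guards the PVID that a bridge RIF is bound to: a VLAN object may neither designate a different VID as PVID nor clear the PVID flag from the VID the router interface is using. A small userspace model of that veto loop, with an illustrative constant standing in for the uapi BRIDGE_VLAN_INFO_PVID flag:

	#include <stdbool.h>
	#include <stdio.h>

	#define BR_VLAN_INFO_PVID 0x2	/* illustrative stand-in */

	/* Returns true if applying [vid_begin, vid_end] with the given flags
	 * would move or strip the PVID the router interface is bound to.
	 */
	static bool pvid_change_banned(unsigned int vid_begin,
				       unsigned int vid_end,
				       unsigned int flags,
				       unsigned int rif_pvid)
	{
		for (unsigned int vid = vid_begin; vid <= vid_end; vid++) {
			if (flags & BR_VLAN_INFO_PVID) {
				if (vid != rif_pvid)
					return true;	/* would move the PVID */
			} else {
				if (vid == rif_pvid)
					return true;	/* would strip the PVID */
			}
		}
		return false;
	}

	int main(void)
	{
		/* RIF bound to PVID 20; making VID 10 the PVID is refused. */
		printf("%d\n", pvid_change_banned(10, 10, BR_VLAN_INFO_PVID, 20));
		return 0;
	}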
index 399e9d6993f72500bc2339b285f7f0d55de69c3e..eb437f59640daa7ddadb74e61e8203dfb2971b6a 100644 (file)
@@ -63,6 +63,7 @@ enum {
        MLXSW_TRAP_ID_LBERROR = 0x54,
        MLXSW_TRAP_ID_IPV4_OSPF = 0x55,
        MLXSW_TRAP_ID_IPV4_PIM = 0x58,
+       MLXSW_TRAP_ID_IPV4_VRRP = 0x59,
        MLXSW_TRAP_ID_RPF = 0x5C,
        MLXSW_TRAP_ID_IP2ME = 0x5F,
        MLXSW_TRAP_ID_IPV6_UNSPECIFIED_ADDRESS = 0x60,
@@ -78,6 +79,7 @@ enum {
        MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F,
        MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
        MLXSW_TRAP_ID_IPV6_PIM = 0x79,
+       MLXSW_TRAP_ID_IPV6_VRRP = 0x7A,
        MLXSW_TRAP_ID_IPV4_BGP = 0x88,
        MLXSW_TRAP_ID_IPV6_BGP = 0x89,
        MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A,
index b72d1bd11296bba36c83e1252fcfa0a397ee5c84..ebbdfb908745105470aa49950835da692bb7eba9 100644 (file)
@@ -3373,7 +3373,6 @@ static void port_get_link_speed(struct ksz_port *port)
  */
 static void port_set_link_speed(struct ksz_port *port)
 {
-       struct ksz_port_info *info;
        struct ksz_hw *hw = port->hw;
        u16 data;
        u16 cfg;
@@ -3382,8 +3381,6 @@ static void port_set_link_speed(struct ksz_port *port)
        int p;
 
        for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
-               info = &hw->port_info[p];
-
                port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
                port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
 
index dd947e4dd3ce8c2f4cdca56d9f75d33aecd9f41c..e1747a4900666ac0a27db42cbeb87b4d0ac7e06e 100644 (file)
@@ -828,7 +828,7 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
        }
 
        if (!mac_address_valid)
-               random_ether_addr(adapter->mac_address);
+               eth_random_addr(adapter->mac_address);
        lan743x_mac_set_address(adapter, adapter->mac_address);
        ether_addr_copy(netdev->dev_addr, adapter->mac_address);
        return 0;
index fb2c8f8071e64d3b6d52865ecaddf17f841a2b9d..1a4f2bb48ead712634ce5968e23144117d89b8d7 100644 (file)
@@ -148,12 +148,191 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
        return 0;
 }
 
+static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
+{
+       /* Select the VID to configure */
+       ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid),
+                    ANA_TABLES_VLANTIDX);
+       /* Set the vlan port members mask and issue a write command */
+       ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) |
+                            ANA_TABLES_VLANACCESS_CMD_WRITE,
+                    ANA_TABLES_VLANACCESS);
+
+       return ocelot_vlant_wait_for_completion(ocelot);
+}
+
+static void ocelot_vlan_mode(struct ocelot_port *port,
+                            netdev_features_t features)
+{
+       struct ocelot *ocelot = port->ocelot;
+       u8 p = port->chip_port;
+       u32 val;
+
+       /* Filtering */
+       val = ocelot_read(ocelot, ANA_VLANMASK);
+       if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+               val |= BIT(p);
+       else
+               val &= ~BIT(p);
+       ocelot_write(ocelot, val, ANA_VLANMASK);
+}
+
+static void ocelot_vlan_port_apply(struct ocelot *ocelot,
+                                  struct ocelot_port *port)
+{
+       u32 val;
+
+       /* Ingress classification (ANA_PORT_VLAN_CFG) */
+       /* Default VLAN to classify untagged frames to (may be zero) */
+       val = ANA_PORT_VLAN_CFG_VLAN_VID(port->pvid);
+       if (port->vlan_aware)
+               val |= ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+                      ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
+
+       ocelot_rmw_gix(ocelot, val,
+                      ANA_PORT_VLAN_CFG_VLAN_VID_M |
+                      ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+                      ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
+                      ANA_PORT_VLAN_CFG, port->chip_port);
+
+       /* Drop frames with multicast source address */
+       val = ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA;
+       if (port->vlan_aware && !port->vid)
+               /* If the port is VLAN-aware and tagged, drop untagged and
+                * priority-tagged frames.
+                */
+               val |= ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
+                      ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+                      ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
+       ocelot_write_gix(ocelot, val, ANA_PORT_DROP_CFG, port->chip_port);
+
+       /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q. */
+       val = REW_TAG_CFG_TAG_TPID_CFG(0);
+
+       if (port->vlan_aware) {
+               if (port->vid)
+                       /* Tag all frames except when VID == DEFAULT_VLAN */
+                       val |= REW_TAG_CFG_TAG_CFG(1);
+               else
+                       /* Tag all frames */
+                       val |= REW_TAG_CFG_TAG_CFG(3);
+       }
+       ocelot_rmw_gix(ocelot, val,
+                      REW_TAG_CFG_TAG_TPID_CFG_M |
+                      REW_TAG_CFG_TAG_CFG_M,
+                      REW_TAG_CFG, port->chip_port);
+
+       /* Set default VLAN and tag type to 8021Q. */
+       val = REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q) |
+             REW_PORT_VLAN_CFG_PORT_VID(port->vid);
+       ocelot_rmw_gix(ocelot, val,
+                      REW_PORT_VLAN_CFG_PORT_TPID_M |
+                      REW_PORT_VLAN_CFG_PORT_VID_M,
+                      REW_PORT_VLAN_CFG, port->chip_port);
+}
+
+static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
+                              bool untagged)
+{
+       struct ocelot_port *port = netdev_priv(dev);
+       struct ocelot *ocelot = port->ocelot;
+       int ret;
+
+       /* Add the port MAC address with the right VLAN information */
+       ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid,
+                         ENTRYTYPE_LOCKED);
+
+       /* Make the port a member of the VLAN */
+       ocelot->vlan_mask[vid] |= BIT(port->chip_port);
+       ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+       if (ret)
+               return ret;
+
+       /* Default ingress vlan classification */
+       if (pvid)
+               port->pvid = vid;
+
+       /* Untagged egress VLAN classification */
+       if (untagged)
+               port->vid = vid;
+
+       ocelot_vlan_port_apply(ocelot, port);
+
+       return 0;
+}
+
+static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
+{
+       struct ocelot_port *port = netdev_priv(dev);
+       struct ocelot *ocelot = port->ocelot;
+       int ret;
+
+       /* 8021q removes VID 0 on module unload for all interfaces
+        * with the VLAN filtering feature. We need to keep it to receive
+        * untagged traffic.
+        */
+       if (vid == 0)
+               return 0;
+
+       /* Delete the port MAC address with the right VLAN information */
+       ocelot_mact_forget(ocelot, dev->dev_addr, vid);
+
+       /* Stop the port from being a member of the vlan */
+       ocelot->vlan_mask[vid] &= ~BIT(port->chip_port);
+       ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+       if (ret)
+               return ret;
+
+       /* Ingress */
+       if (port->pvid == vid)
+               port->pvid = 0;
+
+       /* Egress */
+       if (port->vid == vid)
+               port->vid = 0;
+
+       ocelot_vlan_port_apply(ocelot, port);
+
+       return 0;
+}
+
 static void ocelot_vlan_init(struct ocelot *ocelot)
 {
+       u16 port, vid;
+
        /* Clear VLAN table, by default all ports are members of all VLANs */
        ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT,
                     ANA_TABLES_VLANACCESS);
        ocelot_vlant_wait_for_completion(ocelot);
+
+       /* Configure the port VLAN memberships */
+       for (vid = 1; vid < VLAN_N_VID; vid++) {
+               ocelot->vlan_mask[vid] = 0;
+               ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]);
+       }
+
+       /* Because VLAN filtering is enabled, we need VID 0 to get untagged
+        * traffic.  It is added automatically if the 8021q module is loaded,
+        * but we can't rely on that since the module may not be loaded.
+        */
+       ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0);
+       ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]);
+
+       /* Configure the CPU port to be VLAN aware */
+       ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
+                                ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+                                ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
+                        ANA_PORT_VLAN_CFG, ocelot->num_phys_ports);
+
+       /* Set vlan ingress filter mask to all ports but the CPU port by
+        * default.
+        */
+       ocelot_write(ocelot, GENMASK(9, 0), ANA_VLANMASK);
+
+       for (port = 0; port < ocelot->num_phys_ports; port++) {
+               ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port);
+               ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port);
+       }
 }
 
 /* Watermark encode
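
The new ocelot VLAN code keeps one port-membership mask per VID in ocelot->vlan_mask[] and mirrors every change into the hardware table through ocelot_vlant_set_mask(). A userspace sketch of that bookkeeping, with the port count assumed for the demo (the real table has VLAN_N_VID entries and one bit per physical port):

	#include <stdint.h>
	#include <stdio.h>

	#define VLAN_N_VID	4096
	#define NUM_PORTS	4	/* assumed for the demo */

	static uint32_t vlan_mask[VLAN_N_VID];

	static void vlan_vid_add(int port, uint16_t vid)
	{
		vlan_mask[vid] |= 1u << port;	/* port joins the VLAN */
		/* driver: ocelot_vlant_set_mask(ocelot, vid, vlan_mask[vid]); */
	}

	static void vlan_vid_del(int port, uint16_t vid)
	{
		if (vid == 0)
			return;	/* VID 0 is kept so untagged traffic flows */
		vlan_mask[vid] &= ~(1u << port);
	}

	int main(void)
	{
		/* VID 0 starts with all ports, every other VID starts empty. */
		vlan_mask[0] = (1u << NUM_PORTS) - 1;

		vlan_vid_add(2, 100);
		vlan_vid_add(3, 100);
		vlan_vid_del(2, 100);
		printf("VID 100 members: 0x%x\n", vlan_mask[100]); /* 0x8 */
		return 0;
	}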
@@ -344,10 +523,9 @@ static int ocelot_port_stop(struct net_device *dev)
 static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
 {
        ifh[0] = IFH_INJ_BYPASS;
-       ifh[1] = (0xff00 & info->port) >> 8;
+       ifh[1] = (0xf00 & info->port) >> 8;
        ifh[2] = (0xff & info->port) << 24;
-       ifh[3] = IFH_INJ_POP_CNT_DISABLE | (info->cpuq << 20) |
-                (info->tag_type << 16) | info->vid;
+       ifh[3] = (info->tag_type << 16) | info->vid;
 
        return 0;
 }
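
The ocelot_gen_ifh() fix above narrows the port mask from 0xff00 to 0xf00: the one-hot port field spans twelve bits, with bits 11:8 packed into ifh[1] and bits 7:0 into the top byte of ifh[2], so the wider mask could have leaked bits that presumably belong to a neighbouring header field. A quick standalone check of the split (the field layout here is inferred from the packing code, not from a datasheet):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t port = 1u << 9;		/* one-hot mask, chip port 9 */
		uint32_t ifh1 = (0xf00 & port) >> 8;	/* bits 11:8 -> 0x2 */
		uint32_t ifh2 = (0xff & port) << 24;	/* bits 7:0  -> 0x0 */

		printf("ifh[1]=0x%x ifh[2]=0x%08x\n", ifh1, ifh2);
		return 0;
	}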
@@ -370,11 +548,13 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
                         QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
 
        info.port = BIT(port->chip_port);
-       info.cpuq = 0xff;
+       info.tag_type = IFH_TAG_TYPE_C;
+       info.vid = skb_vlan_tag_get(skb);
        ocelot_gen_ifh(ifh, &info);
 
        for (i = 0; i < IFH_LEN; i++)
-               ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
+               ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]),
+                                QS_INJ_WR, grp);
 
        count = (skb->len + 3) / 4;
        last = skb->len % 4;
@@ -538,6 +718,20 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        struct ocelot_port *port = netdev_priv(dev);
        struct ocelot *ocelot = port->ocelot;
 
+       if (!vid) {
+               if (!port->vlan_aware)
+                       /* If the bridge is not VLAN aware and no VID was
+                        * provided, set it to pvid to ensure the MAC entry
+                        * matches incoming untagged packets.
+                        */
+                       vid = port->pvid;
+               else
+                       /* If the bridge is VLAN aware, a VID must be provided, as
+                        * otherwise the learnt entry wouldn't match any frame.
+                        */
+                       return -EINVAL;
+       }
+
        return ocelot_mact_learn(ocelot, port->chip_port, addr, vid,
                                 ENTRYTYPE_NORMAL);
 }
@@ -689,6 +883,30 @@ static int ocelot_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
        return ret;
 }
 
+static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
+                                 u16 vid)
+{
+       return ocelot_vlan_vid_add(dev, vid, false, true);
+}
+
+static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+                                  u16 vid)
+{
+       return ocelot_vlan_vid_del(dev, vid);
+}
+
+static int ocelot_set_features(struct net_device *dev,
+                              netdev_features_t features)
+{
+       struct ocelot_port *port = netdev_priv(dev);
+       netdev_features_t changed = dev->features ^ features;
+
+       if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
+               ocelot_vlan_mode(port, features);
+
+       return 0;
+}
+
 static const struct net_device_ops ocelot_port_netdev_ops = {
        .ndo_open                       = ocelot_port_open,
        .ndo_stop                       = ocelot_port_stop,
@@ -700,6 +918,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
        .ndo_fdb_add                    = ocelot_fdb_add,
        .ndo_fdb_del                    = ocelot_fdb_del,
        .ndo_fdb_dump                   = ocelot_fdb_dump,
+       .ndo_vlan_rx_add_vid            = ocelot_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid           = ocelot_vlan_rx_kill_vid,
+       .ndo_set_features               = ocelot_set_features,
 };
 
 static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
@@ -779,6 +1000,8 @@ static const struct ethtool_ops ocelot_ethtool_ops = {
        .get_strings            = ocelot_get_strings,
        .get_ethtool_stats      = ocelot_get_ethtool_stats,
        .get_sset_count         = ocelot_get_sset_count,
+       .get_link_ksettings     = phy_ethtool_get_link_ksettings,
+       .set_link_ksettings     = phy_ethtool_set_link_ksettings,
 };
 
 static int ocelot_port_attr_get(struct net_device *dev,
@@ -913,6 +1136,10 @@ static int ocelot_port_attr_set(struct net_device *dev,
        case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
                ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time);
                break;
+       case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+               ocelot_port->vlan_aware = attr->u.vlan_filtering;
+               ocelot_vlan_port_apply(ocelot_port->ocelot, ocelot_port);
+               break;
        case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
                ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled);
                break;
@@ -924,6 +1151,40 @@ static int ocelot_port_attr_set(struct net_device *dev,
        return err;
 }
 
+static int ocelot_port_obj_add_vlan(struct net_device *dev,
+                                   const struct switchdev_obj_port_vlan *vlan,
+                                   struct switchdev_trans *trans)
+{
+       int ret;
+       u16 vid;
+
+       for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+               ret = ocelot_vlan_vid_add(dev, vid,
+                                         vlan->flags & BRIDGE_VLAN_INFO_PVID,
+                                         vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int ocelot_port_vlan_del_vlan(struct net_device *dev,
+                                    const struct switchdev_obj_port_vlan *vlan)
+{
+       int ret;
+       u16 vid;
+
+       for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+               ret = ocelot_vlan_vid_del(dev, vid);
+
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot,
                                                     const unsigned char *addr,
                                                     u16 vid)
@@ -950,7 +1211,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev,
        bool new = false;
 
        if (!vid)
-               vid = 1;
+               vid = port->pvid;
 
        mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
        if (!mc) {
@@ -991,7 +1252,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
        u16 vid = mdb->vid;
 
        if (!vid)
-               vid = 1;
+               vid = port->pvid;
 
        mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
        if (!mc)
@@ -1023,6 +1284,11 @@ static int ocelot_port_obj_add(struct net_device *dev,
        int ret = 0;
 
        switch (obj->id) {
+       case SWITCHDEV_OBJ_ID_PORT_VLAN:
+               ret = ocelot_port_obj_add_vlan(dev,
+                                              SWITCHDEV_OBJ_PORT_VLAN(obj),
+                                              trans);
+               break;
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
                                              trans);
@@ -1040,6 +1306,10 @@ static int ocelot_port_obj_del(struct net_device *dev,
        int ret = 0;
 
        switch (obj->id) {
+       case SWITCHDEV_OBJ_ID_PORT_VLAN:
+               ret = ocelot_port_vlan_del_vlan(dev,
+                                               SWITCHDEV_OBJ_PORT_VLAN(obj));
+               break;
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
                break;
@@ -1085,6 +1355,142 @@ static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port,
 
        if (!ocelot->bridge_mask)
                ocelot->hw_bridge_dev = NULL;
+
+       /* Clear bridge vlan settings before calling ocelot_vlan_port_apply */
+       ocelot_port->vlan_aware = 0;
+       ocelot_port->pvid = 0;
+       ocelot_port->vid = 0;
+}
+
+static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
+{
+       int i, port, lag;
+
+       /* Reset destination and aggregation PGIDS */
+       for (port = 0; port < ocelot->num_phys_ports; port++)
+               ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
+
+       for (i = PGID_AGGR; i < PGID_SRC; i++)
+               ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
+                                ANA_PGID_PGID, i);
+
+       /* Now, set PGIDs for each LAG */
+       for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
+               unsigned long bond_mask;
+               int aggr_count = 0;
+               u8 aggr_idx[16];
+
+               bond_mask = ocelot->lags[lag];
+               if (!bond_mask)
+                       continue;
+
+               for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
+                       /* Destination mask */
+                       ocelot_write_rix(ocelot, bond_mask,
+                                        ANA_PGID_PGID, port);
+                       aggr_idx[aggr_count] = port;
+                       aggr_count++;
+               }
+
+               for (i = PGID_AGGR; i < PGID_SRC; i++) {
+                       u32 ac;
+
+                       ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i);
+                       ac &= ~bond_mask;
+                       ac |= BIT(aggr_idx[i % aggr_count]);
+                       ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i);
+               }
+       }
+}
+
+static void ocelot_setup_lag(struct ocelot *ocelot, int lag)
+{
+       unsigned long bond_mask = ocelot->lags[lag];
+       unsigned int p;
+
+       for_each_set_bit(p, &bond_mask, ocelot->num_phys_ports) {
+               u32 port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
+
+               port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
+
+               /* Use lag port as logical port for port i */
+               ocelot_write_gix(ocelot, port_cfg |
+                                ANA_PORT_PORT_CFG_PORTID_VAL(lag),
+                                ANA_PORT_PORT_CFG, p);
+       }
+}
+
+static int ocelot_port_lag_join(struct ocelot_port *ocelot_port,
+                               struct net_device *bond)
+{
+       struct ocelot *ocelot = ocelot_port->ocelot;
+       int p = ocelot_port->chip_port;
+       int lag, lp;
+       struct net_device *ndev;
+       u32 bond_mask = 0;
+
+       rcu_read_lock();
+       for_each_netdev_in_bond_rcu(bond, ndev) {
+               struct ocelot_port *port = netdev_priv(ndev);
+
+               bond_mask |= BIT(port->chip_port);
+       }
+       rcu_read_unlock();
+
+       lp = __ffs(bond_mask);
+
+       /* If the new port is the lowest one, use it as the logical port from
+        * now on.
+        */
+       if (p == lp) {
+               lag = p;
+               ocelot->lags[p] = bond_mask;
+               bond_mask &= ~BIT(p);
+               if (bond_mask) {
+                       lp = __ffs(bond_mask);
+                       ocelot->lags[lp] = 0;
+               }
+       } else {
+               lag = lp;
+               ocelot->lags[lp] |= BIT(p);
+       }
+
+       ocelot_setup_lag(ocelot, lag);
+       ocelot_set_aggr_pgids(ocelot);
+
+       return 0;
+}
+
+static void ocelot_port_lag_leave(struct ocelot_port *ocelot_port,
+                                 struct net_device *bond)
+{
+       struct ocelot *ocelot = ocelot_port->ocelot;
+       int p = ocelot_port->chip_port;
+       u32 port_cfg;
+       int i;
+
+       /* Remove port from any lag */
+       for (i = 0; i < ocelot->num_phys_ports; i++)
+               ocelot->lags[i] &= ~BIT(ocelot_port->chip_port);
+
+       /* If it was the logical port of the LAG, move the LAG config to the
+        * next port.
+        */
+       if (ocelot->lags[p]) {
+               int n = __ffs(ocelot->lags[p]);
+
+               ocelot->lags[n] = ocelot->lags[p];
+               ocelot->lags[p] = 0;
+
+               ocelot_setup_lag(ocelot, n);
+       }
+
+       port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
+       port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
+       ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(p),
+                        ANA_PORT_PORT_CFG, p);
+
+       ocelot_set_aggr_pgids(ocelot);
 }
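
ocelot->lags[] is indexed by physical port, and only the LAG's logical port (the lowest member at join time, or the inherited owner) holds the bond mask; every other entry is zero. A minimal userspace sketch of the leave-path bookkeeping under that invariant (the helper name and port numbers are ours, not the driver's):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	#define NPORTS 8
	static unsigned long lags[NPORTS];	/* lags[lp] = bond mask */

	/* Hypothetical helper mirroring ocelot_port_lag_leave() above. */
	static void lag_leave(int p)
	{
		int i, n;

		for (i = 0; i < NPORTS; i++)
			lags[i] &= ~(1UL << p);

		/* If p owned the LAG config, move it to the next member. */
		if (lags[p]) {
			n = ffs(lags[p]) - 1;
			lags[n] = lags[p];
			lags[p] = 0;
		}
	}

	int main(void)
	{
		lags[1] = 0x16;	/* LAG of ports 1, 2, 4; logical port 1 */
		lag_leave(1);	/* ownership moves to port 2 */
		printf("lags[2] = %#lx\n", lags[2]);	/* 0x14 */
		return 0;
	}
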
 
 /* Checks if the net_device instance given to us originates from our driver. */
@@ -1112,6 +1518,17 @@ static int ocelot_netdevice_port_event(struct net_device *dev,
                        else
                                ocelot_port_bridge_leave(ocelot_port,
                                                         info->upper_dev);
+
+                       ocelot_vlan_port_apply(ocelot_port->ocelot,
+                                              ocelot_port);
+               }
+               if (netif_is_lag_master(info->upper_dev)) {
+                       if (info->linking)
+                               err = ocelot_port_lag_join(ocelot_port,
+                                                          info->upper_dev);
+                       else
+                               ocelot_port_lag_leave(ocelot_port,
+                                                     info->upper_dev);
                }
                break;
        default:
@@ -1128,6 +1545,20 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        int ret = 0;
 
+       if (event == NETDEV_PRECHANGEUPPER &&
+           netif_is_lag_master(info->upper_dev)) {
+               struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
+               struct netlink_ext_ack *extack;
+
+               if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+                       extack = netdev_notifier_info_to_extack(&info->info);
+                       NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
+
+                       ret = -EINVAL;
+                       goto notify;
+               }
+       }
+
        if (netif_is_lag_master(dev)) {
                struct net_device *slave;
                struct list_head *iter;
@@ -1175,6 +1606,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
        dev->ethtool_ops = &ocelot_ethtool_ops;
        dev->switchdev_ops = &ocelot_port_switchdev_ops;
 
+       dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+       dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
        memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
        dev->dev_addr[ETH_ALEN - 1] += port;
        ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
@@ -1186,6 +1620,9 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
                goto err_register_netdev;
        }
 
+       /* Basic L2 initialization */
+       ocelot_vlan_port_apply(ocelot, ocelot_port);
+
        return 0;
 
 err_register_netdev:
@@ -1200,6 +1637,11 @@ int ocelot_init(struct ocelot *ocelot)
        int i, cpu = ocelot->num_phys_ports;
        char queue_name[32];
 
+       ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
+                                   sizeof(u32), GFP_KERNEL);
+       if (!ocelot->lags)
+               return -ENOMEM;
+
        ocelot->stats = devm_kcalloc(ocelot->dev,
                                     ocelot->num_phys_ports * ocelot->num_stats,
                                     sizeof(u64), GFP_KERNEL);
index 097bd12a10d4958c36353c7b1ea38f63f307d511..616bec30dfa3fe4b31a1295ef2239a57ac2cc36d 100644 (file)
@@ -493,7 +493,7 @@ struct ocelot {
        u8 num_cpu_ports;
        struct ocelot_port **ports;
 
-       u16 lags[16];
+       u32 *lags;
 
        /* Keep track of the vlan port masks */
        u32 vlan_mask[VLAN_N_VID];
index 18df7d934e810a56b967120ac1f7e9d0507d13af..26bb3b18f3be0f9f20149e5e689c561074c60a26 100644 (file)
@@ -29,7 +29,7 @@ static int ocelot_parse_ifh(u32 *ifh, struct frame_info *info)
        info->port = (ifh[2] & GENMASK(14, 11)) >> 11;
 
        info->cpuq = (ifh[3] & GENMASK(27, 20)) >> 20;
-       info->tag_type = (ifh[3] & GENMASK(16, 16)) >> 16;
+       info->tag_type = (ifh[3] & BIT(16)) >> 16;
        info->vid = ifh[3] & GENMASK(11, 0);
 
        return 0;
index 8a92088df0d7bf4966342631fb412b94f79a3ff4..1d9e3683540439da2ee7d6e57fec4080542ed7ba 100644 (file)
 #define pr_fmt(fmt)    "NFP net bpf: " fmt
 
 #include <linux/bug.h>
-#include <linux/kernel.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
+#include <linux/kernel.h>
 #include <linux/pkt_cls.h>
+#include <linux/reciprocal_div.h>
 #include <linux/unistd.h>
 
 #include "main.h"
@@ -415,6 +416,60 @@ emit_alu(struct nfp_prog *nfp_prog, swreg dst,
                   reg.dst_lmextn, reg.src_lmextn);
 }
 
+static void
+__emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg,
+          enum mul_type type, enum mul_step step, u16 breg, bool swap,
+          bool wr_both, bool dst_lmextn, bool src_lmextn)
+{
+       u64 insn;
+
+       insn = OP_MUL_BASE |
+               FIELD_PREP(OP_MUL_A_SRC, areg) |
+               FIELD_PREP(OP_MUL_B_SRC, breg) |
+               FIELD_PREP(OP_MUL_STEP, step) |
+               FIELD_PREP(OP_MUL_DST_AB, dst_ab) |
+               FIELD_PREP(OP_MUL_SW, swap) |
+               FIELD_PREP(OP_MUL_TYPE, type) |
+               FIELD_PREP(OP_MUL_WR_AB, wr_both) |
+               FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) |
+               FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn);
+
+       nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type,
+        enum mul_step step, swreg rreg)
+{
+       struct nfp_insn_ur_regs reg;
+       u16 areg;
+       int err;
+
+       if (type == MUL_TYPE_START && step != MUL_STEP_NONE) {
+               nfp_prog->error = -EINVAL;
+               return;
+       }
+
+       if (step == MUL_LAST || step == MUL_LAST_2) {
+               /* When type is a step and the step number is LAST or LAST_2,
+                * the left source is used as the destination.
+                */
+               err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg);
+               areg = reg.dst;
+       } else {
+               err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg);
+               areg = reg.areg;
+       }
+
+       if (err) {
+               nfp_prog->error = err;
+               return;
+       }
+
+       __emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap,
+                  reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
+}
+
 static void
 __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
                u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
@@ -670,7 +725,7 @@ static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
        xfer_num = round_up(len, 4) / 4;
 
        if (src_40bit_addr)
-               addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
+               addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base,
                              &off);
 
        /* Setup PREV_ALU fields to override memory read length. */
@@ -1380,6 +1435,133 @@ static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
                      SHF_SC_R_ROT, 16);
 }
 
+static void
+wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
+           swreg rreg, bool gen_high_half)
+{
+       emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
+       emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg);
+       emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg);
+       emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg);
+       emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg);
+       emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none());
+       if (gen_high_half)
+               emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2,
+                        reg_none());
+       else
+               wrp_immed(nfp_prog, dst_hi, 0);
+}
+
+static void
+wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
+           swreg rreg)
+{
+       emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
+       emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg);
+       emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg);
+       emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none());
+}
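
The NFP multiplier is stepped: it accumulates partial products across several instructions, which is why the 32x32 variant above needs four step instructions plus MUL_LAST/MUL_LAST_2 while 16x16 needs only two. A plain C illustration of the schoolbook decomposition such a stepped multiplier exploits (this is the identity, not the hardware algorithm itself):

	#include <stdint.h>
	#include <stdio.h>

	/* Build the 64-bit product of two u32 values from 16x16 partial
	 * products, the way a stepped multiplier accumulates them.
	 */
	static uint64_t mul32_by_parts(uint32_t a, uint32_t b)
	{
		uint32_t al = a & 0xffff, ah = a >> 16;
		uint32_t bl = b & 0xffff, bh = b >> 16;

		return (uint64_t)al * bl +
		       (((uint64_t)al * bh + (uint64_t)ah * bl) << 16) +
		       ((uint64_t)ah * bh << 32);
	}

	int main(void)
	{
		uint32_t a = 0xdeadbeef, b = 0x12345678;

		printf("%d\n", mul32_by_parts(a, b) == (uint64_t)a * b); /* 1 */
		return 0;
	}
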
+
+static int
+wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+       bool gen_high_half, bool ropnd_from_reg)
+{
+       swreg multiplier, multiplicand, dst_hi, dst_lo;
+       const struct bpf_insn *insn = &meta->insn;
+       u32 lopnd_max, ropnd_max;
+       u8 dst_reg;
+
+       dst_reg = insn->dst_reg;
+       multiplicand = reg_a(dst_reg * 2);
+       dst_hi = reg_both(dst_reg * 2 + 1);
+       dst_lo = reg_both(dst_reg * 2);
+       lopnd_max = meta->umax_dst;
+       if (ropnd_from_reg) {
+               multiplier = reg_b(insn->src_reg * 2);
+               ropnd_max = meta->umax_src;
+       } else {
+               u32 imm = insn->imm;
+
+               multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
+               ropnd_max = imm;
+       }
+       if (lopnd_max > U16_MAX || ropnd_max > U16_MAX)
+               wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier,
+                           gen_high_half);
+       else
+               wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier);
+
+       return 0;
+}
+
+static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm)
+{
+       swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst);
+       struct reciprocal_value_adv rvalue;
+       u8 pre_shift, exp;
+       swreg magic;
+
+       if (imm > U32_MAX) {
+               wrp_immed(nfp_prog, dst_both, 0);
+               return 0;
+       }
+
+       /* NOTE: because we are using "reciprocal_value_adv", which doesn't
+        * support a divisor > (1u << 31), we need to JIT a separate NFP
+        * sequence for that case. The result then equals the unsigned
+        * comparison "dst >= imm", which can be computed using the following
+        * NFP sequence:
+        *
+        *  alu[--, dst, -, imm]
+        *  immed[imm, 0]
+        *  alu[dst, imm, +carry, 0]
+        *
+        */
+       if (imm > 1U << 31) {
+               swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
+
+               emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b);
+               wrp_immed(nfp_prog, imm_a(nfp_prog), 0);
+               emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C,
+                        reg_imm(0));
+               return 0;
+       }
+
+       rvalue = reciprocal_value_adv(imm, 32);
+       exp = rvalue.exp;
+       if (rvalue.is_wide_m && !(imm & 1)) {
+               pre_shift = fls(imm & -imm) - 1;
+               rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift);
+       } else {
+               pre_shift = 0;
+       }
+       magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog));
+       if (imm == 1U << exp) {
+               emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
+                        SHF_SC_R_SHF, exp);
+       } else if (rvalue.is_wide_m) {
+               wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a,
+                           magic, true);
+               emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB,
+                        imm_b(nfp_prog));
+               emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
+                        SHF_SC_R_SHF, 1);
+               emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD,
+                        imm_b(nfp_prog));
+               emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
+                        SHF_SC_R_SHF, rvalue.sh - 1);
+       } else {
+               if (pre_shift)
+                       emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
+                                dst_b, SHF_SC_R_SHF, pre_shift);
+               wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true);
+               emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
+                        dst_b, SHF_SC_R_SHF, rvalue.sh);
+       }
+
+       return 0;
+}
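
Two identities make the JITed division sequences above correct: for a u32 dividend, a divisor above 2^31 can only produce a quotient of 0 or 1, so dst / imm collapses to the comparison dst >= imm; for smaller divisors, multiplying by a rounded-up reciprocal and shifting recovers the quotient. A userspace sketch checking both; the naive reciprocal here merely stands in for reciprocal_value_adv() and uses the GCC/Clang __int128 extension to dodge the wide-m handling the JIT does explicitly:

	#include <stdint.h>
	#include <stdio.h>

	/* q = (x * m) >> (32 + sh), with m = ceil(2^(32+sh) / d). */
	static uint32_t div_by_mul(uint32_t x, uint32_t d)
	{
		unsigned int sh = 0;
		unsigned __int128 m;

		while ((1ULL << sh) < d)	/* sh = ceil(log2(d)) */
			sh++;
		m = (((unsigned __int128)1 << (32 + sh)) + d - 1) / d;
		return (uint32_t)(((unsigned __int128)x * m) >> (32 + sh));
	}

	int main(void)
	{
		uint32_t x = 0xfffffffe;

		/* divisor above 2^31: quotient degenerates to (x >= d) */
		printf("%u\n", x / 0x80000001u == (x >= 0x80000001u)); /* 1 */
		/* reciprocal multiply matches ordinary division */
		printf("%u\n", div_by_mul(x, 7) == x / 7);	       /* 1 */
		return 0;
	}
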
+
 static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
        swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
@@ -1684,6 +1866,31 @@ static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
        return 0;
 }
 
+static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+       return wrp_mul(nfp_prog, meta, true, true);
+}
+
+static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+       return wrp_mul(nfp_prog, meta, true, false);
+}
+
+static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+       const struct bpf_insn *insn = &meta->insn;
+
+       return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm);
+}
+
+static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+       /* NOTE: the verifier hook has already rejected cases for which the
+        * verifier doesn't know whether the source operand is constant.
+        */
+       return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src);
+}
+
 static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
        const struct bpf_insn *insn = &meta->insn;
@@ -1772,8 +1979,8 @@ static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
        u8 dst, src;
 
        dst = insn->dst_reg * 2;
-       umin = meta->umin;
-       umax = meta->umax;
+       umin = meta->umin_src;
+       umax = meta->umax_src;
        if (umin == umax)
                return __shl_imm64(nfp_prog, dst, umin);
 
@@ -1881,8 +2088,8 @@ static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
        u8 dst, src;
 
        dst = insn->dst_reg * 2;
-       umin = meta->umin;
-       umax = meta->umax;
+       umin = meta->umin_src;
+       umax = meta->umax_src;
        if (umin == umax)
                return __shr_imm64(nfp_prog, dst, umin);
 
@@ -1995,8 +2202,8 @@ static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
        u8 dst, src;
 
        dst = insn->dst_reg * 2;
-       umin = meta->umin;
-       umax = meta->umax;
+       umin = meta->umin_src;
+       umax = meta->umax_src;
        if (umin == umax)
                return __ashr_imm64(nfp_prog, dst, umin);
 
@@ -2097,6 +2304,26 @@ static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
        return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
 }
 
+static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+       return wrp_mul(nfp_prog, meta, false, true);
+}
+
+static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+       return wrp_mul(nfp_prog, meta, false, false);
+}
+
+static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+       return div_reg64(nfp_prog, meta);
+}
+
+static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+       return div_imm64(nfp_prog, meta);
+}
+
 static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
        u8 dst = meta->insn.dst_reg * 2;
@@ -2848,6 +3075,10 @@ static const instr_cb_t instr_cb[256] = {
        [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
        [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
        [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
+       [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64,
+       [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64,
+       [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64,
+       [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64,
        [BPF_ALU64 | BPF_NEG] =         neg_reg64,
        [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64,
        [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
@@ -2867,6 +3098,10 @@ static const instr_cb_t instr_cb[256] = {
        [BPF_ALU | BPF_ADD | BPF_K] =   add_imm,
        [BPF_ALU | BPF_SUB | BPF_X] =   sub_reg,
        [BPF_ALU | BPF_SUB | BPF_K] =   sub_imm,
+       [BPF_ALU | BPF_MUL | BPF_X] =   mul_reg,
+       [BPF_ALU | BPF_MUL | BPF_K] =   mul_imm,
+       [BPF_ALU | BPF_DIV | BPF_X] =   div_reg,
+       [BPF_ALU | BPF_DIV | BPF_K] =   div_imm,
        [BPF_ALU | BPF_NEG] =           neg_reg,
        [BPF_ALU | BPF_LSH | BPF_K] =   shl_imm,
        [BPF_ALU | BPF_END | BPF_X] =   end_reg32,
@@ -3299,7 +3534,8 @@ curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
        if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
                return false;
 
-       if (ld_meta->ptr.type != PTR_TO_PACKET)
+       if (ld_meta->ptr.type != PTR_TO_PACKET &&
+           ld_meta->ptr.type != PTR_TO_MAP_VALUE)
                return false;
 
        if (st_meta->ptr.type != PTR_TO_PACKET)
index fcdfb8e7fdeab0b9dcb353f4cd4a7d76370c9817..b95b94d008cfa4436b5a7637101e9276f9135685 100644 (file)
@@ -66,26 +66,19 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
                    struct bpf_prog *prog, struct netlink_ext_ack *extack)
 {
        bool running, xdp_running;
-       int ret;
 
        if (!nfp_net_ebpf_capable(nn))
                return -EINVAL;
 
        running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
-       xdp_running = running && nn->dp.bpf_offload_xdp;
+       xdp_running = running && nn->xdp_hw.prog;
 
        if (!prog && !xdp_running)
                return 0;
        if (prog && running && !xdp_running)
                return -EBUSY;
 
-       ret = nfp_net_bpf_offload(nn, prog, running, extack);
-       /* Stop offload if replace not possible */
-       if (ret && prog)
-               nfp_bpf_xdp_offload(app, nn, NULL, extack);
-
-       nn->dp.bpf_offload_xdp = prog && !ret;
-       return ret;
+       return nfp_net_bpf_offload(nn, prog, running, extack);
 }
 
 static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
@@ -202,11 +195,14 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev,
        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;
 
+       if (tcf_block_shared(f->block))
+               return -EOPNOTSUPP;
+
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block,
                                             nfp_bpf_setup_tc_block_cb,
-                                            nn, nn);
+                                            nn, nn, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block,
                                        nfp_bpf_setup_tc_block_cb,
index 654fe7823e5ed4ab82a1b1e0c35fa43c1486bb28..9845c1a2d4c26c08ca42baa17163edce9274a101 100644 (file)
@@ -263,8 +263,10 @@ struct nfp_bpf_reg_state {
  * @func_id: function id for call instructions
  * @arg1: arg1 for call instructions
  * @arg2: arg2 for call instructions
- * @umin: copy of core verifier umin_value.
- * @umax: copy of core verifier umax_value.
+ * @umin_src: copy of core verifier umin_value for src operand.
+ * @umax_src: copy of core verifier umax_value for src operand.
+ * @umin_dst: copy of core verifier umin_value for dst operand.
+ * @umax_dst: copy of core verifier umax_value for dst operand.
  * @off: index of first generated machine instruction (in nfp_prog.prog)
  * @n: eBPF instruction number
  * @flags: eBPF instruction extra optimization flags
@@ -300,12 +302,15 @@ struct nfp_insn_meta {
                        struct bpf_reg_state arg1;
                        struct nfp_bpf_reg_state arg2;
                };
-               /* We are interested in range info for some operands,
-                * for example, the shift amount.
+               /* We are interested in range info for operands of ALU
+                * operations: for example, the shift amount, the multiplicand
+                * and the multiplier.
                 */
                struct {
-                       u64 umin;
-                       u64 umax;
+                       u64 umin_src;
+                       u64 umax_src;
+                       u64 umin_dst;
+                       u64 umax_dst;
                };
        };
        unsigned int off;
@@ -339,6 +344,11 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
        return BPF_MODE(meta->insn.code);
 }
 
+static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta)
+{
+       return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU;
+}
+
 static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
 {
        return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
@@ -384,23 +394,14 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
        return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
 }
 
-static inline bool is_mbpf_indir_shift(const struct nfp_insn_meta *meta)
+static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
 {
-       u8 code = meta->insn.code;
-       bool is_alu, is_shift;
-       u8 opclass, opcode;
-
-       opclass = BPF_CLASS(code);
-       is_alu = opclass == BPF_ALU64 || opclass == BPF_ALU;
-       if (!is_alu)
-               return false;
-
-       opcode = BPF_OP(code);
-       is_shift = opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH;
-       if (!is_shift)
-               return false;
+       return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL;
+}
 
-       return BPF_SRC(code) == BPF_X;
+static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
+{
+       return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
 }
 
 /**
index 7eae4c0266f811247a1b5e4ad6aadf70da44a2c5..78f44c4d95b4914755dfeb806c8d5e85f5463e65 100644 (file)
@@ -190,8 +190,10 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
 
                meta->insn = prog[i];
                meta->n = i;
-               if (is_mbpf_indir_shift(meta))
-                       meta->umin = U64_MAX;
+               if (is_mbpf_alu(meta)) {
+                       meta->umin_src = U64_MAX;
+                       meta->umin_dst = U64_MAX;
+               }
 
                list_add_tail(&meta->l, &nfp_prog->insns);
        }
index 4bfeba7b21b226d03b573617079490a4aa69f0a5..49ba0d645d36df879d6885309603426c383a17e6 100644 (file)
@@ -516,6 +516,82 @@ nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
        return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
 }
 
+static int
+nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+                 struct bpf_verifier_env *env)
+{
+       const struct bpf_reg_state *sreg =
+               cur_regs(env) + meta->insn.src_reg;
+       const struct bpf_reg_state *dreg =
+               cur_regs(env) + meta->insn.dst_reg;
+
+       meta->umin_src = min(meta->umin_src, sreg->umin_value);
+       meta->umax_src = max(meta->umax_src, sreg->umax_value);
+       meta->umin_dst = min(meta->umin_dst, dreg->umin_value);
+       meta->umax_dst = max(meta->umax_dst, dreg->umax_value);
+
+       /* NFP supports u16 and u32 multiplication.
+        *
+        * For ALU64, if either operand is beyond the u32 value range, we
+        * reject it. Note that if the source operand is BPF_K, we need to
+        * check the "imm" field directly and reject it if it is negative,
+        * because for ALU64 "imm" (of s32 type) is sign extended to s64,
+        * which the NFP multiplier doesn't support.
+        *
+        * For ALU32 it is fine for "imm" to be negative, because the result
+        * is 32 bits and the low half of the result is the same for signed
+        * and unsigned multiplication, so we still get the correct result.
+        */
+       if (is_mbpf_mul(meta)) {
+               if (meta->umax_dst > U32_MAX) {
+                       pr_vlog(env, "multiplier is not within u32 value range\n");
+                       return -EINVAL;
+               }
+               if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) {
+                       pr_vlog(env, "multiplicand is not within u32 value range\n");
+                       return -EINVAL;
+               }
+               if (mbpf_class(meta) == BPF_ALU64 &&
+                   mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
+                       pr_vlog(env, "sign extended multiplicand won't be within u32 value range\n");
+                       return -EINVAL;
+               }
+       }
+
+       /* NFP doesn't have divide instructions; we support divide by constant
+        * through reciprocal multiplication. Since NFP multiplication supports
+        * operands no bigger than u32, we require the divisor and dividend to
+        * be no bigger than that as well.
+        *
+        * Also, eBPF doesn't support signed divide and enforces this at the C
+        * language level by failing compilation. However, the LLVM assembler
+        * doesn't enforce it, so a negative constant can leak in as a BPF_K
+        * operand through assembly code; we reject such cases as well.
+        */
+       if (is_mbpf_div(meta)) {
+               if (meta->umax_dst > U32_MAX) {
+                       pr_vlog(env, "dividend is not within u32 value range\n");
+                       return -EINVAL;
+               }
+               if (mbpf_src(meta) == BPF_X) {
+                       if (meta->umin_src != meta->umax_src) {
+                               pr_vlog(env, "divisor is not constant\n");
+                               return -EINVAL;
+                       }
+                       if (meta->umax_src > U32_MAX) {
+                               pr_vlog(env, "divisor is not within u32 value range\n");
+                               return -EINVAL;
+                       }
+               }
+               if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
+                       pr_vlog(env, "divide by negative constant is not supported\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
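
The ALU64 BPF_K rejection above is purely about sign extension: a negative s32 imm becomes a 64-bit operand with the upper 32 bits set, which a u32-only multiplier cannot take. A two-line demonstration of the values involved:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int32_t imm = -2;

		/* ALU64 semantics: imm is sign extended to 64 bits first. */
		uint64_t operand = (uint64_t)(int64_t)imm;

		printf("%#llx\n", (unsigned long long)operand);
						/* 0xfffffffffffffffe */
		printf("fits in u32: %d\n", operand <= UINT32_MAX); /* 0 */
		return 0;
	}
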
+
 static int
 nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
 {
@@ -551,13 +627,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
        if (is_mbpf_xadd(meta))
                return nfp_bpf_check_xadd(nfp_prog, meta, env);
 
-       if (is_mbpf_indir_shift(meta)) {
-               const struct bpf_reg_state *sreg =
-                       cur_regs(env) + meta->insn.src_reg;
-
-               meta->umin = min(meta->umin, sreg->umin_value);
-               meta->umax = max(meta->umax, sreg->umax_value);
-       }
+       if (is_mbpf_alu(meta))
+               return nfp_bpf_check_alu(nfp_prog, meta, env);
 
        return 0;
 }
index 4a6d2db750719a7d144e337f0aea9e8daf5a7104..e56b815a8dc6c81729083eb7abe717402a6a347b 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/bitfield.h>
 #include <net/pkt_cls.h>
 #include <net/switchdev.h>
+#include <net/tc_act/tc_csum.h>
 #include <net/tc_act/tc_gact.h>
 #include <net/tc_act/tc_mirred.h>
 #include <net/tc_act/tc_pedit.h>
@@ -44,6 +45,8 @@
 #include "main.h"
 #include "../nfp_net_repr.h"
 
+#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS    (TUNNEL_CSUM | TUNNEL_KEY)
+
 static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
 {
        size_t act_size = sizeof(struct nfp_fl_pop_vlan);
@@ -235,9 +238,12 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
        size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
        struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
        u32 tmp_set_ip_tun_type_index = 0;
+       struct flowi4 flow = {};
        /* Currently we support one pre-tunnel, so the index is always 0. */
        int pretun_idx = 0;
+       struct rtable *rt;
        struct net *net;
+       int err;
 
        if (ip_tun->options_len)
                return -EOPNOTSUPP;
@@ -254,7 +260,28 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
 
        set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
        set_tun->tun_id = ip_tun->key.tun_id;
-       set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
+
+       /* Do a route lookup to determine the TTL; if the lookup fails, use
+        * the default. Note that CONFIG_INET is a requirement of
+        * CONFIG_NET_SWITCHDEV, so it must be defined here.
+        */
+       flow.daddr = ip_tun->key.u.ipv4.dst;
+       flow.flowi4_proto = IPPROTO_UDP;
+       rt = ip_route_output_key(net, &flow);
+       err = PTR_ERR_OR_ZERO(rt);
+       if (!err) {
+               set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
+               ip_rt_put(rt);
+       } else {
+               set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
+       }
+
+       set_tun->tos = ip_tun->key.tos;
+
+       if (!(ip_tun->key.tun_flags & TUNNEL_KEY) ||
+           ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
+               return -EOPNOTSUPP;
+       set_tun->tun_flags = ip_tun->key.tun_flags;
 
        /* Complete pre_tunnel action. */
        pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
@@ -398,8 +425,27 @@ nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
        return 0;
 }
 
+static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
+{
+       switch (ip_proto) {
+       case 0:
+               /* The filter doesn't force a protocol match; both TCP and UDP
+                * checksums will be updated if encountered.
+                */
+               return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
+       case IPPROTO_TCP:
+               return TCA_CSUM_UPDATE_FLAG_TCP;
+       case IPPROTO_UDP:
+               return TCA_CSUM_UPDATE_FLAG_UDP;
+       default:
+               /* All other protocols will be ignored by FW */
+               return 0;
+       }
+}
+
 static int
-nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
+nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
+            char *nfp_action, int *a_len, u32 *csum_updated)
 {
        struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
        struct nfp_fl_set_ip4_addrs set_ip_addr;
@@ -409,6 +455,7 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
        int idx, nkeys, err;
        size_t act_size;
        u32 offset, cmd;
+       u8 ip_proto = 0;
 
        memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
        memset(&set_ip6_src, 0, sizeof(set_ip6_src));
@@ -451,6 +498,15 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
                        return err;
        }
 
+       if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+               struct flow_dissector_key_basic *basic;
+
+               basic = skb_flow_dissector_target(flow->dissector,
+                                                 FLOW_DISSECTOR_KEY_BASIC,
+                                                 flow->key);
+               ip_proto = basic->ip_proto;
+       }
+
        if (set_eth.head.len_lw) {
                act_size = sizeof(set_eth);
                memcpy(nfp_action, &set_eth, act_size);
@@ -459,6 +515,10 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
                act_size = sizeof(set_ip_addr);
                memcpy(nfp_action, &set_ip_addr, act_size);
                *a_len += act_size;
+
+               /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
+               *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
+                               nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
                /* TC compiles set src and dst IPv6 address as a single action,
                 * the hardware requires this to be 2 separate actions.
@@ -471,18 +531,30 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
                memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
                       act_size);
                *a_len += act_size;
+
+               /* Hardware will automatically fix TCP/UDP checksum. */
+               *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_ip6_dst.head.len_lw) {
                act_size = sizeof(set_ip6_dst);
                memcpy(nfp_action, &set_ip6_dst, act_size);
                *a_len += act_size;
+
+               /* Hardware will automatically fix TCP/UDP checksum. */
+               *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_ip6_src.head.len_lw) {
                act_size = sizeof(set_ip6_src);
                memcpy(nfp_action, &set_ip6_src, act_size);
                *a_len += act_size;
+
+               /* Hardware will automatically fix TCP/UDP checksum. */
+               *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_tport.head.len_lw) {
                act_size = sizeof(set_tport);
                memcpy(nfp_action, &set_tport, act_size);
                *a_len += act_size;
+
+               /* Hardware will automatically fix TCP/UDP checksum. */
+               *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }
 
        return 0;
@@ -493,12 +565,18 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
                         struct nfp_fl_payload *nfp_fl, int *a_len,
                         struct net_device *netdev, bool last,
                         enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
-                        int *out_cnt)
+                        int *out_cnt, u32 *csum_updated)
 {
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_output *output;
        int err, prelag_size;
 
+       /* If csum_updated has not been reset by now, it means HW will
+        * incorrectly update csums when they are not requested.
+        */
+       if (*csum_updated)
+               return -EOPNOTSUPP;
+
        if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
                return -EOPNOTSUPP;
 
@@ -529,10 +607,11 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
 
 static int
 nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
+                      struct tc_cls_flower_offload *flow,
                       struct nfp_fl_payload *nfp_fl, int *a_len,
                       struct net_device *netdev,
                       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
-                      int *out_cnt)
+                      int *out_cnt, u32 *csum_updated)
 {
        struct nfp_fl_set_ipv4_udp_tun *set_tun;
        struct nfp_fl_pre_tunnel *pre_tun;
@@ -545,14 +624,14 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
        } else if (is_tcf_mirred_egress_redirect(a)) {
                err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
                                               true, tun_type, tun_out_cnt,
-                                              out_cnt);
+                                              out_cnt, csum_updated);
                if (err)
                        return err;
 
        } else if (is_tcf_mirred_egress_mirror(a)) {
                err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
                                               false, tun_type, tun_out_cnt,
-                                              out_cnt);
+                                              out_cnt, csum_updated);
                if (err)
                        return err;
 
@@ -602,8 +681,17 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
                /* Tunnel decap is handled by default so accept action. */
                return 0;
        } else if (is_tcf_pedit(a)) {
-               if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len))
+               if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
+                                a_len, csum_updated))
+                       return -EOPNOTSUPP;
+       } else if (is_tcf_csum(a)) {
+               /* csum action requests recalc of something we have not fixed */
+               if (tcf_csum_update_flags(a) & ~*csum_updated)
                        return -EOPNOTSUPP;
+               /* If we will correctly fix the csum, we can remove it from the
+                * csum update list, which is later used to check support.
+                */
+               *csum_updated &= ~tcf_csum_update_flags(a);
        } else {
                /* Currently we do not handle any other actions. */
                return -EOPNOTSUPP;
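
csum_updated implements a small contract across the action loop above: pedit ORs in the checksum flags the hardware fixes implicitly, an explicit csum action may request only a subset of those and clears what it consumes, and any bits left at output time would mean unsolicited checksum rewrites, so the flow is rejected. A sketch of that bookkeeping; the flag values are illustrative stand-ins for the TCA_CSUM_UPDATE_FLAG_* constants:

	#include <stdint.h>
	#include <stdio.h>

	#define FLAG_IPV4HDR	0x1	/* stand-ins for the             */
	#define FLAG_TCP	0x2	/* TCA_CSUM_UPDATE_FLAG_* values */

	int main(void)
	{
		uint32_t csum_updated = 0;
		uint32_t requested;

		/* pedit on an IPv4/TCP flow: HW will fix these implicitly */
		csum_updated |= FLAG_IPV4HDR | FLAG_TCP;

		/* explicit csum action: may only ask for flags already fixed */
		requested = FLAG_IPV4HDR | FLAG_TCP;
		if (requested & ~csum_updated)
			return 1;	/* unsupported request: reject */
		csum_updated &= ~requested;

		/* output action: leftover bits would be unsolicited rewrites */
		printf("%s\n", csum_updated ? "reject" : "offloadable");
		return 0;
	}
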
@@ -620,6 +708,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
        int act_len, act_cnt, err, tun_out_cnt, out_cnt;
        enum nfp_flower_tun_type tun_type;
        const struct tc_action *a;
+       u32 csum_updated = 0;
        LIST_HEAD(actions);
 
        memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
@@ -632,8 +721,9 @@ int nfp_flower_compile_action(struct nfp_app *app,
 
        tcf_exts_to_list(flow->exts, &actions);
        list_for_each_entry(a, &actions, list) {
-               err = nfp_flower_loop_action(app, a, nfp_flow, &act_len, netdev,
-                                            &tun_type, &tun_out_cnt, &out_cnt);
+               err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
+                                            netdev, &tun_type, &tun_out_cnt,
+                                            &out_cnt, &csum_updated);
                if (err)
                        return err;
                act_cnt++;
index 4a7f3510a2968154e9c4f78d8b2e14b673789a4e..15f1eacd76b6d381389e27a4ec860d1c2040bacb 100644 (file)
@@ -203,9 +203,9 @@ struct nfp_fl_set_ipv4_udp_tun {
        __be16 reserved;
        __be64 tun_id __packed;
        __be32 tun_type_index;
-       __be16 reserved2;
+       __be16 tun_flags;
        u8 ttl;
-       u8 reserved3;
+       u8 tos;
        __be32 extra[2];
 };
 
index 0c4c957717ea4b780d184b5577745cca8a5bf2f0..bf10598f66ae056a488074592ec4551fada4c319 100644 (file)
@@ -564,8 +564,9 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
        if (lag_upper_info &&
            lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
            (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH ||
-           (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
-           lag_upper_info->hash_type != NETDEV_LAG_HASH_E34))) {
+            (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
+             lag_upper_info->hash_type != NETDEV_LAG_HASH_E34 &&
+             lag_upper_info->hash_type != NETDEV_LAG_HASH_UNKNOWN))) {
                can_offload = false;
                nfp_flower_cmsg_warn(priv->app,
                                     "Unable to offload tx_type %u hash %u\n",
index bbe5764d26cb777f4292b82f4bbc78464706d9b7..ef2114d133872696cdb9ebbdeb44a97269079e57 100644 (file)
@@ -73,7 +73,7 @@ struct nfp_app;
 
 struct nfp_fl_mask_id {
        struct circ_buf mask_id_free_list;
-       struct timespec64 *last_used;
+       ktime_t *last_used;
        u8 init_unallocated;
 };
 
index 91935405f5861678077c188328d365ed5cb2ba7f..84f7a5dbea9d5bf17abd88416cc5a41f2fa4770b 100644 (file)
@@ -123,6 +123,20 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
                         NFP_FLOWER_MASK_MPLS_Q;
 
                frame->mpls_lse = cpu_to_be32(t_mpls);
+       } else if (dissector_uses_key(flow->dissector,
+                                     FLOW_DISSECTOR_KEY_BASIC)) {
+               /* Check for an MPLS ether type and set the
+                * NFP_FLOWER_MASK_MPLS_Q bit, which indicates an MPLS ether
+                * type without any MPLS fields.
+                */
+               struct flow_dissector_key_basic *key_basic;
+
+               key_basic = skb_flow_dissector_target(flow->dissector,
+                                                     FLOW_DISSECTOR_KEY_BASIC,
+                                                     flow->key);
+               if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
+                   key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
+                       frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
        }
 }
 
index 93fb809f50d1a7b0b6577c6a76ac0610e6797735..c098730544b76dae8ea6cab5ef5712cc40855c3e 100644 (file)
@@ -158,7 +158,6 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
 {
        struct nfp_flower_priv *priv = app->priv;
        struct circ_buf *ring;
-       struct timespec64 now;
 
        ring = &priv->mask_ids.mask_id_free_list;
        /* Checking if buffer is full. */
@@ -169,8 +168,7 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
        ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
                     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);
 
-       getnstimeofday64(&now);
-       priv->mask_ids.last_used[mask_id] = now;
+       priv->mask_ids.last_used[mask_id] = ktime_get();
 
        return 0;
 }
@@ -178,7 +176,7 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
 static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
 {
        struct nfp_flower_priv *priv = app->priv;
-       struct timespec64 delta, now;
+       ktime_t reuse_timeout;
        struct circ_buf *ring;
        u8 temp_id, freed_id;
 
@@ -198,10 +196,10 @@ static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
        memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
        *mask_id = temp_id;
 
-       getnstimeofday64(&now);
-       delta = timespec64_sub(now, priv->mask_ids.last_used[*mask_id]);
+       reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
+                                    NFP_FL_MASK_REUSE_TIME_NS);
 
-       if (timespec64_to_ns(&delta) < NFP_FL_MASK_REUSE_TIME_NS)
+       if (ktime_before(ktime_get(), reuse_timeout))
                goto err_not_found;
 
        memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
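
The ktime_t conversion above replaces a subtract-and-compare on timespec64 with a precomputed deadline: stamp the entry when it is freed, then refuse reuse while the clock is still before stamp plus the hold-off. The same pattern with plain nanosecond counters (the 40s constant is illustrative, not the driver's NFP_FL_MASK_REUSE_TIME_NS):

	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	#define REUSE_TIME_NS	(40LL * 1000 * 1000 * 1000)

	static int64_t now_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
	}

	int main(void)
	{
		int64_t last_used = now_ns();	/* entry just freed */
		int64_t reuse_timeout = last_used + REUSE_TIME_NS;

		/* allocation path: reusable only past the deadline */
		printf("%s\n", now_ns() < reuse_timeout ? "not yet"
							: "reusable");
		return 0;
	}
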
index c42e64f32333f84640ff913b61ff199701e1b404..43b9bf12b17426b64279721f12313f5c34fbb3e7 100644 (file)
@@ -264,6 +264,14 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                case cpu_to_be16(ETH_P_ARP):
                        return -EOPNOTSUPP;
 
+               case cpu_to_be16(ETH_P_MPLS_UC):
+               case cpu_to_be16(ETH_P_MPLS_MC):
+                       if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
+                               key_layer |= NFP_FLOWER_LAYER_MAC;
+                               key_size += sizeof(struct nfp_flower_mac_mpls);
+                       }
+                       break;
+
                /* Will be included in layer 2. */
                case cpu_to_be16(ETH_P_8021Q):
                        break;
@@ -576,9 +584,9 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
                return nfp_flower_del_offload(app, netdev, flower, egress);
        case TC_CLSFLOWER_STATS:
                return nfp_flower_get_stats(app, netdev, flower, egress);
+       default:
+               return -EOPNOTSUPP;
        }
-
-       return -EOPNOTSUPP;
 }
 
 int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
@@ -623,11 +631,14 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;
 
+       if (tcf_block_shared(f->block))
+               return -EOPNOTSUPP;
+
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block,
                                             nfp_flower_setup_tc_block_cb,
-                                            repr, repr);
+                                            repr, repr, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block,
                                        nfp_flower_setup_tc_block_cb,
index f6677bc9875adea9dbfe842b15a6e6d53fc0915f..cdc4e065f6f50d8dff19dbac983547b5b211a804 100644 (file)
@@ -426,4 +426,32 @@ static inline u32 nfp_get_ind_csr_ctx_ptr_offs(u32 read_offset)
        return (read_offset & ~NFP_IND_ME_CTX_PTR_BASE_MASK) | NFP_CSR_CTX_PTR;
 }
 
+enum mul_type {
+       MUL_TYPE_START          = 0x00,
+       MUL_TYPE_STEP_24x8      = 0x01,
+       MUL_TYPE_STEP_16x16     = 0x02,
+       MUL_TYPE_STEP_32x32     = 0x03,
+};
+
+enum mul_step {
+       MUL_STEP_1              = 0x00,
+       MUL_STEP_NONE           = MUL_STEP_1,
+       MUL_STEP_2              = 0x01,
+       MUL_STEP_3              = 0x02,
+       MUL_STEP_4              = 0x03,
+       MUL_LAST                = 0x04,
+       MUL_LAST_2              = 0x05,
+};
+
+#define OP_MUL_BASE            0x0f800000000ULL
+#define OP_MUL_A_SRC           0x000000003ffULL
+#define OP_MUL_B_SRC           0x000000ffc00ULL
+#define OP_MUL_STEP            0x00000700000ULL
+#define OP_MUL_DST_AB          0x00000800000ULL
+#define OP_MUL_SW              0x00040000000ULL
+#define OP_MUL_TYPE            0x00180000000ULL
+#define OP_MUL_WR_AB           0x20000000000ULL
+#define OP_MUL_SRC_LMEXTN      0x40000000000ULL
+#define OP_MUL_DST_LMEXTN      0x80000000000ULL
+
 #endif
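
The OP_MUL_* values above are contiguous bit masks within a single 64-bit instruction word, and FIELD_PREP() shifts a value into its mask's position. A minimal reimplementation of that packing (the field values are arbitrary; __builtin_ctzll is a GCC/Clang builtin):

	#include <stdint.h>
	#include <stdio.h>

	/* Minimal FIELD_PREP: shift val up to the mask's lowest set bit. */
	static uint64_t field_prep(uint64_t mask, uint64_t val)
	{
		return (val << __builtin_ctzll(mask)) & mask;
	}

	int main(void)
	{
		uint64_t insn = 0x0f800000000ULL	       /* OP_MUL_BASE  */
			| field_prep(0x000000003ffULL, 5)      /* OP_MUL_A_SRC */
			| field_prep(0x000000ffc00ULL, 9);     /* OP_MUL_B_SRC */

		printf("%#llx\n", (unsigned long long)insn);
		return 0;
	}
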
index 2a71a9ffd095a87e19c4546fc0838dafd7cbb28e..8970ec981e11387fbbf866d841dd6968fe6ad312 100644 (file)
@@ -485,7 +485,6 @@ struct nfp_stat_pair {
  * @dev:               Backpointer to struct device
  * @netdev:            Backpointer to net_device structure
  * @is_vf:             Is the driver attached to a VF?
- * @bpf_offload_xdp:   Offloaded BPF program is XDP
  * @chained_metadata_format:  Firmware will use new metadata format
  * @rx_dma_dir:                Mapping direction for RX buffers
  * @rx_dma_off:                Offset at which DMA packets (for XDP headroom)
@@ -510,7 +509,6 @@ struct nfp_net_dp {
        struct net_device *netdev;
 
        u8 is_vf:1;
-       u8 bpf_offload_xdp:1;
        u8 chained_metadata_format:1;
 
        u8 rx_dma_dir;
@@ -553,8 +551,8 @@ struct nfp_net_dp {
  * @rss_cfg:            RSS configuration
  * @rss_key:            RSS secret key
  * @rss_itbl:           RSS indirection table
- * @xdp_flags:         Flags with which XDP prog was loaded
- * @xdp_prog:          XDP prog (for ctrl path, both DRV and HW modes)
+ * @xdp:               Information about the driver XDP program
+ * @xdp_hw:            Information about the HW XDP program
  * @max_r_vecs:                Number of allocated interrupt vectors for RX/TX
  * @max_tx_rings:       Maximum number of TX rings supported by the Firmware
  * @max_rx_rings:       Maximum number of RX rings supported by the Firmware
@@ -610,8 +608,8 @@ struct nfp_net {
        u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
        u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
 
-       u32 xdp_flags;
-       struct bpf_prog *xdp_prog;
+       struct xdp_attachment_info xdp;
+       struct xdp_attachment_info xdp_hw;
 
        unsigned int max_tx_rings;
        unsigned int max_rx_rings;
index d4c27f849f9bbfae5d2d9e795fe28a000839bc07..a712e83c3f0f84ee1301ddf2859b9d86e009a90c 100644 (file)
@@ -945,11 +945,12 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 
 /**
  * nfp_net_tx_complete() - Handle completed TX packets
- * @tx_ring:   TX ring structure
+ * @tx_ring:   TX ring structure
+ * @budget:    NAPI budget (only used as bool to determine if in NAPI context)
  *
  * Return: Number of completed TX descriptors
  */
-static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
+static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
 {
        struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
        struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
@@ -999,7 +1000,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
 
                /* check for last gather fragment */
                if (fidx == nr_frags - 1)
-                       dev_consume_skb_any(skb);
+                       napi_consume_skb(skb, budget);
 
                tx_ring->txbufs[idx].dma_addr = 0;
                tx_ring->txbufs[idx].skb = NULL;
@@ -1709,8 +1710,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
                        }
                }
 
-               if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
-                                 dp->bpf_offload_xdp) && !meta.portid) {
+               if (xdp_prog && !meta.portid) {
                        void *orig_data = rxbuf->frag + pkt_off;
                        unsigned int dma_off;
                        int act;
@@ -1828,7 +1828,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
        unsigned int pkts_polled = 0;
 
        if (r_vec->tx_ring)
-               nfp_net_tx_complete(r_vec->tx_ring);
+               nfp_net_tx_complete(r_vec->tx_ring, budget);
        if (r_vec->rx_ring)
                pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
 
@@ -2062,7 +2062,7 @@ static void nfp_ctrl_poll(unsigned long arg)
        struct nfp_net_r_vector *r_vec = (void *)arg;
 
        spin_lock_bh(&r_vec->lock);
-       nfp_net_tx_complete(r_vec->tx_ring);
+       nfp_net_tx_complete(r_vec->tx_ring, 0);
        __nfp_ctrl_tx_queued(r_vec);
        spin_unlock_bh(&r_vec->lock);
 
@@ -3115,6 +3115,21 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
        return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void nfp_net_netpoll(struct net_device *netdev)
+{
+       struct nfp_net *nn = netdev_priv(netdev);
+       int i;
+
+       /* nfp_net's NAPIs are statically allocated, so even if there is a
+        * race with the reconfig path, this will simply try to schedule some
+        * disabled NAPI instances.
+        */
+       for (i = 0; i < nn->dp.num_stack_tx_rings; i++)
+               napi_schedule_irqoff(&nn->r_vecs[i].napi);
+}
+#endif
+
 static void nfp_net_stat64(struct net_device *netdev,
                           struct rtnl_link_stats64 *stats)
 {
@@ -3377,14 +3392,18 @@ static void nfp_net_del_vxlan_port(struct net_device *netdev,
                nfp_net_set_vxlan_port(nn, idx, 0);
 }
 
-static int
-nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog,
-                     struct netlink_ext_ack *extack)
+static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
 {
+       struct bpf_prog *prog = bpf->prog;
        struct nfp_net_dp *dp;
+       int err;
+
+       if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
+               return -EBUSY;
 
        if (!prog == !nn->dp.xdp_prog) {
                WRITE_ONCE(nn->dp.xdp_prog, prog);
+               xdp_attachment_setup(&nn->xdp, bpf);
                return 0;
        }
 
@@ -3398,38 +3417,26 @@ nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog,
        dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
 
        /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
-       return nfp_net_ring_reconfig(nn, dp, extack);
+       err = nfp_net_ring_reconfig(nn, dp, bpf->extack);
+       if (err)
+               return err;
+
+       xdp_attachment_setup(&nn->xdp, bpf);
+       return 0;
 }
 
-static int
-nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
-                 struct netlink_ext_ack *extack)
+static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
 {
-       struct bpf_prog *drv_prog, *offload_prog;
        int err;
 
-       if (nn->xdp_prog && (flags ^ nn->xdp_flags) & XDP_FLAGS_MODES)
+       if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
                return -EBUSY;
 
-       /* Load both when no flags set to allow easy activation of driver path
-        * when program is replaced by one which can't be offloaded.
-        */
-       drv_prog     = flags & XDP_FLAGS_HW_MODE  ? NULL : prog;
-       offload_prog = flags & XDP_FLAGS_DRV_MODE ? NULL : prog;
-
-       err = nfp_net_xdp_setup_drv(nn, drv_prog, extack);
+       err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
        if (err)
                return err;
 
-       err = nfp_app_xdp_offload(nn->app, nn, offload_prog, extack);
-       if (err && flags & XDP_FLAGS_HW_MODE)
-               return err;
-
-       if (nn->xdp_prog)
-               bpf_prog_put(nn->xdp_prog);
-       nn->xdp_prog = prog;
-       nn->xdp_flags = flags;
-
+       xdp_attachment_setup(&nn->xdp_hw, bpf);
        return 0;
 }
 
@@ -3439,16 +3446,13 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
 
        switch (xdp->command) {
        case XDP_SETUP_PROG:
+               return nfp_net_xdp_setup_drv(nn, xdp);
        case XDP_SETUP_PROG_HW:
-               return nfp_net_xdp_setup(nn, xdp->prog, xdp->flags,
-                                        xdp->extack);
+               return nfp_net_xdp_setup_hw(nn, xdp);
        case XDP_QUERY_PROG:
-               xdp->prog_attached = !!nn->xdp_prog;
-               if (nn->dp.bpf_offload_xdp)
-                       xdp->prog_attached = XDP_ATTACHED_HW;
-               xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
-               xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0;
-               return 0;
+               return xdp_attachment_query(&nn->xdp, xdp);
+       case XDP_QUERY_PROG_HW:
+               return xdp_attachment_query(&nn->xdp_hw, xdp);
        default:
                return nfp_app_bpf(nn->app, nn, xdp);
        }
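
The xdp_attachment_* helpers used above centralize bookkeeping every driver used to hand-roll: keep the prog pointer and flags together, refuse a replace attempted with different flags, and answer queries. A sketch of a driver ndo_bpf built on them, mirroring the shape of nfp_net_xdp(); the foo_* names are hypothetical, the helper signatures are those of net/core/xdp.c in this kernel generation, and this is not a complete, buildable driver:

	#include <linux/netdevice.h>
	#include <net/xdp.h>

	/* Hypothetical driver state: one attachment slot per mode. */
	struct foo_priv {
		struct xdp_attachment_info xdp;		/* driver path  */
		struct xdp_attachment_info xdp_hw;	/* offload path */
	};

	static int foo_xdp_setup_drv(struct foo_priv *fp, struct netdev_bpf *bpf)
	{
		if (!xdp_attachment_flags_ok(&fp->xdp, bpf))
			return -EBUSY;	/* replace with different flags */

		/* ... driver-specific reconfig with bpf->prog here ... */

		xdp_attachment_setup(&fp->xdp, bpf); /* takes the prog ref */
		return 0;
	}

	static int foo_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
	{
		struct foo_priv *fp = netdev_priv(dev);

		switch (bpf->command) {
		case XDP_SETUP_PROG:
			return foo_xdp_setup_drv(fp, bpf);
		case XDP_QUERY_PROG:
			return xdp_attachment_query(&fp->xdp, bpf);
		case XDP_QUERY_PROG_HW:
			return xdp_attachment_query(&fp->xdp_hw, bpf);
		default:
			return -EINVAL;
		}
	}
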
@@ -3482,6 +3486,9 @@ const struct net_device_ops nfp_net_netdev_ops = {
        .ndo_get_stats64        = nfp_net_stat64,
        .ndo_vlan_rx_add_vid    = nfp_net_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = nfp_net_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = nfp_net_netpoll,
+#endif
        .ndo_set_vf_mac         = nfp_app_set_vf_mac,
        .ndo_set_vf_vlan        = nfp_app_set_vf_vlan,
        .ndo_set_vf_spoofchk    = nfp_app_set_vf_spoofchk,
index 26d1cc4e2906132c5772d965db885b237c9d112c..6a79c8e4a7a404a9ae48ca8bf3eae492cc3783ad 100644 (file)
@@ -233,12 +233,10 @@ nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 static void
 nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 {
-       struct nfp_app *app;
-
-       app = nfp_app_from_netdev(netdev);
-       if (!app)
-               return;
+       struct nfp_app *app = nfp_app_from_netdev(netdev);
 
+       strlcpy(drvinfo->bus_info, pci_name(app->pdev),
+               sizeof(drvinfo->bus_info));
        nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
 }
 
@@ -452,7 +450,7 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
 {
        struct nfp_net *nn = netdev_priv(netdev);
 
-       return NN_RVEC_GATHER_STATS + nn->dp.num_r_vecs * NN_RVEC_PER_Q_STATS;
+       return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS;
 }
 
 static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
@@ -460,7 +458,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
        struct nfp_net *nn = netdev_priv(netdev);
        int i;
 
-       for (i = 0; i < nn->dp.num_r_vecs; i++) {
+       for (i = 0; i < nn->max_r_vecs; i++) {
                data = nfp_pr_et(data, "rvec_%u_rx_pkts", i);
                data = nfp_pr_et(data, "rvec_%u_tx_pkts", i);
                data = nfp_pr_et(data, "rvec_%u_tx_busy", i);
@@ -486,7 +484,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
        u64 tmp[NN_RVEC_GATHER_STATS];
        unsigned int i, j;
 
-       for (i = 0; i < nn->dp.num_r_vecs; i++) {
+       for (i = 0; i < nn->max_r_vecs; i++) {
                unsigned int start;
 
                do {
@@ -521,15 +519,13 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
        return data;
 }
 
-static unsigned int
-nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings)
+static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs)
 {
-       return NN_ET_GLOBAL_STATS_LEN + (rx_rings + tx_rings) * 2;
+       return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4;
 }
 
 static u8 *
-nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings,
-                             unsigned int tx_rings, bool repr)
+nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr)
 {
        int swap_off, i;
 
@@ -549,36 +545,29 @@ nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings,
        for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
                data = nfp_pr_et(data, nfp_net_et_stats[i].name);
 
-       for (i = 0; i < tx_rings; i++) {
-               data = nfp_pr_et(data, "txq_%u_pkts", i);
-               data = nfp_pr_et(data, "txq_%u_bytes", i);
-       }
-
-       for (i = 0; i < rx_rings; i++) {
+       for (i = 0; i < num_vecs; i++) {
                data = nfp_pr_et(data, "rxq_%u_pkts", i);
                data = nfp_pr_et(data, "rxq_%u_bytes", i);
+               data = nfp_pr_et(data, "txq_%u_pkts", i);
+               data = nfp_pr_et(data, "txq_%u_bytes", i);
        }
 
        return data;
 }
 
 static u64 *
-nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem,
-                     unsigned int rx_rings, unsigned int tx_rings)
+nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
 {
        unsigned int i;
 
        for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
                *data++ = readq(mem + nfp_net_et_stats[i].off);
 
-       for (i = 0; i < tx_rings; i++) {
-               *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
-               *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
-       }
-
-       for (i = 0; i < rx_rings; i++) {
+       for (i = 0; i < num_vecs; i++) {
                *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
                *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
+               *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
+               *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
        }
 
        return data;
@@ -633,8 +622,7 @@ static void nfp_net_get_strings(struct net_device *netdev,
        switch (stringset) {
        case ETH_SS_STATS:
                data = nfp_vnic_get_sw_stats_strings(netdev, data);
-               data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings,
-                                                    nn->dp.num_tx_rings,
+               data = nfp_vnic_get_hw_stats_strings(data, nn->max_r_vecs,
                                                     false);
                data = nfp_mac_get_stats_strings(netdev, data);
                data = nfp_app_port_get_stats_strings(nn->port, data);
@@ -649,8 +637,7 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
        struct nfp_net *nn = netdev_priv(netdev);
 
        data = nfp_vnic_get_sw_stats(netdev, data);
-       data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
-                                    nn->dp.num_rx_rings, nn->dp.num_tx_rings);
+       data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->max_r_vecs);
        data = nfp_mac_get_stats(netdev, data);
        data = nfp_app_port_get_stats(nn->port, data);
 }
@@ -662,8 +649,7 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
        switch (sset) {
        case ETH_SS_STATS:
                return nfp_vnic_get_sw_stats_count(netdev) +
-                      nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings,
-                                                  nn->dp.num_tx_rings) +
+                      nfp_vnic_get_hw_stats_count(nn->max_r_vecs) +
                       nfp_mac_get_stats_count(netdev) +
                       nfp_app_port_get_stats_count(nn->port);
        default:
@@ -679,7 +665,7 @@ static void nfp_port_get_strings(struct net_device *netdev,
        switch (stringset) {
        case ETH_SS_STATS:
                if (nfp_port_is_vnic(port))
-                       data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true);
+                       data = nfp_vnic_get_hw_stats_strings(data, 0, true);
                else
                        data = nfp_mac_get_stats_strings(netdev, data);
                data = nfp_app_port_get_stats_strings(port, data);
@@ -694,7 +680,7 @@ nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
        struct nfp_port *port = nfp_port_from_netdev(netdev);
 
        if (nfp_port_is_vnic(port))
-               data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0);
+               data = nfp_vnic_get_hw_stats(data, port->vnic, 0);
        else
                data = nfp_mac_get_stats(netdev, data);
        data = nfp_app_port_get_stats(port, data);
@@ -708,7 +694,7 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
        switch (sset) {
        case ETH_SS_STATS:
                if (nfp_port_is_vnic(port))
-                       count = nfp_vnic_get_hw_stats_count(0, 0);
+                       count = nfp_vnic_get_hw_stats_count(0);
                else
                        count = nfp_mac_get_stats_count(netdev);
                count += nfp_app_port_get_stats_count(port);
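
The ethtool rework above sizes the statistics set by nn->max_r_vecs instead of the currently configured ring counts, so the reported set no longer changes when rings are reconfigured; each ring vector contributes exactly four u64 counters (rx packets/bytes, tx packets/bytes), hence the num_vecs * 4 term. Whatever the sizing scheme, the three ethtool callbacks must agree on one count. A sketch of that invariant, with the foo_num_stats()/foo_fill_*() trio purely hypothetical:

        static int foo_get_sset_count(struct net_device *dev, int sset)
        {
                return sset == ETH_SS_STATS ? foo_num_stats(dev) : -EOPNOTSUPP;
        }

        static void foo_get_strings(struct net_device *dev, u32 sset, u8 *data)
        {
                if (sset == ETH_SS_STATS)
                        foo_fill_strings(dev, data); /* exactly foo_num_stats() names */
        }

        static void foo_get_ethtool_stats(struct net_device *dev,
                                          struct ethtool_stats *stats, u64 *data)
        {
                foo_fill_stats(dev, data);           /* exactly foo_num_stats() u64s */
        }
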
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 749655c329b240021a34e99612412626c26e8855..c8d0b1016a6463e8df585e3359c137379f5faea2 100644 (file)
@@ -1248,7 +1248,7 @@ static void nfp6000_free(struct nfp_cpp *cpp)
        kfree(nfp);
 }
 
-static void nfp6000_read_serial(struct device *dev, u8 *serial)
+static int nfp6000_read_serial(struct device *dev, u8 *serial)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        int pos;
@@ -1256,25 +1256,29 @@ static void nfp6000_read_serial(struct device *dev, u8 *serial)
 
        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
        if (!pos) {
-               memset(serial, 0, NFP_SERIAL_LEN);
-               return;
+               dev_err(dev, "can't find PCIe Serial Number Capability\n");
+               return -EINVAL;
        }
 
        pci_read_config_dword(pdev, pos + 4, &reg);
        put_unaligned_be16(reg >> 16, serial + 4);
        pci_read_config_dword(pdev, pos + 8, &reg);
        put_unaligned_be32(reg, serial);
+
+       return 0;
 }
 
-static u16 nfp6000_get_interface(struct device *dev)
+static int nfp6000_get_interface(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        int pos;
        u32 reg;
 
        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
-       if (!pos)
-               return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff);
+       if (!pos) {
+               dev_err(dev, "can't find PCIe Serial Number Capability\n");
+               return -EINVAL;
+       }
 
        pci_read_config_dword(pdev, pos + 4, &reg);
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index b0da3d4368505eef63a992ce4a652eaabba9e5a6..c338d539fa96738076146cacd104e2e6030b6cf6 100644 (file)
@@ -364,8 +364,8 @@ struct nfp_cpp_operations {
        int (*init)(struct nfp_cpp *cpp);
        void (*free)(struct nfp_cpp *cpp);
 
-       void (*read_serial)(struct device *dev, u8 *serial);
-       u16 (*get_interface)(struct device *dev);
+       int (*read_serial)(struct device *dev, u8 *serial);
+       int (*get_interface)(struct device *dev);
 
        int (*area_init)(struct nfp_cpp_area *area,
                         u32 dest, unsigned long long address,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index ef30597aa31963715676d9c4c8fb4a4ebe03ec10..73de57a09800d7d0c482e64d22995a3aa6c06613 100644 (file)
@@ -1163,10 +1163,10 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
 {
        const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0);
        struct nfp_cpp *cpp;
+       int ifc, err;
        u32 mask[2];
        u32 xpbaddr;
        size_t tgt;
-       int err;
 
        cpp = kzalloc(sizeof(*cpp), GFP_KERNEL);
        if (!cpp) {
@@ -1176,9 +1176,19 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
 
        cpp->op = ops;
        cpp->priv = priv;
-       cpp->interface = ops->get_interface(parent);
-       if (ops->read_serial)
-               ops->read_serial(parent, cpp->serial);
+
+       ifc = ops->get_interface(parent);
+       if (ifc < 0) {
+               err = ifc;
+               goto err_free_cpp;
+       }
+       cpp->interface = ifc;
+       if (ops->read_serial) {
+               err = ops->read_serial(parent, cpp->serial);
+               if (err)
+                       goto err_free_cpp;
+       }
+
        rwlock_init(&cpp->resource_lock);
        init_waitqueue_head(&cpp->waitq);
        lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
@@ -1191,7 +1201,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
        err = device_register(&cpp->dev);
        if (err < 0) {
                put_device(&cpp->dev);
-               goto err_dev;
+               goto err_free_cpp;
        }
 
        dev_set_drvdata(&cpp->dev, cpp);
@@ -1238,7 +1248,7 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
 
 err_out:
        device_unregister(&cpp->dev);
-err_dev:
+err_free_cpp:
        kfree(cpp);
 err_malloc:
        return ERR_PTR(err);
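
With get_interface() and read_serial() now returning error codes, nfp_cpp_from_operations() must free the half-constructed cpp when they fail; the renamed err_free_cpp label is the standard kernel unwind idiom, where each failure jumps to the label releasing exactly what has been set up so far. A stripped-down sketch, with setup_a()/setup_b()/undo_a() hypothetical:

        static struct foo *foo_create(void)
        {
                struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
                int err;

                if (!f)
                        return ERR_PTR(-ENOMEM);

                err = setup_a(f);
                if (err)
                        goto err_free;
                err = setup_b(f);
                if (err)
                        goto err_undo_a;

                return f;

        err_undo_a:
                undo_a(f);      /* undo only what setup_a() did */
        err_free:
                kfree(f);
                return ERR_PTR(err);
        }
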
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
index cd34097b79f1be9d313d8f28b9701bb5bd6a3100..37a6d7822a3860647c416efeff47c7a7837a3a85 100644 (file)
@@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
        err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
                           nfp_resource_address(state->res),
                           fwinf, sizeof(*fwinf));
-       if (err < sizeof(*fwinf))
+       if (err < (int)sizeof(*fwinf))
                goto err_release;
 
        if (!nffw_res_flg_init_get(fwinf))
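
The (int) cast above fixes a signed/unsigned trap: nfp_cpp_read() can return a negative error code, but sizeof() has the unsigned type size_t, so the comparison was performed unsigned and a negative return converted to a huge value that slipped past the check. A minimal illustration with a stand-in struct (handle_error()/never_reached() are placeholders):

        struct hdr { unsigned int a, b; };      /* stand-in, 8 bytes */
        int err = -5;

        /* Unsigned comparison: -5 converts to a huge size_t, test is false. */
        if (err < sizeof(struct hdr))
                never_reached();

        /* Signed comparison after the cast: test is true, error is caught. */
        if (err < (int)sizeof(struct hdr))
                handle_error();
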
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile
index 31288d4ad248fc5f4db4738ed9c2ca75b2a34d43..862de0f3bc41c4fe64c820fdc275c25f60afb028 100644 (file)
@@ -1,4 +1,4 @@
 obj-$(CONFIG_PCH_GBE) += pch_gbe.o
 
 pch_gbe-y := pch_gbe_phy.o pch_gbe_ethtool.o pch_gbe_param.o
-pch_gbe-y += pch_gbe_api.o pch_gbe_main.o
+pch_gbe-y += pch_gbe_main.o
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 697e29dd4bd3adf616dc43fb65c8f3378231868d..44c2f291e76633942d5353a8aeadc66d0d642d82 100644 (file)
@@ -326,32 +326,6 @@ struct pch_gbe_regs {
 #define PCH_GBE_FC_FULL                        3
 #define PCH_GBE_FC_DEFAULT             PCH_GBE_FC_FULL
 
-
-struct pch_gbe_hw;
-/**
- * struct  pch_gbe_functions - HAL APi function pointer
- * @get_bus_info:      for pch_gbe_hal_get_bus_info
- * @init_hw:           for pch_gbe_hal_init_hw
- * @read_phy_reg:      for pch_gbe_hal_read_phy_reg
- * @write_phy_reg:     for pch_gbe_hal_write_phy_reg
- * @reset_phy:         for pch_gbe_hal_phy_hw_reset
- * @sw_reset_phy:      for pch_gbe_hal_phy_sw_reset
- * @power_up_phy:      for pch_gbe_hal_power_up_phy
- * @power_down_phy:    for pch_gbe_hal_power_down_phy
- * @read_mac_addr:     for pch_gbe_hal_read_mac_addr
- */
-struct pch_gbe_functions {
-       void (*get_bus_info) (struct pch_gbe_hw *);
-       s32 (*init_hw) (struct pch_gbe_hw *);
-       s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *);
-       s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16);
-       void (*reset_phy) (struct pch_gbe_hw *);
-       void (*sw_reset_phy) (struct pch_gbe_hw *);
-       void (*power_up_phy) (struct pch_gbe_hw *hw);
-       void (*power_down_phy) (struct pch_gbe_hw *hw);
-       s32 (*read_mac_addr) (struct pch_gbe_hw *);
-};
-
 /**
  * struct pch_gbe_mac_info - MAC information
  * @addr[6]:           Store the MAC address
@@ -392,17 +366,6 @@ struct pch_gbe_phy_info {
        u16 autoneg_advertised;
 };
 
-/*!
- * @ingroup Gigabit Ether driver Layer
- * @struct  pch_gbe_bus_info
- * @brief   Bus information
- */
-struct pch_gbe_bus_info {
-       u8 type;
-       u8 speed;
-       u8 width;
-};
-
 /*!
  * @ingroup Gigabit Ether driver Layer
  * @struct  pch_gbe_hw
@@ -414,10 +377,8 @@ struct pch_gbe_hw {
        struct pch_gbe_regs  __iomem *reg;
        spinlock_t miim_lock;
 
-       const struct pch_gbe_functions *func;
        struct pch_gbe_mac_info mac;
        struct pch_gbe_phy_info phy;
-       struct pch_gbe_bus_info bus;
 };
 
 /**
@@ -680,7 +641,6 @@ void pch_gbe_set_ethtool_ops(struct net_device *netdev);
 
 /* pch_gbe_mac.c */
 s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
-s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
 u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
                          u16 data);
 #endif /* _PCH_GBE_H_ */
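
pch_gbe carried a HAL function-pointer table (struct pch_gbe_functions) with exactly one implementation, so every pch_gbe_hal_*() wrapper in the deleted pch_gbe_api.c below collapses into a direct call. The shape of the simplification, taken from the PHY read path in this diff:

        /* before: indirect call through the single-entry ops table */
        ret = hw->func->read_phy_reg(hw, offset, &val);

        /* after: the only implementation, called directly */
        ret = pch_gbe_phy_read_reg_miic(hw, offset, &val);

Dropping the table also removes the NULL-pointer checks and "ERROR: configuration" paths that could never trigger in practice.
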
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
deleted file mode 100644 (file)
index 5125036..0000000
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
- *
- * This code was derived from the Intel e1000e Linux driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#include "pch_gbe.h"
-#include "pch_gbe_phy.h"
-#include "pch_gbe_api.h"
-
-/* bus type values */
-#define pch_gbe_bus_type_unknown       0
-#define pch_gbe_bus_type_pci           1
-#define pch_gbe_bus_type_pcix          2
-#define pch_gbe_bus_type_pci_express   3
-#define pch_gbe_bus_type_reserved      4
-
-/* bus speed values */
-#define pch_gbe_bus_speed_unknown      0
-#define pch_gbe_bus_speed_33           1
-#define pch_gbe_bus_speed_66           2
-#define pch_gbe_bus_speed_100          3
-#define pch_gbe_bus_speed_120          4
-#define pch_gbe_bus_speed_133          5
-#define pch_gbe_bus_speed_2500         6
-#define pch_gbe_bus_speed_reserved     7
-
-/* bus width values */
-#define pch_gbe_bus_width_unknown      0
-#define pch_gbe_bus_width_pcie_x1      1
-#define pch_gbe_bus_width_pcie_x2      2
-#define pch_gbe_bus_width_pcie_x4      4
-#define pch_gbe_bus_width_32           5
-#define pch_gbe_bus_width_64           6
-#define pch_gbe_bus_width_reserved     7
-
-/**
- * pch_gbe_plat_get_bus_info - Obtain bus information for adapter
- * @hw:        Pointer to the HW structure
- */
-static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw)
-{
-       hw->bus.type  = pch_gbe_bus_type_pci_express;
-       hw->bus.speed = pch_gbe_bus_speed_2500;
-       hw->bus.width = pch_gbe_bus_width_pcie_x1;
-}
-
-/**
- * pch_gbe_plat_init_hw - Initialize hardware
- * @hw:        Pointer to the HW structure
- * Returns:
- *     0:              Successfully
- *     Negative value: Failed-EBUSY
- */
-static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw)
-{
-       s32 ret_val;
-
-       ret_val = pch_gbe_phy_get_id(hw);
-       if (ret_val) {
-               struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
-               netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
-               return ret_val;
-       }
-       pch_gbe_phy_init_setting(hw);
-       /* Setup Mac interface option RGMII */
-#ifdef PCH_GBE_MAC_IFOP_RGMII
-       pch_gbe_phy_set_rgmii(hw);
-#endif
-       return ret_val;
-}
-
-static const struct pch_gbe_functions pch_gbe_ops = {
-       .get_bus_info      = pch_gbe_plat_get_bus_info,
-       .init_hw           = pch_gbe_plat_init_hw,
-       .read_phy_reg      = pch_gbe_phy_read_reg_miic,
-       .write_phy_reg     = pch_gbe_phy_write_reg_miic,
-       .reset_phy         = pch_gbe_phy_hw_reset,
-       .sw_reset_phy      = pch_gbe_phy_sw_reset,
-       .power_up_phy      = pch_gbe_phy_power_up,
-       .power_down_phy    = pch_gbe_phy_power_down,
-       .read_mac_addr     = pch_gbe_mac_read_mac_addr
-};
-
-/**
- * pch_gbe_plat_init_function_pointers - Init func ptrs
- * @hw:        Pointer to the HW structure
- */
-static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw)
-{
-       /* Set PHY parameter */
-       hw->phy.reset_delay_us     = PCH_GBE_PHY_RESET_DELAY_US;
-       /* Set function pointers */
-       hw->func = &pch_gbe_ops;
-}
-
-/**
- * pch_gbe_hal_setup_init_funcs - Initializes function pointers
- * @hw:        Pointer to the HW structure
- * Returns:
- *     0:      Successfully
- *     ENOSYS: Function is not registered
- */
-s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw)
-{
-       if (!hw->reg) {
-               struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
-               netdev_err(adapter->netdev, "ERROR: Registers not mapped\n");
-               return -ENOSYS;
-       }
-       pch_gbe_plat_init_function_pointers(hw);
-       return 0;
-}
-
-/**
- * pch_gbe_hal_get_bus_info - Obtain bus information for adapter
- * @hw:        Pointer to the HW structure
- */
-void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
-{
-       if (!hw->func->get_bus_info) {
-               struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
-               netdev_err(adapter->netdev, "ERROR: configuration\n");
-               return;
-       }
-       hw->func->get_bus_info(hw);
-}
-
-/**
- * pch_gbe_hal_init_hw - Initialize hardware
- * @hw:        Pointer to the HW structure
- * Returns:
- *     0:      Successfully
- *     ENOSYS: Function is not registered
- */
-s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
-{
-       if (!hw->func->init_hw) {
-               struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
-               netdev_err(adapter->netdev, "ERROR: configuration\n");
-               return -ENOSYS;
-       }
-       return hw->func->init_hw(hw);
-}
-
-/**
- * pch_gbe_hal_read_phy_reg - Reads PHY register
- * @hw:            Pointer to the HW structure
- * @offset: The register to read
- * @data:   The buffer to store the 16-bit read.
- * Returns:
- *     0:      Successfully
- *     Negative value: Failed
- */
-s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
-                                       u16 *data)
-{
-       if (!hw->func->read_phy_reg)
-               return 0;
-       return hw->func->read_phy_reg(hw, offset, data);
-}
-
-/**
- * pch_gbe_hal_write_phy_reg - Writes PHY register
- * @hw:            Pointer to the HW structure
- * @offset: The register to read
- * @data:   The value to write.
- * Returns:
- *     0:      Successfully
- *     Negative value: Failed
- */
-s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset,
-                                       u16 data)
-{
-       if (!hw->func->write_phy_reg)
-               return 0;
-       return hw->func->write_phy_reg(hw, offset, data);
-}
-
-/**
- * pch_gbe_hal_phy_hw_reset - Hard PHY reset
- * @hw:            Pointer to the HW structure
- */
-void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw)
-{
-       if (!hw->func->reset_phy) {
-               struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
-               netdev_err(adapter->netdev, "ERROR: configuration\n");
-               return;
-       }
-       hw->func->reset_phy(hw);
-}
-
-/**
- * pch_gbe_hal_phy_sw_reset - Soft PHY reset
- * @hw:            Pointer to the HW structure
- */
-void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
-{
-       if (!hw->func->sw_reset_phy) {
-               struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
-               netdev_err(adapter->netdev, "ERROR: configuration\n");
-               return;
-       }
-       hw->func->sw_reset_phy(hw);
-}
-
-/**
- * pch_gbe_hal_read_mac_addr - Reads MAC address
- * @hw:        Pointer to the HW structure
- * Returns:
- *     0:      Successfully
- *     ENOSYS: Function is not registered
- */
-s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw)
-{
-       if (!hw->func->read_mac_addr) {
-               struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
-
-               netdev_err(adapter->netdev, "ERROR: configuration\n");
-               return -ENOSYS;
-       }
-       return hw->func->read_mac_addr(hw);
-}
-
-/**
- * pch_gbe_hal_power_up_phy - Power up PHY
- * @hw:        Pointer to the HW structure
- */
-void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw)
-{
-       if (hw->func->power_up_phy)
-               hw->func->power_up_phy(hw);
-}
-
-/**
- * pch_gbe_hal_power_down_phy - Power down PHY
- * @hw:        Pointer to the HW structure
- */
-void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw)
-{
-       if (hw->func->power_down_phy)
-               hw->func->power_down_phy(hw);
-}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h
deleted file mode 100644 (file)
index 91ce07c..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
- *
- * This code was derived from the Intel e1000e Linux driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef _PCH_GBE_API_H_
-#define _PCH_GBE_API_H_
-
-#include "pch_gbe_phy.h"
-
-s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw);
-void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw);
-s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw);
-s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data);
-s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data);
-void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw);
-void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw);
-s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw);
-void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw);
-void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw);
-
-#endif
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 731ce1e419e45971574d71c607e165d13daf4974..adaa0024adfed596b87c47ec08d9be94f1d0edb0 100644 (file)
@@ -17,7 +17,7 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 #include "pch_gbe.h"
-#include "pch_gbe_api.h"
+#include "pch_gbe_phy.h"
 
 /**
  * pch_gbe_stats - Stats item information
@@ -125,7 +125,7 @@ static int pch_gbe_set_link_ksettings(struct net_device *netdev,
        u32 advertising;
        int ret;
 
-       pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET);
+       pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET);
 
        memcpy(&copy_ecmd, ecmd, sizeof(*ecmd));
 
@@ -204,7 +204,7 @@ static void pch_gbe_get_regs(struct net_device *netdev,
                *regs_buff++ = ioread32(&hw->reg->INT_ST + i);
        /* PHY register */
        for (i = 0; i < PCH_GBE_PHY_REGS_LEN; i++) {
-               pch_gbe_hal_read_phy_reg(&adapter->hw, i, &tmp);
+               pch_gbe_phy_read_reg_miic(&adapter->hw, i, &tmp);
                *regs_buff++ = tmp;
        }
 }
@@ -349,25 +349,12 @@ static int pch_gbe_set_ringparam(struct net_device *netdev,
                err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
                if (err)
                        goto err_setup_tx;
-               /* save the new, restore the old in order to free it,
-                * then restore the new back again */
-#ifdef RINGFREE
-               adapter->rx_ring = rx_old;
-               adapter->tx_ring = tx_old;
-               pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
-               pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
-               kfree(tx_old);
-               kfree(rx_old);
-               adapter->rx_ring = rxdr;
-               adapter->tx_ring = txdr;
-#else
                pch_gbe_free_rx_resources(adapter, rx_old);
                pch_gbe_free_tx_resources(adapter, tx_old);
                kfree(tx_old);
                kfree(rx_old);
                adapter->rx_ring = rxdr;
                adapter->tx_ring = txdr;
-#endif
                err = pch_gbe_up(adapter);
        }
        return err;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 34a1581eda95578b6350eeedf3c7372e4388953a..43c0c10dfeb7ad602417b3bcccfac8e3cffd9d27 100644 (file)
@@ -18,7 +18,7 @@
  */
 
 #include "pch_gbe.h"
-#include "pch_gbe_api.h"
+#include "pch_gbe_phy.h"
 #include <linux/module.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_classify.h>
@@ -34,7 +34,6 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PCH_GBE_DMA_ALIGN              0
 #define PCH_GBE_DMA_PADDING            2
 #define PCH_GBE_WATCHDOG_PERIOD                (5 * HZ)        /* watchdog time */
-#define PCH_GBE_COPYBREAK_DEFAULT      256
 #define PCH_GBE_PCI_BAR                        1
 #define PCH_GBE_RESERVE_MEMORY         0x200000        /* 2MB */
 
@@ -113,8 +112,6 @@ const char pch_driver_version[] = DRV_VERSION;
 
 #define MINNOW_PHY_RESET_GPIO          13
 
-static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
-
 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
                               int data);
@@ -290,7 +287,7 @@ static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
  * Returns:
  *     0:                      Successful.
  */
-s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
+static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
 {
        struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
        u32  adr1a, adr1b;
@@ -369,9 +366,7 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
        /* Read the MAC address. and store to the private data */
        pch_gbe_mac_read_mac_addr(hw);
        iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
-#ifdef PCH_GBE_MAC_IFOP_RGMII
        iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
-#endif
        pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
        /* Setup the receive addresses */
        pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
@@ -416,44 +411,6 @@ static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
        pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
 }
 
-
-/**
- * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
- * @hw:                    Pointer to the HW structure
- * @mc_addr_list:   Array of multicast addresses to program
- * @mc_addr_count:  Number of multicast addresses to program
- * @mar_used_count: The first MAC Address register free to program
- * @mar_total_num:  Total number of supported MAC Address Registers
- */
-static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
-                                           u8 *mc_addr_list, u32 mc_addr_count,
-                                           u32 mar_used_count, u32 mar_total_num)
-{
-       u32 i, adrmask;
-
-       /* Load the first set of multicast addresses into the exact
-        * filters (RAR).  If there are not enough to fill the RAR
-        * array, clear the filters.
-        */
-       for (i = mar_used_count; i < mar_total_num; i++) {
-               if (mc_addr_count) {
-                       pch_gbe_mac_mar_set(hw, mc_addr_list, i);
-                       mc_addr_count--;
-                       mc_addr_list += ETH_ALEN;
-               } else {
-                       /* Clear MAC address mask */
-                       adrmask = ioread32(&hw->reg->ADDR_MASK);
-                       iowrite32((adrmask | (0x0001 << i)),
-                                       &hw->reg->ADDR_MASK);
-                       /* wait busy */
-                       pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
-                       /* Clear MAC address */
-                       iowrite32(0, &hw->reg->mac_adr[i].high);
-                       iowrite32(0, &hw->reg->mac_adr[i].low);
-               }
-       }
-}
-
 /**
  * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
  * @hw:                    Pointer to the HW structure
@@ -763,14 +720,23 @@ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
 void pch_gbe_reset(struct pch_gbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       struct pch_gbe_hw *hw = &adapter->hw;
+       s32 ret_val;
 
-       pch_gbe_mac_reset_hw(&adapter->hw);
+       pch_gbe_mac_reset_hw(hw);
        /* reprogram multicast address register after reset */
        pch_gbe_set_multi(netdev);
        /* Setup the receive address. */
-       pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
-       if (pch_gbe_hal_init_hw(&adapter->hw))
-               netdev_err(netdev, "Hardware Error\n");
+       pch_gbe_mac_init_rx_addrs(hw, PCH_GBE_MAR_ENTRIES);
+
+       ret_val = pch_gbe_phy_get_id(hw);
+       if (ret_val) {
+               netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
+               return;
+       }
+       pch_gbe_phy_init_setting(hw);
+       /* Setup Mac interface option RGMII */
+       pch_gbe_phy_set_rgmii(hw);
 }
 
 /**
@@ -1036,7 +1002,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
        unsigned long rgmii = 0;
 
        /* Set the RGMII control. */
-#ifdef PCH_GBE_MAC_IFOP_RGMII
        switch (speed) {
        case SPEED_10:
                rgmii = (PCH_GBE_RGMII_RATE_2_5M |
@@ -1052,10 +1017,6 @@ static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
                break;
        }
        iowrite32(rgmii, &hw->reg->RGMII_CTRL);
-#else  /* GMII */
-       rgmii = 0;
-       iowrite32(rgmii, &hw->reg->RGMII_CTRL);
-#endif
 }
 static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
                              u16 duplex)
@@ -2029,12 +1990,8 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
        adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
        hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+       hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
 
-       /* Initialize the hardware-specific values */
-       if (pch_gbe_hal_setup_init_funcs(hw)) {
-               netdev_err(netdev, "Hardware Initialization Failure\n");
-               return -EIO;
-       }
        if (pch_gbe_alloc_queues(adapter)) {
                netdev_err(netdev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
@@ -2075,7 +2032,7 @@ static int pch_gbe_open(struct net_device *netdev)
        err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
        if (err)
                goto err_setup_rx;
-       pch_gbe_hal_power_up_phy(hw);
+       pch_gbe_phy_power_up(hw);
        err = pch_gbe_up(adapter);
        if (err)
                goto err_up;
@@ -2084,7 +2041,7 @@ static int pch_gbe_open(struct net_device *netdev)
 
 err_up:
        if (!adapter->wake_up_evt)
-               pch_gbe_hal_power_down_phy(hw);
+               pch_gbe_phy_power_down(hw);
        pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
 err_setup_rx:
        pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
@@ -2107,7 +2064,7 @@ static int pch_gbe_stop(struct net_device *netdev)
 
        pch_gbe_down(adapter);
        if (!adapter->wake_up_evt)
-               pch_gbe_hal_power_down_phy(hw);
+               pch_gbe_phy_power_down(hw);
        pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
        pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
        return 0;
@@ -2148,50 +2105,52 @@ static void pch_gbe_set_multi(struct net_device *netdev)
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
        struct pch_gbe_hw *hw = &adapter->hw;
        struct netdev_hw_addr *ha;
-       u8 *mta_list;
-       u32 rctl;
-       int i;
-       int mc_count;
+       u32 rctl, adrmask;
+       int mc_count, i;
 
        netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags);
 
-       /* Check for Promiscuous and All Multicast modes */
+       /* By default enable address & multicast filtering */
        rctl = ioread32(&hw->reg->RX_MODE);
+       rctl |= PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN;
+
+       /* Promiscuous mode disables all hardware address filtering */
+       if (netdev->flags & IFF_PROMISC)
+               rctl &= ~(PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
+
+       /* If we want to monitor more multicast addresses than the hardware can
+        * support then disable hardware multicast filtering.
+        */
        mc_count = netdev_mc_count(netdev);
-       if ((netdev->flags & IFF_PROMISC)) {
-               rctl &= ~PCH_GBE_ADD_FIL_EN;
-               rctl &= ~PCH_GBE_MLT_FIL_EN;
-       } else if ((netdev->flags & IFF_ALLMULTI)) {
-               /* all the multicasting receive permissions */
-               rctl |= PCH_GBE_ADD_FIL_EN;
+       if ((netdev->flags & IFF_ALLMULTI) || mc_count >= PCH_GBE_MAR_ENTRIES)
                rctl &= ~PCH_GBE_MLT_FIL_EN;
-       } else {
-               if (mc_count >= PCH_GBE_MAR_ENTRIES) {
-                       /* all the multicasting receive permissions */
-                       rctl |= PCH_GBE_ADD_FIL_EN;
-                       rctl &= ~PCH_GBE_MLT_FIL_EN;
-               } else {
-                       rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
-               }
-       }
+
        iowrite32(rctl, &hw->reg->RX_MODE);
 
-       if (mc_count >= PCH_GBE_MAR_ENTRIES)
-               return;
-       mta_list = kmalloc_array(ETH_ALEN, mc_count, GFP_ATOMIC);
-       if (!mta_list)
+       /* If we're not using multicast filtering then there's no point
+        * configuring the unused MAC address registers.
+        */
+       if (!(rctl & PCH_GBE_MLT_FIL_EN))
                return;
 
-       /* The shared function expects a packed array of only addresses. */
-       i = 0;
-       netdev_for_each_mc_addr(ha, netdev) {
-               if (i == mc_count)
-                       break;
-               memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
+       /* Load the first set of multicast addresses into MAC address registers
+        * for use by hardware filtering.
+        */
+       i = 1;
+       netdev_for_each_mc_addr(ha, netdev)
+               pch_gbe_mac_mar_set(hw, ha->addr, i++);
+
+       /* If there are spare MAC registers, mask & clear them */
+       for (; i < PCH_GBE_MAR_ENTRIES; i++) {
+               /* Clear MAC address mask */
+               adrmask = ioread32(&hw->reg->ADDR_MASK);
+               iowrite32(adrmask | BIT(i), &hw->reg->ADDR_MASK);
+               /* wait busy */
+               pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
+               /* Clear MAC address */
+               iowrite32(0, &hw->reg->mac_adr[i].high);
+               iowrite32(0, &hw->reg->mac_adr[i].low);
        }
-       pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
-                                       PCH_GBE_MAR_ENTRIES);
-       kfree(mta_list);
 
        netdev_dbg(netdev,
                 "RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
@@ -2437,7 +2396,7 @@ static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
        }
        pci_set_master(pdev);
        pci_enable_wake(pdev, PCI_D0, 0);
-       pch_gbe_hal_power_up_phy(hw);
+       pch_gbe_phy_power_up(hw);
        pch_gbe_reset(adapter);
        /* Clear wake up status */
        pch_gbe_mac_set_wol_event(hw, 0);
@@ -2482,7 +2441,7 @@ static int __pch_gbe_suspend(struct pci_dev *pdev)
                pch_gbe_mac_set_wol_event(hw, wufc);
                pci_disable_device(pdev);
        } else {
-               pch_gbe_hal_power_down_phy(hw);
+               pch_gbe_phy_power_down(hw);
                pch_gbe_mac_set_wol_event(hw, wufc);
                pci_disable_device(pdev);
        }
@@ -2511,7 +2470,7 @@ static int pch_gbe_resume(struct device *device)
                return err;
        }
        pci_set_master(pdev);
-       pch_gbe_hal_power_up_phy(hw);
+       pch_gbe_phy_power_up(hw);
        pch_gbe_reset(adapter);
        /* Clear wake on lan control and status */
        pch_gbe_mac_set_wol_event(hw, 0);
@@ -2541,7 +2500,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
        cancel_work_sync(&adapter->reset_task);
        unregister_netdev(netdev);
 
-       pch_gbe_hal_phy_hw_reset(&adapter->hw);
+       pch_gbe_phy_hw_reset(&adapter->hw);
 
        free_netdev(netdev);
 }
@@ -2627,10 +2586,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
                dev_err(&pdev->dev, "PHY initialize error\n");
                goto err_free_adapter;
        }
-       pch_gbe_hal_get_bus_info(&adapter->hw);
 
        /* Read the MAC address. and store to the private data */
-       ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
+       ret = pch_gbe_mac_read_mac_addr(&adapter->hw);
        if (ret) {
                dev_err(&pdev->dev, "MAC address Read Error\n");
                goto err_free_adapter;
@@ -2677,7 +2635,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
        return 0;
 
 err_free_adapter:
-       pch_gbe_hal_phy_hw_reset(&adapter->hw);
+       pch_gbe_phy_hw_reset(&adapter->hw);
 err_free_netdev:
        free_netdev(netdev);
        return ret;
@@ -2776,32 +2734,7 @@ static struct pci_driver pch_gbe_driver = {
        .shutdown = pch_gbe_shutdown,
        .err_handler = &pch_gbe_err_handler
 };
-
-
-static int __init pch_gbe_init_module(void)
-{
-       int ret;
-
-       pr_info("EG20T PCH Gigabit Ethernet Driver - version %s\n",DRV_VERSION);
-       ret = pci_register_driver(&pch_gbe_driver);
-       if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
-               if (copybreak == 0) {
-                       pr_info("copybreak disabled\n");
-               } else {
-                       pr_info("copybreak enabled for packets <= %u bytes\n",
-                               copybreak);
-               }
-       }
-       return ret;
-}
-
-static void __exit pch_gbe_exit_module(void)
-{
-       pci_unregister_driver(&pch_gbe_driver);
-}
-
-module_init(pch_gbe_init_module);
-module_exit(pch_gbe_exit_module);
+module_pci_driver(pch_gbe_driver);
 
 MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
 MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
@@ -2809,8 +2742,4 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
 
-module_param(copybreak, uint, 0644);
-MODULE_PARM_DESC(copybreak,
-       "Maximum size of packet that is copied to a new buffer on receive");
-
 /* pch_gbe_main.c */
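
module_pci_driver() replaces the hand-written init/exit pair above; the copybreak parameter the old init routine printed has no other user in this diff, so it goes away with it. The macro expands to roughly:

        static int __init pch_gbe_driver_init(void)
        {
                return pci_register_driver(&pch_gbe_driver);
        }
        module_init(pch_gbe_driver_init);

        static void __exit pch_gbe_driver_exit(void)
        {
                pci_unregister_driver(&pch_gbe_driver);
        }
        module_exit(pch_gbe_driver_exit);
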
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index a5cad5ea9436bae73fd8fa58f2cfb01b7c713a63..6b35b573beef292148b9d392c2af739e0a1fc334 100644 (file)
@@ -184,7 +184,7 @@ s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data)
  * pch_gbe_phy_sw_reset - PHY software reset
  * @hw:                    Pointer to the HW structure
  */
-void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw)
+static void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw)
 {
        u16 phy_ctrl;
 
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 95ad0151ad028aca6312f3b0c65157a3f2d0ba7d..23ac38711619207c93e4a5e378c85298b8145595 100644 (file)
 
 #define PCH_GBE_PHY_REGS_LEN           32
 #define        PCH_GBE_PHY_RESET_DELAY_US      10
-#define PCH_GBE_MAC_IFOP_RGMII
 
 s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw);
 s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data);
 s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data);
-void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw);
 void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw);
 void pch_gbe_phy_power_up(struct pch_gbe_hw *hw);
 void pch_gbe_phy_power_down(struct pch_gbe_hw *hw);
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
index b5ea2a56106ef3c82571c354c318b8fe40086578..1df28f2edd1f9b051ede136997e1e99a6a4c83fc 100644 (file)
@@ -2,7 +2,7 @@
 # Packet engine device configuration
 #
 
-config NET_PACKET_ENGINE
+config NET_VENDOR_PACKET_ENGINES
        bool "Packet Engine devices"
        default y
        depends on PCI
@@ -14,7 +14,7 @@ config NET_PACKET_ENGINE
          the questions about packet engine devices. If you say Y, you will
          be asked for your specific card in the following questions.
 
-if NET_PACKET_ENGINE
+if NET_VENDOR_PACKET_ENGINES
 
 config HAMACHI
        tristate "Packet Engines Hamachi GNIC-II support"
@@ -40,4 +40,4 @@ config YELLOWFIN
          To compile this driver as a module, choose M here: the module
          will be called yellowfin.  This is recommended.
 
-endif # NET_PACKET_ENGINE
+endif # NET_VENDOR_PACKET_ENGINES
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 1cd39c9a03455595d23c490313043f416309d6f3..52ad8062133521a31254789fc43126e80142b0bc 100644 (file)
@@ -566,9 +566,8 @@ static int
 netxen_send_cmd_descs(struct netxen_adapter *adapter,
                struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
 {
-       u32 i, producer, consumer;
+       u32 i, producer;
        struct netxen_cmd_buffer *pbuf;
-       struct cmd_desc_type0 *cmd_desc;
        struct nx_host_tx_ring *tx_ring;
 
        i = 0;
@@ -580,7 +579,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
        __netif_tx_lock_bh(tx_ring->txq);
 
        producer = tx_ring->producer;
-       consumer = tx_ring->sw_consumer;
 
        if (nr_desc >= netxen_tx_avail(tx_ring)) {
                netif_tx_stop_queue(tx_ring->txq);
@@ -595,8 +593,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
        }
 
        do {
-               cmd_desc = &cmd_desc_arr[i];
-
                pbuf = &tx_ring->cmd_buf_arr[producer];
                pbuf->skb = NULL;
                pbuf->frag_count = 0;
@@ -2350,7 +2346,7 @@ static int netxen_md_entry_err_chk(struct netxen_adapter *adapter,
 static int netxen_parse_md_template(struct netxen_adapter *adapter)
 {
        int num_of_entries, buff_level, e_cnt, esize;
-       int end_cnt = 0, rv = 0, sane_start = 0, sane_end = 0;
+       int rv = 0, sane_start = 0, sane_end = 0;
        char *dbuff;
        void *template_buff = adapter->mdump.md_template;
        char *dump_buff = adapter->mdump.md_capture_buff;
@@ -2386,8 +2382,6 @@ static int netxen_parse_md_template(struct netxen_adapter *adapter)
                        break;
                case RDEND:
                        entry->hdr.driver_flags |= NX_DUMP_SKIP;
-                       if (!sane_end)
-                               end_cnt = e_cnt;
                        sane_end += 1;
                        break;
                case CNTRL:
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 8259e8309320ae9ea3e000a2a048be449ba708cb..69aa7fc392c5e4ad1cbcd9025f56bffdf3aa92c7 100644 (file)
@@ -2073,7 +2073,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        struct skb_frag_struct *frag;
 
        u32 producer;
-       int frag_count, no_of_desc;
+       int frag_count;
        u32 num_txd = tx_ring->num_desc;
 
        frag_count = skb_shinfo(skb)->nr_frags + 1;
@@ -2093,8 +2093,6 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
                frag_count = 1 + skb_shinfo(skb)->nr_frags;
        }
-       /* 4 fragments per cmd des */
-       no_of_desc = (frag_count + 3) >> 2;
 
        if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
                netif_stop_queue(netdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 8f31406ec89407713b2ad32c81a30185b2c05727..12b4c2ab57966741c5698f81c1a74a4fe5f4e70a 100644 (file)
@@ -221,7 +221,6 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
        struct qed_hw_info *p_info = &p_hwfn->hw_info;
        enum qed_pci_personality personality;
        enum dcbx_protocol_type id;
-       char *name;
        int i;
 
        for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
@@ -231,7 +230,6 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
                        continue;
 
                personality = qed_dcbx_app_update[i].personality;
-               name = qed_dcbx_app_update[i].name;
 
                qed_dcbx_set_params(p_data, p_info, enable,
                                    prio, tc, type, personality);
@@ -255,9 +253,8 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
                *type = DCBX_PROTOCOL_ROCE_V2;
        } else {
                *type = DCBX_MAX_PROTOCOL_TYPE;
-               DP_ERR(p_hwfn,
-                      "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
-                      id, app_prio_bitmap);
+               DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n",
+                      app_prio_bitmap);
                return false;
        }
 
@@ -710,9 +707,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
        p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
 
        memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
-              ARRAY_SIZE(p_local->local_chassis_id));
+              sizeof(p_local->local_chassis_id));
        memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
-              ARRAY_SIZE(p_local->local_port_id));
+              sizeof(p_local->local_port_id));
 }
 
 static void
@@ -724,9 +721,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
        p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
 
        memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
-              ARRAY_SIZE(p_remote->peer_chassis_id));
+              sizeof(p_remote->peer_chassis_id));
        memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
-              ARRAY_SIZE(p_remote->peer_port_id));
+              sizeof(p_remote->peer_port_id));
 }
 
 static int
@@ -1479,8 +1476,8 @@ static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
                *cap = 0x80;
                break;
        case DCB_CAP_ATTR_DCBX:
-               *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
-                       DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_STATIC);
+               *cap = (DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE |
+                       DCB_CAP_DCBX_STATIC);
                break;
        default:
                *cap = false;
@@ -1548,8 +1545,6 @@ static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
        if (!dcbx_info)
                return 0;
 
-       if (dcbx_info->operational.enabled)
-               mode |= DCB_CAP_DCBX_LLD_MANAGED;
        if (dcbx_info->operational.ieee)
                mode |= DCB_CAP_DCBX_VER_IEEE;
        if (dcbx_info->operational.cee)
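
The memcpy() fixes above correct a classic mix-up: ARRAY_SIZE() yields an element count while memcpy() wants a byte count, and the two only coincide for byte arrays. The fix implies the LLDP id arrays here are wider than one byte, so ARRAY_SIZE() under-copied. Generic illustration:

        u32 src[4], dst[4];

        memcpy(dst, src, ARRAY_SIZE(src));  /* wrong: copies 4 bytes  */
        memcpy(dst, src, sizeof(src));      /* right: copies 16 bytes */
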
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 329781cda77fbecc88328ea95f00e39d4be5db9b..e5249b4741d03f7c347c70a861288b787653741a 100644 (file)
@@ -1804,7 +1804,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
                        DP_INFO(p_hwfn, "Failed to update driver state\n");
 
                rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
-                                              QED_OV_ESWITCH_VEB);
+                                              QED_OV_ESWITCH_NONE);
                if (rc)
                        DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
        }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index c97ebd681c471196cb4135deafbf8e07efc9d615..012973d75ad039436fb0007e9452eb0565f4938c 100644 (file)
@@ -201,8 +201,9 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
 
        skb = build_skb(buffer->data, 0);
        if (!skb) {
-               rc = -ENOMEM;
-               goto out_post;
+               DP_INFO(cdev, "Failed to build SKB\n");
+               kfree(buffer->data);
+               goto out_post1;
        }
 
        data->u.placement_offset += NET_SKB_PAD;
@@ -224,8 +225,14 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
                cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
                                      data->opaque_data_0,
                                      data->opaque_data_1);
+       } else {
+               DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
+                                   QED_MSG_LL2 | QED_MSG_STORAGE),
+                          "Dropping the packet\n");
+               kfree(buffer->data);
        }
 
+out_post1:
        /* Update Buffer information and update FW producer */
        buffer->data = new_data;
        buffer->phys_addr = new_phys_addr;
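
The qed_ll2 fix above plugs a leak: build_skb() takes ownership of the buffer only when it succeeds, so on failure (and on the new explicit drop path) the caller still owns buffer->data and must kfree() it before reposting the ring entry. The pattern in miniature, with the repost label hypothetical:

        skb = build_skb(data, 0);
        if (!skb) {
                kfree(data);    /* still ours: build_skb() did not take it */
                goto repost;    /* refill the ring entry with a fresh buffer */
        }
        /* From here the buffer is released via kfree_skb()/consume_skb(). */
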
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index b04d57ca5176ee65f348bb5882965e19f107e2f8..0cbc74d6ca8b4914b01c4e12a253a59dcee21bf8 100644 (file)
@@ -567,8 +567,16 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
                /* Fastpath interrupts */
                for (j = 0; j < 64; j++) {
                        if ((0x2ULL << j) & status) {
-                               hwfn->simd_proto_handler[j].func(
-                                       hwfn->simd_proto_handler[j].token);
+                               struct qed_simd_fp_handler *p_handler =
+                                       &hwfn->simd_proto_handler[j];
+
+                               if (p_handler->func)
+                                       p_handler->func(p_handler->token);
+                               else
+                                       DP_NOTICE(hwfn,
+                                                 "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
+                                                 j, status);
+
                                status &= ~(0x2ULL << j);
                                rc = IRQ_HANDLED;
                        }
@@ -781,6 +789,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
        /* We want a minimum of one slowpath and one fastpath vector per hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
 
+       if (is_kdump_kernel()) {
+               DP_INFO(cdev,
+                       "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
+                       cdev->int_params.in.min_msix_cnt);
+               cdev->int_params.in.num_vectors =
+                       cdev->int_params.in.min_msix_cnt;
+       }
+
        rc = qed_set_int_mode(cdev, false);
        if (rc)  {
                DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index f01bf52bc381f6f02c33ee3d9df4a90982cf8245..fd59cf45f4be8cb008d8728398870cc1ab41210b 100644 (file)
@@ -4513,6 +4513,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
 static int qed_sriov_enable(struct qed_dev *cdev, int num)
 {
        struct qed_iov_vf_init_params params;
+       struct qed_hwfn *hwfn;
+       struct qed_ptt *ptt;
        int i, j, rc;
 
        if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
@@ -4525,8 +4527,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
 
        /* Initialize HW for VF access */
        for_each_hwfn(cdev, j) {
-               struct qed_hwfn *hwfn = &cdev->hwfns[j];
-               struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+               hwfn = &cdev->hwfns[j];
+               ptt = qed_ptt_acquire(hwfn);
 
                /* Make sure not to use more than 16 queues per VF */
                params.num_queues = min_t(int,
@@ -4562,6 +4564,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
                goto err;
        }
 
+       hwfn = QED_LEADING_HWFN(cdev);
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt) {
+               DP_ERR(hwfn, "Failed to acquire ptt\n");
+               rc = -EBUSY;
+               goto err;
+       }
+
+       rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
+       if (rc)
+               DP_INFO(cdev, "Failed to update eswitch mode\n");
+       qed_ptt_release(hwfn, ptt);
+
        return num;
 
 err:
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index b823bfe2ea4d6a6851699ef225265dbd333b0143..f9a327c821eb608eae5743250bb0dbc55bb1fc2f 100644 (file)
@@ -1116,7 +1116,6 @@ int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
        case XDP_SETUP_PROG:
                return qede_xdp_set(edev, xdp->prog);
        case XDP_QUERY_PROG:
-               xdp->prog_attached = !!edev->xdp_prog;
                xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
                return 0;
        default:
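
qede no longer fills prog_attached: as of this series the core derives attachment state from prog_id, and drivers with a hardware path report it via the separate XDP_QUERY_PROG_HW command (see the nfp hunk earlier). The minimal query handler is now just:

        case XDP_QUERY_PROG:
                xdp->prog_id = prog ? prog->aux->id : 0;  /* 0 means detached */
                return 0;
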
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 02adb513f4756cb58c423936213bdcb4158d1dfa..013ff567283c738f342ca5d6f5358e30ca6daa72 100644 (file)
@@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
 {
        struct qede_ptp *ptp = edev->ptp;
 
-       if (!ptp)
-               return -EIO;
+       if (!ptp) {
+               info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+                                       SOF_TIMESTAMPING_RX_SOFTWARE |
+                                       SOF_TIMESTAMPING_SOFTWARE;
+               info->phc_index = -1;
+
+               return 0;
+       }
 
        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 0c744b9c6e0adf96f91d6aba6c7cda34b208c7fe..77e386ebff09c110ecd77e6c289e3354fe52cd22 100644 (file)
@@ -212,7 +212,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
                        vp->max_tx_bw = MAX_BW;
                        vp->min_tx_bw = MIN_BW;
                        vp->spoofchk = false;
-                       random_ether_addr(vp->mac);
+                       eth_random_addr(vp->mac);
                        dev_info(&adapter->pdev->dev,
                                 "MAC Address %pM is configured for VF %d\n",
                                 vp->mac, i);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index b9a7548ec6a0a7ed2cc7939b4adac70188b37cd8..0afc3d335d562d24466b9192aea291b910ebcdfe 100644 (file)
@@ -210,7 +210,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
        rmnet_dev->netdev_ops = &rmnet_vnd_ops;
        rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
        rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
-       random_ether_addr(rmnet_dev->dev_addr);
+       eth_random_addr(rmnet_dev->dev_addr);
        rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
 
        /* Raw IP mode */
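
random_ether_addr() was a compatibility alias for eth_random_addr() (linux/etherdevice.h), which the qlcnic and rmnet hunks now call directly; it fills in a random unicast, locally administered MAC:

        #include <linux/etherdevice.h>

        u8 mac[ETH_ALEN];

        eth_random_addr(mac);   /* clears the multicast bit, sets the
                                 * locally-administered bit */
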
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 7c69f4c8134da8d13e85235d830c2b58c975b740..e1cd934c2e4f40b7d423fc1bc7870c63034ae739 100644 (file)
@@ -99,7 +99,7 @@ config R8169
        depends on PCI
        select FW_LOADER
        select CRC32
-       select MII
+       select PHYLIB
        ---help---
          Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
 
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 75dfac0248f45cb423fd9883e38349a456b1dc0d..3d50378a11d701983f053e6fc33eb9d439a55253 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/etherdevice.h>
 #include <linux/delay.h>
 #include <linux/ethtool.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
 #include <linux/if_vlan.h>
 #include <linux/crc32.h>
 #include <linux/in.h>
@@ -25,7 +25,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/pm_runtime.h>
 #include <linux/firmware.h>
-#include <linux/pci-aspm.h>
 #include <linux/prefetch.h>
 #include <linux/ipv6.h>
 #include <net/ip6_checksum.h>
@@ -35,7 +34,6 @@
 
 #define RTL8169_VERSION "2.3LK-NAPI"
 #define MODULENAME "r8169"
-#define PFX MODULENAME ": "
 
 #define FIRMWARE_8168D_1       "rtl_nic/rtl8168d-1.fw"
 #define FIRMWARE_8168D_2       "rtl_nic/rtl8168d-2.fw"
 #define FIRMWARE_8107E_1       "rtl_nic/rtl8107e-1.fw"
 #define FIRMWARE_8107E_2       "rtl_nic/rtl8107e-2.fw"
 
-#ifdef RTL8169_DEBUG
-#define assert(expr) \
-       if (!(expr)) {                                  \
-               printk( "Assertion failed! %s,%s,%s,line=%d\n", \
-               #expr,__FILE__,__func__,__LINE__);              \
-       }
-#define dprintk(fmt, args...) \
-       do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
-#else
-#define assert(expr) do {} while (0)
-#define dprintk(fmt, args...)  do {} while (0)
-#endif /* RTL8169_DEBUG */
-
 #define R8169_MSG_DEFAULT \
        (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
 
@@ -95,7 +80,6 @@ static const int multicast_filter_limit = 32;
 #define R8169_RX_RING_BYTES    (NUM_RX_DESC * sizeof(struct RxDesc))
 
 #define RTL8169_TX_TIMEOUT     (6*HZ)
-#define RTL8169_PHY_TIMEOUT    (10*HZ)
 
 /* write/read MMIO register */
 #define RTL_W8(tp, reg, val8)  writeb((val8), tp->mmio_addr + (reg))
@@ -399,12 +383,6 @@ enum rtl_registers {
        FuncForceEvent  = 0xfc,
 };
 
-enum rtl8110_registers {
-       TBICSR                  = 0x64,
-       TBI_ANAR                = 0x68,
-       TBI_LPAR                = 0x6a,
-};
-
 enum rtl8168_8101_registers {
        CSIDR                   = 0x64,
        CSIAR                   = 0x68,
@@ -571,14 +549,6 @@ enum rtl_register_content {
        PMEStatus       = (1 << 0),     /* PME status can be reset by PCI RST# */
        ASPM_en         = (1 << 0),     /* ASPM enable */
 
-       /* TBICSR p.28 */
-       TBIReset        = 0x80000000,
-       TBILoopback     = 0x40000000,
-       TBINwEnable     = 0x20000000,
-       TBINwRestart    = 0x10000000,
-       TBILinkOk       = 0x02000000,
-       TBINwComplete   = 0x01000000,
-
        /* CPlusCmd p.31 */
        EnableBist      = (1 << 15),    // 8168 8101
        Mac_dbgo_oe     = (1 << 14),    // 8168 8101
@@ -732,7 +702,6 @@ enum rtl_flag {
        RTL_FLAG_TASK_ENABLED,
        RTL_FLAG_TASK_SLOW_PENDING,
        RTL_FLAG_TASK_RESET_PENDING,
-       RTL_FLAG_TASK_PHY_PENDING,
        RTL_FLAG_MAX
 };
 
@@ -760,7 +729,6 @@ struct rtl8169_private {
        dma_addr_t RxPhyAddr;
        void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
        struct ring_info tx_skb[NUM_TX_DESC];   /* Tx data buffers */
-       struct timer_list timer;
        u16 cp_cmd;
 
        u16 event_slow;
@@ -776,14 +744,7 @@ struct rtl8169_private {
                void (*disable)(struct rtl8169_private *);
        } jumbo_ops;
 
-       int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
-       int (*get_link_ksettings)(struct net_device *,
-                                 struct ethtool_link_ksettings *);
-       void (*phy_reset_enable)(struct rtl8169_private *tp);
        void (*hw_start)(struct rtl8169_private *tp);
-       unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
-       unsigned int (*link_ok)(struct rtl8169_private *tp);
-       int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
        bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *);
 
        struct {
@@ -792,7 +753,8 @@ struct rtl8169_private {
                struct work_struct work;
        } wk;
 
-       struct mii_if_info mii;
+       unsigned supports_gmii:1;
+       struct mii_bus *mii_bus;
        dma_addr_t counters_phys_addr;
        struct rtl8169_counters *counters;
        struct rtl8169_tc_offsets tc_offset;
@@ -1143,21 +1105,6 @@ static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
        rtl_writephy(tp, reg_addr, (val & ~m) | p);
 }
 
-static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
-                          int val)
-{
-       struct rtl8169_private *tp = netdev_priv(dev);
-
-       rtl_writephy(tp, location, val);
-}
-
-static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
-{
-       struct rtl8169_private *tp = netdev_priv(dev);
-
-       return rtl_readphy(tp, location);
-}
-
 DECLARE_RTL_COND(rtl_ephyar_cond)
 {
        return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
@@ -1478,54 +1425,22 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
        RTL_R8(tp, ChipCmd);
 }
 
-static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
-{
-       return RTL_R32(tp, TBICSR) & TBIReset;
-}
-
-static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
-{
-       return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
-}
-
-static unsigned int rtl8169_tbi_link_ok(struct rtl8169_private *tp)
-{
-       return RTL_R32(tp, TBICSR) & TBILinkOk;
-}
-
-static unsigned int rtl8169_xmii_link_ok(struct rtl8169_private *tp)
-{
-       return RTL_R8(tp, PHYstatus) & LinkStatus;
-}
-
-static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
-{
-       RTL_W32(tp, TBICSR, RTL_R32(tp, TBICSR) | TBIReset);
-}
-
-static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
-{
-       unsigned int val;
-
-       val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
-       rtl_writephy(tp, MII_BMCR, val & 0xffff);
-}
-
 static void rtl_link_chg_patch(struct rtl8169_private *tp)
 {
        struct net_device *dev = tp->dev;
+       struct phy_device *phydev = dev->phydev;
 
        if (!netif_running(dev))
                return;
 
        if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
            tp->mac_version == RTL_GIGA_MAC_VER_38) {
-               if (RTL_R8(tp, PHYstatus) & _1000bpsF) {
+               if (phydev->speed == SPEED_1000) {
                        rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
                                      ERIAR_EXGMAC);
                        rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
                                      ERIAR_EXGMAC);
-               } else if (RTL_R8(tp, PHYstatus) & _100bps) {
+               } else if (phydev->speed == SPEED_100) {
                        rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
                                      ERIAR_EXGMAC);
                        rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
@@ -1543,7 +1458,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
                             ERIAR_EXGMAC);
        } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
                   tp->mac_version == RTL_GIGA_MAC_VER_36) {
-               if (RTL_R8(tp, PHYstatus) & _1000bpsF) {
+               if (phydev->speed == SPEED_1000) {
                        rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
                                      ERIAR_EXGMAC);
                        rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
@@ -1555,7 +1470,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
                                      ERIAR_EXGMAC);
                }
        } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
-               if (RTL_R8(tp, PHYstatus) & _10bps) {
+               if (phydev->speed == SPEED_10) {
                        rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
                                      ERIAR_EXGMAC);
                        rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
@@ -1567,25 +1482,6 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
        }
 }
 
-static void rtl8169_check_link_status(struct net_device *dev,
-                                     struct rtl8169_private *tp)
-{
-       struct device *d = tp_to_dev(tp);
-
-       if (tp->link_ok(tp)) {
-               rtl_link_chg_patch(tp);
-               /* This is to cancel a scheduled suspend if there's one. */
-               pm_request_resume(d);
-               netif_carrier_on(dev);
-               if (net_ratelimit())
-                       netif_info(tp, ifup, dev, "link up\n");
-       } else {
-               netif_carrier_off(dev);
-               netif_info(tp, ifdown, dev, "link down\n");
-               pm_runtime_idle(d);
-       }
-}
-
 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
 
 static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
@@ -1626,21 +1522,11 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
 static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
-       struct device *d = tp_to_dev(tp);
-
-       pm_runtime_get_noresume(d);
 
        rtl_lock_work(tp);
-
        wol->supported = WAKE_ANY;
-       if (pm_runtime_active(d))
-               wol->wolopts = __rtl8169_get_wol(tp);
-       else
-               wol->wolopts = tp->saved_wolopts;
-
+       wol->wolopts = tp->saved_wolopts;
        rtl_unlock_work(tp);
-
-       pm_runtime_put_noidle(d);
 }
 
 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
@@ -1716,18 +1602,21 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        struct rtl8169_private *tp = netdev_priv(dev);
        struct device *d = tp_to_dev(tp);
 
+       if (wol->wolopts & ~WAKE_ANY)
+               return -EINVAL;
+
        pm_runtime_get_noresume(d);
 
        rtl_lock_work(tp);
 
+       tp->saved_wolopts = wol->wolopts;
+
        if (pm_runtime_active(d))
-               __rtl8169_set_wol(tp, wol->wolopts);
-       else
-               tp->saved_wolopts = wol->wolopts;
+               __rtl8169_set_wol(tp, tp->saved_wolopts);
 
        rtl_unlock_work(tp);
 
-       device_set_wakeup_enable(d, wol->wolopts);
+       device_set_wakeup_enable(d, tp->saved_wolopts);
 
        pm_runtime_put_noidle(d);
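
Design note on the two WoL hunks above: tp->saved_wolopts becomes the single
source of truth. get_wol now just reports the cached value, while set_wol
validates the request against WAKE_ANY, updates the cache, and only writes it
to the hardware when the device is runtime-active. A condensed sketch of the
resulting flow, using names from the driver itself:

    if (wol->wolopts & ~WAKE_ANY)           /* reject unsupported flags */
            return -EINVAL;

    tp->saved_wolopts = wol->wolopts;       /* cache unconditionally */

    if (pm_runtime_active(d))               /* write through only if awake */
            __rtl8169_set_wol(tp, tp->saved_wolopts);

    device_set_wakeup_enable(d, tp->saved_wolopts);
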
 
@@ -1759,124 +1648,6 @@ static int rtl8169_get_regs_len(struct net_device *dev)
        return R8169_REGS_SIZE;
 }
 
-static int rtl8169_set_speed_tbi(struct net_device *dev,
-                                u8 autoneg, u16 speed, u8 duplex, u32 ignored)
-{
-       struct rtl8169_private *tp = netdev_priv(dev);
-       int ret = 0;
-       u32 reg;
-
-       reg = RTL_R32(tp, TBICSR);
-       if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
-           (duplex == DUPLEX_FULL)) {
-               RTL_W32(tp, TBICSR, reg & ~(TBINwEnable | TBINwRestart));
-       } else if (autoneg == AUTONEG_ENABLE)
-               RTL_W32(tp, TBICSR, reg | TBINwEnable | TBINwRestart);
-       else {
-               netif_warn(tp, link, dev,
-                          "incorrect speed setting refused in TBI mode\n");
-               ret = -EOPNOTSUPP;
-       }
-
-       return ret;
-}
-
-static int rtl8169_set_speed_xmii(struct net_device *dev,
-                                 u8 autoneg, u16 speed, u8 duplex, u32 adv)
-{
-       struct rtl8169_private *tp = netdev_priv(dev);
-       int giga_ctrl, bmcr;
-       int rc = -EINVAL;
-
-       rtl_writephy(tp, 0x1f, 0x0000);
-
-       if (autoneg == AUTONEG_ENABLE) {
-               int auto_nego;
-
-               auto_nego = rtl_readphy(tp, MII_ADVERTISE);
-               auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
-                               ADVERTISE_100HALF | ADVERTISE_100FULL);
-
-               if (adv & ADVERTISED_10baseT_Half)
-                       auto_nego |= ADVERTISE_10HALF;
-               if (adv & ADVERTISED_10baseT_Full)
-                       auto_nego |= ADVERTISE_10FULL;
-               if (adv & ADVERTISED_100baseT_Half)
-                       auto_nego |= ADVERTISE_100HALF;
-               if (adv & ADVERTISED_100baseT_Full)
-                       auto_nego |= ADVERTISE_100FULL;
-
-               auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
-
-               giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
-               giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
-
-               /* The 8100e/8101e/8102e do Fast Ethernet only. */
-               if (tp->mii.supports_gmii) {
-                       if (adv & ADVERTISED_1000baseT_Half)
-                               giga_ctrl |= ADVERTISE_1000HALF;
-                       if (adv & ADVERTISED_1000baseT_Full)
-                               giga_ctrl |= ADVERTISE_1000FULL;
-               } else if (adv & (ADVERTISED_1000baseT_Half |
-                                 ADVERTISED_1000baseT_Full)) {
-                       netif_info(tp, link, dev,
-                                  "PHY does not support 1000Mbps\n");
-                       goto out;
-               }
-
-               bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
-
-               rtl_writephy(tp, MII_ADVERTISE, auto_nego);
-               rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
-       } else {
-               if (speed == SPEED_10)
-                       bmcr = 0;
-               else if (speed == SPEED_100)
-                       bmcr = BMCR_SPEED100;
-               else
-                       goto out;
-
-               if (duplex == DUPLEX_FULL)
-                       bmcr |= BMCR_FULLDPLX;
-       }
-
-       rtl_writephy(tp, MII_BMCR, bmcr);
-
-       if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
-           tp->mac_version == RTL_GIGA_MAC_VER_03) {
-               if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
-                       rtl_writephy(tp, 0x17, 0x2138);
-                       rtl_writephy(tp, 0x0e, 0x0260);
-               } else {
-                       rtl_writephy(tp, 0x17, 0x2108);
-                       rtl_writephy(tp, 0x0e, 0x0000);
-               }
-       }
-
-       rc = 0;
-out:
-       return rc;
-}
-
-static int rtl8169_set_speed(struct net_device *dev,
-                            u8 autoneg, u16 speed, u8 duplex, u32 advertising)
-{
-       struct rtl8169_private *tp = netdev_priv(dev);
-       int ret;
-
-       ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
-       if (ret < 0)
-               goto out;
-
-       if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
-           (advertising & ADVERTISED_1000baseT_Full) &&
-           !pci_is_pcie(tp->pci_dev)) {
-               mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
-       }
-out:
-       return ret;
-}
-
 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
        netdev_features_t features)
 {
@@ -1940,76 +1711,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
 }
 
-static int rtl8169_get_link_ksettings_tbi(struct net_device *dev,
-                                         struct ethtool_link_ksettings *cmd)
-{
-       struct rtl8169_private *tp = netdev_priv(dev);
-       u32 status;
-       u32 supported, advertising;
-
-       supported =
-               SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
-       cmd->base.port = PORT_FIBRE;
-
-       status = RTL_R32(tp, TBICSR);
-       advertising = (status & TBINwEnable) ?  ADVERTISED_Autoneg : 0;
-       cmd->base.autoneg = !!(status & TBINwEnable);
-
-       cmd->base.speed = SPEED_1000;
-       cmd->base.duplex = DUPLEX_FULL; /* Always set */
-
-       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
-                                               supported);
-       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
-                                               advertising);
-
-       return 0;
-}
-
-static int rtl8169_get_link_ksettings_xmii(struct net_device *dev,
-                                          struct ethtool_link_ksettings *cmd)
-{
-       struct rtl8169_private *tp = netdev_priv(dev);
-
-       mii_ethtool_get_link_ksettings(&tp->mii, cmd);
-
-       return 0;
-}
-
-static int rtl8169_get_link_ksettings(struct net_device *dev,
-                                     struct ethtool_link_ksettings *cmd)
-{
-       struct rtl8169_private *tp = netdev_priv(dev);
-       int rc;
-
-       rtl_lock_work(tp);
-       rc = tp->get_link_ksettings(dev, cmd);
-       rtl_unlock_work(tp);
-
-       return rc;
-}
-
-static int rtl8169_set_link_ksettings(struct net_device *dev,
-                                     const struct ethtool_link_ksettings *cmd)
-{
-       struct rtl8169_private *tp = netdev_priv(dev);
-       int rc;
-       u32 advertising;
-
-       if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
-           cmd->link_modes.advertising))
-               return -EINVAL;
-
-       del_timer_sync(&tp->timer);
-
-       rtl_lock_work(tp);
-       rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
-                              cmd->base.duplex, advertising);
-       rtl_unlock_work(tp);
-
-       return rc;
-}
-
 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                             void *p)
 {
@@ -2185,13 +1886,6 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
        }
 }
 
-static int rtl8169_nway_reset(struct net_device *dev)
-{
-       struct rtl8169_private *tp = netdev_priv(dev);
-
-       return mii_nway_restart(&tp->mii);
-}
-
 /*
  * Interrupt coalescing
  *
@@ -2264,7 +1958,7 @@ static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev)
        const struct rtl_coalesce_info *ci;
        int rc;
 
-       rc = rtl8169_get_link_ksettings(dev, &ecmd);
+       rc = phy_ethtool_get_link_ksettings(dev, &ecmd);
        if (rc < 0)
                return ERR_PTR(rc);
 
@@ -2422,9 +2116,9 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
        .get_sset_count         = rtl8169_get_sset_count,
        .get_ethtool_stats      = rtl8169_get_ethtool_stats,
        .get_ts_info            = ethtool_op_get_ts_info,
-       .nway_reset             = rtl8169_nway_reset,
-       .get_link_ksettings     = rtl8169_get_link_ksettings,
-       .set_link_ksettings     = rtl8169_set_link_ksettings,
+       .nway_reset             = phy_ethtool_nway_reset,
+       .get_link_ksettings     = phy_ethtool_get_link_ksettings,
+       .set_link_ksettings     = phy_ethtool_set_link_ksettings,
 };
 
 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
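
With a phylib PHY attached, the driver-private get/set_link_ksettings and
nway_reset paths removed above are no longer needed: the generic
phy_ethtool_* helpers operate directly on dev->phydev. They assume the PHY
has been connected first (see r8169_phy_connect() further below). A sketch of
the wiring, with a hypothetical ops name:

    static const struct ethtool_ops example_ethtool_ops = {
            /* generic phylib-backed link management */
            .nway_reset         = phy_ethtool_nway_reset,
            .get_link_ksettings = phy_ethtool_get_link_ksettings,
            .set_link_ksettings = phy_ethtool_set_link_ksettings,
    };
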
@@ -2537,15 +2231,15 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
                           "unknown MAC, using family default\n");
                tp->mac_version = default_version;
        } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
-               tp->mac_version = tp->mii.supports_gmii ?
+               tp->mac_version = tp->supports_gmii ?
                                  RTL_GIGA_MAC_VER_42 :
                                  RTL_GIGA_MAC_VER_43;
        } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) {
-               tp->mac_version = tp->mii.supports_gmii ?
+               tp->mac_version = tp->supports_gmii ?
                                  RTL_GIGA_MAC_VER_45 :
                                  RTL_GIGA_MAC_VER_47;
        } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) {
-               tp->mac_version = tp->mii.supports_gmii ?
+               tp->mac_version = tp->supports_gmii ?
                                  RTL_GIGA_MAC_VER_46 :
                                  RTL_GIGA_MAC_VER_48;
        }
@@ -2553,7 +2247,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
 
 static void rtl8169_print_mac_version(struct rtl8169_private *tp)
 {
-       dprintk("mac_version = 0x%02x\n", tp->mac_version);
+       netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version);
 }
 
 struct phy_reg {
@@ -4405,62 +4099,16 @@ static void rtl_hw_phy_config(struct net_device *dev)
        }
 }
 
-static void rtl_phy_work(struct rtl8169_private *tp)
-{
-       struct timer_list *timer = &tp->timer;
-       unsigned long timeout = RTL8169_PHY_TIMEOUT;
-
-       assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
-
-       if (tp->phy_reset_pending(tp)) {
-               /*
-                * A busy loop could burn quite a few cycles on nowadays CPU.
-                * Let's delay the execution of the timer for a few ticks.
-                */
-               timeout = HZ/10;
-               goto out_mod_timer;
-       }
-
-       if (tp->link_ok(tp))
-               return;
-
-       netif_dbg(tp, link, tp->dev, "PHY reset until link up\n");
-
-       tp->phy_reset_enable(tp);
-
-out_mod_timer:
-       mod_timer(timer, jiffies + timeout);
-}
-
 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
 {
        if (!test_and_set_bit(flag, tp->wk.flags))
                schedule_work(&tp->wk.work);
 }
 
-static void rtl8169_phy_timer(struct timer_list *t)
-{
-       struct rtl8169_private *tp = from_timer(tp, t, timer);
-
-       rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
-}
-
-DECLARE_RTL_COND(rtl_phy_reset_cond)
-{
-       return tp->phy_reset_pending(tp);
-}
-
-static void rtl8169_phy_reset(struct net_device *dev,
-                             struct rtl8169_private *tp)
-{
-       tp->phy_reset_enable(tp);
-       rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
-}
-
 static bool rtl_tbi_enabled(struct rtl8169_private *tp)
 {
        return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
-           (RTL_R8(tp, PHYstatus) & TBI_Enable);
+              (RTL_R8(tp, PHYstatus) & TBI_Enable);
 }
 
 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
@@ -4468,7 +4116,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
        rtl_hw_phy_config(dev);
 
        if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
-               dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+               netif_dbg(tp, drv, dev,
+                         "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
                RTL_W8(tp, 0x82, 0x01);
        }
 
@@ -4478,23 +4127,18 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
                pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
 
        if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
-               dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+               netif_dbg(tp, drv, dev,
+                         "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
                RTL_W8(tp, 0x82, 0x01);
-               dprintk("Set PHY Reg 0x0bh = 0x00h\n");
+               netif_dbg(tp, drv, dev,
+                         "Set PHY Reg 0x0bh = 0x00h\n");
                rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
        }
 
-       rtl8169_phy_reset(dev, tp);
+       /* We may have called phy_speed_down before */
+       phy_speed_up(dev->phydev);
 
-       rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
-                         ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
-                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
-                         (tp->mii.supports_gmii ?
-                          ADVERTISED_1000baseT_Half |
-                          ADVERTISED_1000baseT_Full : 0));
-
-       if (rtl_tbi_enabled(tp))
-               netif_info(tp, link, dev, "TBI auto-negotiating\n");
+       genphy_soft_reset(dev->phydev);
 }
 
 static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -4539,34 +4183,10 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
 
 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-       struct rtl8169_private *tp = netdev_priv(dev);
-       struct mii_ioctl_data *data = if_mii(ifr);
-
-       return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
-}
-
-static int rtl_xmii_ioctl(struct rtl8169_private *tp,
-                         struct mii_ioctl_data *data, int cmd)
-{
-       switch (cmd) {
-       case SIOCGMIIPHY:
-               data->phy_id = 32; /* Internal PHY */
-               return 0;
-
-       case SIOCGMIIREG:
-               data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
-               return 0;
-
-       case SIOCSMIIREG:
-               rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
-               return 0;
-       }
-       return -EOPNOTSUPP;
-}
+       if (!netif_running(dev))
+               return -ENODEV;
 
-static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
-{
-       return -EOPNOTSUPP;
+       return phy_mii_ioctl(dev->phydev, ifr, cmd);
 }
 
 static void rtl_init_mdio_ops(struct rtl8169_private *tp)
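
The ioctl hunk above drops the hand-rolled SIOCGMIIPHY / SIOCGMIIREG /
SIOCSMIIREG handling (and the TBI stub) in favour of phylib's
phy_mii_ioctl(), which implements the same MII ioctls on top of the attached
phy_device. The whole callback reduces to (hypothetical name, body as in the
patch):

    static int example_ioctl(struct net_device *dev, struct ifreq *ifr,
                             int cmd)
    {
            if (!netif_running(dev))
                    return -ENODEV;

            /* phylib handles SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG */
            return phy_mii_ioctl(dev->phydev, ifr, cmd);
    }
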
@@ -4594,30 +4214,6 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
        }
 }
 
-static void rtl_speed_down(struct rtl8169_private *tp)
-{
-       u32 adv;
-       int lpa;
-
-       rtl_writephy(tp, 0x1f, 0x0000);
-       lpa = rtl_readphy(tp, MII_LPA);
-
-       if (lpa & (LPA_10HALF | LPA_10FULL))
-               adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
-       else if (lpa & (LPA_100HALF | LPA_100FULL))
-               adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
-                     ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
-       else
-               adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
-                     ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
-                     (tp->mii.supports_gmii ?
-                      ADVERTISED_1000baseT_Half |
-                      ADVERTISED_1000baseT_Full : 0);
-
-       rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
-                         adv);
-}
-
 static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
 {
        switch (tp->mac_version) {
@@ -4639,56 +4235,15 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
 
 static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
 {
-       if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
+       if (!netif_running(tp->dev) || !__rtl8169_get_wol(tp))
                return false;
 
-       rtl_speed_down(tp);
+       phy_speed_down(tp->dev->phydev, false);
        rtl_wol_suspend_quirk(tp);
 
        return true;
 }
 
-static void r8168_phy_power_up(struct rtl8169_private *tp)
-{
-       rtl_writephy(tp, 0x1f, 0x0000);
-       switch (tp->mac_version) {
-       case RTL_GIGA_MAC_VER_11:
-       case RTL_GIGA_MAC_VER_12:
-       case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28:
-       case RTL_GIGA_MAC_VER_31:
-               rtl_writephy(tp, 0x0e, 0x0000);
-               break;
-       default:
-               break;
-       }
-       rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
-
-       /* give MAC/PHY some time to resume */
-       msleep(20);
-}
-
-static void r8168_phy_power_down(struct rtl8169_private *tp)
-{
-       rtl_writephy(tp, 0x1f, 0x0000);
-       switch (tp->mac_version) {
-       case RTL_GIGA_MAC_VER_32:
-       case RTL_GIGA_MAC_VER_33:
-       case RTL_GIGA_MAC_VER_40:
-       case RTL_GIGA_MAC_VER_41:
-               rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
-               break;
-
-       case RTL_GIGA_MAC_VER_11:
-       case RTL_GIGA_MAC_VER_12:
-       case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28:
-       case RTL_GIGA_MAC_VER_31:
-               rtl_writephy(tp, 0x0e, 0x0200);
-       default:
-               rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
-               break;
-       }
-}
-
 static void r8168_pll_power_down(struct rtl8169_private *tp)
 {
        if (r8168_check_dash(tp))
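
Power-saving note for the WoL path above: instead of the removed
rtl_speed_down(), which rewrote the advertisement registers by hand, the
driver now calls phy_speed_down() to let phylib negotiate the lowest
supported speed before suspend; the second argument selects whether to wait
for the renegotiation to complete, and false here means do not block. The
phy_speed_up() call added to rtl8169_init_phy() in an earlier hunk restores
full speed on resume. Sketch of the pairing:

    /* suspend path: drop link speed while only WoL needs the wire */
    phy_speed_down(dev->phydev, false);

    /* resume path: undo a previous phy_speed_down() */
    phy_speed_up(dev->phydev);
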
@@ -4701,8 +4256,6 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
        if (rtl_wol_pll_power_down(tp))
                return;
 
-       r8168_phy_power_down(tp);
-
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
        case RTL_GIGA_MAC_VER_37:
@@ -4754,7 +4307,9 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
                break;
        }
 
-       r8168_phy_power_up(tp);
+       phy_resume(tp->dev->phydev);
+       /* give MAC/PHY some time to resume */
+       msleep(20);
 }
 
 static void rtl_pll_power_down(struct rtl8169_private *tp)
@@ -5172,8 +4727,8 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
 
        if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
            tp->mac_version == RTL_GIGA_MAC_VER_03) {
-               dprintk("Set MAC Reg C+CR Offset 0xe0. "
-                       "Bit-3 and bit-14 MUST be 1\n");
+               netif_dbg(tp, drv, tp->dev,
+                         "Set MAC Reg C+CR Offset 0xe0. Bit 3 and Bit 14 MUST be 1\n");
                tp->cp_cmd |= (1 << 14);
        }
 
@@ -5236,12 +4791,7 @@ static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val)
        rtl_csi_write(tp, 0x070c, csi | val << 24);
 }
 
-static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
-{
-       rtl_csi_access_enable(tp, 0x17);
-}
-
-static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
+static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
 {
        rtl_csi_access_enable(tp, 0x27);
 }
@@ -5290,6 +4840,17 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
        RTL_W8(tp, Config3, data);
 }
 
+static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
+{
+       if (enable) {
+               RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
+               RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
+       } else {
+               RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+               RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+       }
+}
+
 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
 {
        RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
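
The new rtl_hw_aspm_clkreq_enable() helper above centralizes the
Config2/Config5 ASPM+ClkReq toggling that several hw_start routines
previously open-coded. The hunks below all use it to bracket EPHY
initialization (e_info stands for the per-chip table):

    /* disable ASPM and clock request before touching the EPHY ... */
    rtl_hw_aspm_clkreq_enable(tp, false);
    rtl_ephy_init(tp, e_info, ARRAY_SIZE(e_info));
    /* ... and re-enable afterwards, which the old code never did */
    rtl_hw_aspm_clkreq_enable(tp, true);
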
@@ -5337,7 +4898,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
                { 0x07, 0,      0x2000 }
        };
 
-       rtl_csi_access_enable_2(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
 
@@ -5346,7 +4907,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
 
 static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
 {
-       rtl_csi_access_enable_2(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
 
@@ -5359,7 +4920,7 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
 
 static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
 {
-       rtl_csi_access_enable_2(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
 
@@ -5383,7 +4944,7 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
                { 0x06, 0x0080, 0x0000 }
        };
 
-       rtl_csi_access_enable_2(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
 
@@ -5399,7 +4960,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
                { 0x03, 0x0400, 0x0220 }
        };
 
-       rtl_csi_access_enable_2(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
 
@@ -5413,14 +4974,14 @@ static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
 
 static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
 {
-       rtl_csi_access_enable_2(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        __rtl_hw_start_8168cp(tp);
 }
 
 static void rtl_hw_start_8168d(struct rtl8169_private *tp)
 {
-       rtl_csi_access_enable_2(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        rtl_disable_clock_request(tp);
 
@@ -5435,7 +4996,7 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
 
 static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
 {
-       rtl_csi_access_enable_1(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        if (tp->dev->mtu <= ETH_DATA_LEN)
                rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5453,7 +5014,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
                { 0x0c, 0x0100, 0x0020 }
        };
 
-       rtl_csi_access_enable_1(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
 
@@ -5482,7 +5043,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
                { 0x0a, 0x0000, 0x0040 }
        };
 
-       rtl_csi_access_enable_2(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
 
@@ -5507,7 +5068,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
                { 0x19, 0x0000, 0x0224 }
        };
 
-       rtl_csi_access_enable_1(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
 
@@ -5536,11 +5097,13 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
        RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
        RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
        RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
+
+       rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
 static void rtl_hw_start_8168f(struct rtl8169_private *tp)
 {
-       rtl_csi_access_enable_2(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
 
@@ -5611,7 +5174,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
        rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
 
-       rtl_csi_access_enable_1(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
 
@@ -5646,9 +5209,9 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
        rtl_hw_start_8168g(tp);
 
        /* disable aspm and clock request before access ephy */
-       RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
-       RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+       rtl_hw_aspm_clkreq_enable(tp, false);
        rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1));
+       rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
 static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@@ -5681,9 +5244,9 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
        rtl_hw_start_8168g(tp);
 
        /* disable aspm and clock request before access ephy */
-       RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
-       RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+       rtl_hw_aspm_clkreq_enable(tp, false);
        rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
+       rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
 static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
@@ -5700,8 +5263,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
        };
 
        /* disable aspm and clock request before access ephy */
-       RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
-       RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+       rtl_hw_aspm_clkreq_enable(tp, false);
        rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
 
        RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
@@ -5711,7 +5273,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
        rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
 
-       rtl_csi_access_enable_1(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
 
@@ -5780,6 +5342,8 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
        r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
        r8168_mac_ocp_write(tp, 0xc094, 0x0000);
        r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
+
+       rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
 static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
@@ -5793,7 +5357,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
        rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
 
-       rtl_csi_access_enable_1(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
 
@@ -5831,11 +5395,12 @@ static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
        };
 
        /* disable aspm and clock request before access ephy */
-       RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
-       RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+       rtl_hw_aspm_clkreq_enable(tp, false);
        rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1));
 
        rtl_hw_start_8168ep(tp);
+
+       rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
 static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
@@ -5847,14 +5412,15 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
        };
 
        /* disable aspm and clock request before access ephy */
-       RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
-       RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+       rtl_hw_aspm_clkreq_enable(tp, false);
        rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2));
 
        rtl_hw_start_8168ep(tp);
 
        RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
        RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
+
+       rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
 static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
@@ -5868,8 +5434,7 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
        };
 
        /* disable aspm and clock request before access ephy */
-       RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
-       RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+       rtl_hw_aspm_clkreq_enable(tp, false);
        rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3));
 
        rtl_hw_start_8168ep(tp);
@@ -5889,6 +5454,8 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
        data = r8168_mac_ocp_read(tp, 0xe860);
        data |= 0x0080;
        r8168_mac_ocp_write(tp, 0xe860, data);
+
+       rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
 static void rtl_hw_start_8168(struct rtl8169_private *tp)
@@ -6006,8 +5573,9 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
                break;
 
        default:
-               printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
-                      tp->dev->name, tp->mac_version);
+               netif_err(tp, drv, tp->dev,
+                         "unknown chipset (mac_version = %d)\n",
+                         tp->mac_version);
                break;
        }
 }
@@ -6026,7 +5594,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
        };
        u8 cfg1;
 
-       rtl_csi_access_enable_2(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        RTL_W8(tp, DBG_REG, FIX_NAK_1);
 
@@ -6045,7 +5613,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
 
 static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
 {
-       rtl_csi_access_enable_2(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
 
@@ -6100,7 +5668,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
                { 0x1e, 0, 0x4000 }
        };
 
-       rtl_csi_access_enable_2(tp);
+       rtl_set_def_aspm_entry_latency(tp);
 
        /* Force LAN exit from ASPM if Rx/Tx are not idle */
        RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
@@ -6384,7 +5952,6 @@ static void rtl_reset_work(struct rtl8169_private *tp)
        napi_enable(&tp->napi);
        rtl_hw_start(tp);
        netif_wake_queue(dev);
-       rtl8169_check_link_status(dev, tp);
 }
 
 static void rtl8169_tx_timeout(struct net_device *dev)
@@ -7001,7 +6568,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
                rtl8169_pcierr_interrupt(dev);
 
        if (status & LinkChg)
-               rtl8169_check_link_status(dev, tp);
+               phy_mac_interrupt(dev->phydev);
 
        rtl_irq_enable_all(tp);
 }
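
Link-change handling above now goes through phylib: on a LinkChg interrupt
the driver no longer polls its own link logic but notifies the PHY state
machine, which in turn invokes the r8169_phylink_handler() callback added
further below in place of the removed rtl8169_check_link_status():

    if (status & LinkChg)
            phy_mac_interrupt(dev->phydev);  /* let phylib update the link */
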
@@ -7015,7 +6582,6 @@ static void rtl_task(struct work_struct *work)
                /* XXX - keep rtl_slow_event_work() as first element. */
                { RTL_FLAG_TASK_SLOW_PENDING,   rtl_slow_event_work },
                { RTL_FLAG_TASK_RESET_PENDING,  rtl_reset_work },
-               { RTL_FLAG_TASK_PHY_PENDING,    rtl_phy_work }
        };
        struct rtl8169_private *tp =
                container_of(work, struct rtl8169_private, wk.work);
@@ -7084,11 +6650,51 @@ static void rtl8169_rx_missed(struct net_device *dev)
        RTL_W32(tp, RxMissed, 0);
 }
 
+static void r8169_phylink_handler(struct net_device *ndev)
+{
+       struct rtl8169_private *tp = netdev_priv(ndev);
+
+       if (netif_carrier_ok(ndev)) {
+               rtl_link_chg_patch(tp);
+               pm_request_resume(&tp->pci_dev->dev);
+       } else {
+               pm_runtime_idle(&tp->pci_dev->dev);
+       }
+
+       if (net_ratelimit())
+               phy_print_status(ndev->phydev);
+}
+
+static int r8169_phy_connect(struct rtl8169_private *tp)
+{
+       struct phy_device *phydev = mdiobus_get_phy(tp->mii_bus, 0);
+       phy_interface_t phy_mode;
+       int ret;
+
+       phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII :
+                  PHY_INTERFACE_MODE_MII;
+
+       ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler,
+                                phy_mode);
+       if (ret)
+               return ret;
+
+       if (!tp->supports_gmii)
+               phy_set_max_speed(phydev, SPEED_100);
+
+       /* Ensure to advertise everything, incl. pause */
+       phydev->advertising = phydev->supported;
+
+       phy_attached_info(phydev);
+
+       return 0;
+}
+
 static void rtl8169_down(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
-       del_timer_sync(&tp->timer);
+       phy_stop(dev->phydev);
 
        napi_disable(&tp->napi);
        netif_stop_queue(dev);
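
Taken together, r8169_phy_connect() and the hunks around it establish the
usual phylib lifecycle for this driver. A condensed sketch, using the names
from this patch with error handling trimmed:

    /* rtl_open(): attach to the single PHY at MDIO address 0 */
    retval = r8169_phy_connect(tp);
    ...
    phy_start(dev->phydev);         /* start the PHY state machine */

    /* rtl8169_down(): quiesce the PHY before tearing down the rings */
    phy_stop(dev->phydev);

    /* rtl8169_close(): detach; the PHY is re-connected on the next open */
    phy_disconnect(dev->phydev);
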
@@ -7129,6 +6735,8 @@ static int rtl8169_close(struct net_device *dev)
 
        cancel_work_sync(&tp->wk.work);
 
+       phy_disconnect(dev->phydev);
+
        pci_free_irq(pdev, 0, tp);
 
        dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
@@ -7148,7 +6756,7 @@ static void rtl8169_netpoll(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
-       rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), dev);
+       rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp);
 }
 #endif
 
@@ -7189,6 +6797,10 @@ static int rtl_open(struct net_device *dev)
        if (retval < 0)
                goto err_release_fw_2;
 
+       retval = r8169_phy_connect(tp);
+       if (retval)
+               goto err_free_irq;
+
        rtl_lock_work(tp);
 
        set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
@@ -7204,17 +6816,17 @@ static int rtl_open(struct net_device *dev)
        if (!rtl8169_init_counter_offsets(tp))
                netif_warn(tp, hw, dev, "counter reset/update failed\n");
 
+       phy_start(dev->phydev);
        netif_start_queue(dev);
 
        rtl_unlock_work(tp);
 
-       tp->saved_wolopts = 0;
        pm_runtime_put_sync(&pdev->dev);
-
-       rtl8169_check_link_status(dev, tp);
 out:
        return retval;
 
+err_free_irq:
+       pci_free_irq(pdev, 0, tp);
 err_release_fw_2:
        rtl_release_firmware(tp);
        rtl8169_rx_clear(tp);
@@ -7293,6 +6905,7 @@ static void rtl8169_net_suspend(struct net_device *dev)
        if (!netif_running(dev))
                return;
 
+       phy_stop(dev->phydev);
        netif_device_detach(dev);
        netif_stop_queue(dev);
 
@@ -7323,6 +6936,9 @@ static void __rtl8169_resume(struct net_device *dev)
        netif_device_attach(dev);
 
        rtl_pll_power_up(tp);
+       rtl8169_init_phy(dev, tp);
+
+       phy_start(tp->dev->phydev);
 
        rtl_lock_work(tp);
        napi_enable(&tp->napi);
@@ -7336,9 +6952,6 @@ static int rtl8169_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
-       struct rtl8169_private *tp = netdev_priv(dev);
-
-       rtl8169_init_phy(dev, tp);
 
        if (netif_running(dev))
                __rtl8169_resume(dev);
@@ -7352,13 +6965,10 @@ static int rtl8169_runtime_suspend(struct device *device)
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rtl8169_private *tp = netdev_priv(dev);
 
-       if (!tp->TxDescArray) {
-               rtl_pll_power_down(tp);
+       if (!tp->TxDescArray)
                return 0;
-       }
 
        rtl_lock_work(tp);
-       tp->saved_wolopts = __rtl8169_get_wol(tp);
        __rtl8169_set_wol(tp, WAKE_ANY);
        rtl_unlock_work(tp);
 
@@ -7383,11 +6993,8 @@ static int rtl8169_runtime_resume(struct device *device)
 
        rtl_lock_work(tp);
        __rtl8169_set_wol(tp, tp->saved_wolopts);
-       tp->saved_wolopts = 0;
        rtl_unlock_work(tp);
 
-       rtl8169_init_phy(dev, tp);
-
        __rtl8169_resume(dev);
 
        return 0;
@@ -7455,7 +7062,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
        rtl8169_hw_reset(tp);
 
        if (system_state == SYSTEM_POWER_OFF) {
-               if (__rtl8169_get_wol(tp) & WAKE_ANY) {
+               if (tp->saved_wolopts) {
                        rtl_wol_suspend_quirk(tp);
                        rtl_wol_shutdown_quirk(tp);
                }
@@ -7476,6 +7083,7 @@ static void rtl_remove_one(struct pci_dev *pdev)
        netif_napi_del(&tp->napi);
 
        unregister_netdev(dev);
+       mdiobus_unregister(tp->mii_bus);
 
        rtl_release_firmware(tp);
 
@@ -7561,6 +7169,68 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond)
        return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
 }
 
+static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
+{
+       struct rtl8169_private *tp = mii_bus->priv;
+
+       if (phyaddr > 0)
+               return -ENODEV;
+
+       return rtl_readphy(tp, phyreg);
+}
+
+static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
+                               int phyreg, u16 val)
+{
+       struct rtl8169_private *tp = mii_bus->priv;
+
+       if (phyaddr > 0)
+               return -ENODEV;
+
+       rtl_writephy(tp, phyreg, val);
+
+       return 0;
+}
+
+static int r8169_mdio_register(struct rtl8169_private *tp)
+{
+       struct pci_dev *pdev = tp->pci_dev;
+       struct phy_device *phydev;
+       struct mii_bus *new_bus;
+       int ret;
+
+       new_bus = devm_mdiobus_alloc(&pdev->dev);
+       if (!new_bus)
+               return -ENOMEM;
+
+       new_bus->name = "r8169";
+       new_bus->priv = tp;
+       new_bus->parent = &pdev->dev;
+       new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
+       snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x",
+                PCI_DEVID(pdev->bus->number, pdev->devfn));
+
+       new_bus->read = r8169_mdio_read_reg;
+       new_bus->write = r8169_mdio_write_reg;
+
+       ret = mdiobus_register(new_bus);
+       if (ret)
+               return ret;
+
+       phydev = mdiobus_get_phy(new_bus, 0);
+       if (!phydev) {
+               mdiobus_unregister(new_bus);
+               return -ENODEV;
+       }
+
+       /* PHY will be woken up in rtl_open() */
+       phy_suspend(phydev);
+
+       tp->mii_bus = new_bus;
+
+       return 0;
+}
+
 static void rtl_hw_init_8168g(struct rtl8169_private *tp)
 {
        u32 data;
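
r8169_mdio_register() above follows the standard mii_bus bring-up:
devm_mdiobus_alloc() so the bus structure itself is device-managed, a bus id
derived from the PCI device, accessors that only answer for PHY address 0,
then mdiobus_register() plus a check that a phy_device was actually found.
The teardown added to rtl_remove_one() and to the register_netdev() error
path below only needs mdiobus_unregister(); the allocation is freed
automatically:

    rc = r8169_mdio_register(tp);
    if (rc)
            return rc;

    rc = register_netdev(dev);
    if (rc)
            goto err_mdio_unregister;  /* mdiobus_unregister(tp->mii_bus) */
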
@@ -7618,7 +7288,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
        struct rtl8169_private *tp;
-       struct mii_if_info *mii;
        struct net_device *dev;
        int chipset, region, i;
        int rc;
@@ -7638,19 +7307,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        tp->dev = dev;
        tp->pci_dev = pdev;
        tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
-
-       mii = &tp->mii;
-       mii->dev = dev;
-       mii->mdio_read = rtl_mdio_read;
-       mii->mdio_write = rtl_mdio_write;
-       mii->phy_id_mask = 0x1f;
-       mii->reg_num_mask = 0x1f;
-       mii->supports_gmii = cfg->has_gmii;
-
-       /* disable ASPM completely as that cause random device stop working
-        * problems as well as full system hangs for some PCIe devices users */
-       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
-                                    PCIE_LINK_STATE_CLKPM);
+       tp->supports_gmii = cfg->has_gmii;
 
        /* enable device (incl. PCI PM wakeup and hotplug setup) */
        rc = pcim_enable_device(pdev);
@@ -7689,6 +7346,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Identify chip attached to board */
        rtl8169_get_mac_version(tp, cfg->default_ver);
 
+       if (rtl_tbi_enabled(tp)) {
+               dev_err(&pdev->dev, "TBI fiber mode not supported\n");
+               return -ENODEV;
+       }
+
        tp->cp_cmd = RTL_R16(tp, CPlusCmd);
 
        if ((sizeof(dma_addr_t) > 4) &&
@@ -7737,22 +7399,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* override BIOS settings, use userspace tools to enable WOL */
        __rtl8169_set_wol(tp, 0);
 
-       if (rtl_tbi_enabled(tp)) {
-               tp->set_speed = rtl8169_set_speed_tbi;
-               tp->get_link_ksettings = rtl8169_get_link_ksettings_tbi;
-               tp->phy_reset_enable = rtl8169_tbi_reset_enable;
-               tp->phy_reset_pending = rtl8169_tbi_reset_pending;
-               tp->link_ok = rtl8169_tbi_link_ok;
-               tp->do_ioctl = rtl_tbi_ioctl;
-       } else {
-               tp->set_speed = rtl8169_set_speed_xmii;
-               tp->get_link_ksettings = rtl8169_get_link_ksettings_xmii;
-               tp->phy_reset_enable = rtl8169_xmii_reset_enable;
-               tp->phy_reset_pending = rtl8169_xmii_reset_pending;
-               tp->link_ok = rtl8169_xmii_link_ok;
-               tp->do_ioctl = rtl_xmii_ioctl;
-       }
-
        mutex_init(&tp->wk.mutex);
        u64_stats_init(&tp->rx_stats.syncp);
        u64_stats_init(&tp->tx_stats.syncp);
@@ -7823,8 +7469,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        tp->event_slow = cfg->event_slow;
        tp->coalesce_info = cfg->coalesce_info;
 
-       timer_setup(&tp->timer, rtl8169_phy_timer, 0);
-
        tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
 
        tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
@@ -7835,10 +7479,17 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_drvdata(pdev, dev);
 
-       rc = register_netdev(dev);
-       if (rc < 0)
+       rc = r8169_mdio_register(tp);
+       if (rc)
                return rc;
 
+       /* chip gets powered up in rtl_open() */
+       rtl_pll_power_down(tp);
+
+       rc = register_netdev(dev);
+       if (rc)
+               goto err_mdio_unregister;
+
        netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n",
                   rtl_chip_infos[chipset].name, dev->dev_addr,
                   (u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff),
@@ -7853,12 +7504,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (r8168_check_dash(tp))
                rtl8168_driver_start(tp);
 
-       netif_carrier_off(dev);
-
        if (pci_dev_run_wake(pdev))
                pm_runtime_put_sync(&pdev->dev);
 
        return 0;
+
+err_mdio_unregister:
+       mdiobus_unregister(tp->mii_bus);
+       return rc;
 }
 
 static struct pci_driver rtl8169_pci_driver = {
index 27be51f0a421b43e191e594bdb6ebcd753b65eef..f3f7477043ce106155ca30ba7c07fb7d20e968bc 100644 (file)
@@ -17,7 +17,6 @@ if NET_VENDOR_RENESAS
 
 config SH_ETH
        tristate "Renesas SuperH Ethernet support"
-       depends on HAS_DMA
        depends on ARCH_RENESAS || SUPERH || COMPILE_TEST
        select CRC32
        select MII
@@ -31,7 +30,6 @@ config SH_ETH
 
 config RAVB
        tristate "Renesas Ethernet AVB support"
-       depends on HAS_DMA
        depends on ARCH_RENESAS || COMPILE_TEST
        select CRC32
        select MII
index 68f122140966d4de381b47fa192246eb7606707a..f7e649c831ad55959c88571f0e9c296c572836bc 100644 (file)
@@ -1226,7 +1226,7 @@ static int ravb_get_sset_count(struct net_device *netdev, int sset)
 }
 
 static void ravb_get_ethtool_stats(struct net_device *ndev,
-                                  struct ethtool_stats *stats, u64 *data)
+                                  struct ethtool_stats *estats, u64 *data)
 {
        struct ravb_private *priv = netdev_priv(ndev);
        int i = 0;
@@ -1258,7 +1258,7 @@ static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 {
        switch (stringset) {
        case ETH_SS_STATS:
-               memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
+               memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
                break;
        }
 }
@@ -1623,7 +1623,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                /* TAG and timestamp required flag */
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
-               desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
+               desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
        }
 
        skb_tx_timestamp(skb);
@@ -1656,7 +1656,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 }
 
 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
-                            void *accel_priv, select_queue_fallback_t fallback)
+                            struct net_device *sb_dev,
+                            select_queue_fallback_t fallback)
 {
        /* If skb needs TX timestamp, it is handled in network control queue */
        return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
index e9007b613f17ca8de16b67e054df42a800522fb5..71651e47660a256ee901e5286475b8ea3d817af0 100644 (file)
@@ -622,7 +622,6 @@ static struct sh_eth_cpu_data r7s72100_data = {
        .tpauser        = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
-       .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
        .xdfar_rw       = 1,
@@ -672,7 +671,6 @@ static struct sh_eth_cpu_data r8a7740_data = {
        .bculr          = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
-       .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
        .xdfar_rw       = 1,
@@ -798,7 +796,6 @@ static struct sh_eth_cpu_data r8a77980_data = {
        .hw_swap        = 1,
        .nbst           = 1,
        .rpadir         = 1,
-       .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
        .xdfar_rw       = 1,
@@ -851,7 +848,6 @@ static struct sh_eth_cpu_data sh7724_data = {
        .tpauser        = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
-       .rpadir_value   = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
 };
 
 static void sh_eth_set_rate_sh7757(struct net_device *ndev)
@@ -898,7 +894,6 @@ static struct sh_eth_cpu_data sh7757_data = {
        .hw_swap        = 1,
        .no_ade         = 1,
        .rpadir         = 1,
-       .rpadir_value   = 2 << 16,
        .rtrate         = 1,
        .dual_port      = 1,
 };
@@ -978,7 +973,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
        .bculr          = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
-       .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
        .xdfar_rw       = 1,
@@ -1467,7 +1461,7 @@ static int sh_eth_dev_init(struct net_device *ndev)
        /* Descriptor format */
        sh_eth_ring_format(ndev);
        if (mdp->cd->rpadir)
-               sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
+               sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR);
 
        /* all sh_eth int mask */
        sh_eth_write(ndev, 0, EESIPR);
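
The RPADIR change above drops the per-SoC rpadir_value constants: every
initializer removed in the hunks above was 2 << 16 (one spelled 0x00020000
with a comment noting NET_IP_ALIGN is assumed to be 2), i.e. a 2-byte receive
pad that keeps the IP header 4-byte aligned. Computing the value directly
from NET_IP_ALIGN removes the duplication:

    if (mdp->cd->rpadir)    /* pad RX data by NET_IP_ALIGN bytes */
            sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR);
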
@@ -1527,9 +1521,9 @@ static int sh_eth_dev_init(struct net_device *ndev)
 
        /* mask reset */
        if (mdp->cd->apr)
-               sh_eth_write(ndev, APR_AP, APR);
+               sh_eth_write(ndev, 1, APR);
        if (mdp->cd->mpr)
-               sh_eth_write(ndev, MPR_MP, MPR);
+               sh_eth_write(ndev, 1, MPR);
        if (mdp->cd->tpauser)
                sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
 
index 726c55a82dd7b76e1d90a836cddcd66bcbf96a24..140ad2c570950c65c016aa491a89ce4056b87188 100644 (file)
@@ -383,12 +383,12 @@ enum ECSIPR_STATUS_MASK_BIT {
 
 /* APR */
 enum APR_BIT {
-       APR_AP = 0x00000001,
+       APR_AP = 0x0000ffff,
 };
 
 /* MPR */
 enum MPR_BIT {
-       MPR_MP = 0x00000001,
+       MPR_MP = 0x0000ffff,
 };
 
 /* TRSCER */
@@ -403,8 +403,7 @@ enum DESC_I_BIT {
 
 /* RPADIR */
 enum RPADIR_BIT {
-       RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
-       RPADIR_PADR = 0x0003f,
+       RPADIR_PADS = 0x1f0000, RPADIR_PADR = 0xffff,
 };
 
 /* FDR */
@@ -488,7 +487,6 @@ struct sh_eth_cpu_data {
        u32 ecsipr_value;
        u32 fdr_value;
        u32 fcftr_value;
-       u32 rpadir_value;
 
        /* interrupt checking mask */
        u32 tx_check;
index 3bac58d0f88b20f8982295a2efa09c9096d98822..c5c297e78d068f41968fefc80e049b9ba1b70c62 100644 (file)
@@ -6,3 +6,5 @@ sfc-$(CONFIG_SFC_MTD)   += mtd.o
 sfc-$(CONFIG_SFC_SRIOV)        += sriov.o siena_sriov.o ef10_sriov.o
 
 obj-$(CONFIG_SFC)      += sfc.o
+
+obj-$(CONFIG_SFC_FALCON) += falcon/
index 019cef1d3cf72ce2d34b3a36283cabf9227c9879..3d76fd1504c2bc38d80b051ad885f8e0b022aba9 100644 (file)
@@ -199,7 +199,7 @@ static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx)
                return -ENOMEM;
 
        for (i = 0; i < efx->vf_count; i++) {
-               random_ether_addr(nic_data->vf[i].mac);
+               eth_random_addr(nic_data->vf[i].mac);
                nic_data->vf[i].efx = NULL;
                nic_data->vf[i].vlan = EFX_EF10_NO_VLAN;
 
@@ -564,7 +564,7 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        struct ef10_vf *vf;
-       u16 old_vlan, new_vlan;
+       u16 new_vlan;
        int rc = 0, rc2 = 0;
 
        if (vf_i >= efx->vf_count)
@@ -619,7 +619,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
        }
 
        /* Do the actual vlan change */
-       old_vlan = vf->vlan;
        vf->vlan = new_vlan;
 
        /* Restore everything in reverse order */
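Editorial note: random_ether_addr(), replaced here and in the cpsw/netcp
hunks further down, is a legacy alias for eth_random_addr(); both fill in
a random, locally administered unicast MAC. A hedged sketch of what the
helper does (the real one lives in <linux/etherdevice.h>):

	/* sketch only, not the kernel source */
	static inline void eth_random_addr_sketch(u8 *addr)
	{
		get_random_bytes(addr, 6);  /* ETH_ALEN random bytes */
		addr[0] &= 0xfe;            /* clear the multicast bit */
		addr[0] |= 0x02;            /* set locally administered */
	}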
index ad4a354ce570e143a741e7ab7155ae84a8a5df34..b24c2e21db8e9df6eb1abd283c084858113bbe37 100644 (file)
@@ -264,11 +264,17 @@ static int efx_check_disabled(struct efx_nic *efx)
 static int efx_process_channel(struct efx_channel *channel, int budget)
 {
        struct efx_tx_queue *tx_queue;
+       struct list_head rx_list;
        int spent;
 
        if (unlikely(!channel->enabled))
                return 0;
 
+       /* Prepare the batch receive list */
+       EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
+       INIT_LIST_HEAD(&rx_list);
+       channel->rx_list = &rx_list;
+
        efx_for_each_channel_tx_queue(tx_queue, channel) {
                tx_queue->pkts_compl = 0;
                tx_queue->bytes_compl = 0;
@@ -291,6 +297,10 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
                }
        }
 
+       /* Receive any packets we queued up */
+       netif_receive_skb_list(channel->rx_list);
+       channel->rx_list = NULL;
+
        return spent;
 }
 
@@ -555,6 +565,8 @@ static int efx_probe_channel(struct efx_channel *channel)
                        goto fail;
        }
 
+       channel->rx_list = NULL;
+
        return 0;
 
 fail:
@@ -3180,6 +3192,7 @@ bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
        return true;
 }
 
+static
 struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
                                       const struct efx_filter_spec *spec)
 {
index 8edf20967c82c583bb59ace5f1f9c30dcfd1530d..e045a5d6b938f43f391a726f301d8911f156b32c 100644 (file)
@@ -2794,6 +2794,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
        if (!state)
                return -ENOMEM;
        efx->filter_state = state;
+       init_rwsem(&state->lock);
 
        table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
        table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
index 65568925c3efe6398d1e26ca1520b53919f60291..961b9297964069440a962e39cd409760b71c073a 100644 (file)
@@ -448,6 +448,7 @@ enum efx_sync_events_state {
  *     __efx_rx_packet(), or zero if there is none
  * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
  *     by __efx_rx_packet(), if @rx_pkt_n_frags != 0
+ * @rx_list: list of SKBs from current RX, awaiting processing
  * @rx_queue: RX queue for this channel
  * @tx_queue: TX queues for this channel
  * @sync_events_state: Current state of sync events on this channel
@@ -500,6 +501,8 @@ struct efx_channel {
        unsigned int rx_pkt_n_frags;
        unsigned int rx_pkt_index;
 
+       struct list_head *rx_list;
+
        struct efx_rx_queue rx_queue;
        struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
 
index d2e254f2f72bf0f2fffd0078397da819a676980f..396ff01298cdfd4d8ccc5566e36dba778a7f4e59 100644 (file)
@@ -634,7 +634,12 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
                        return;
 
        /* Pass the packet up */
-       netif_receive_skb(skb);
+       if (channel->rx_list != NULL)
+               /* Add to list, will pass up later */
+               list_add_tail(&skb->list, channel->rx_list);
+       else
+               /* No list, so pass it up now */
+               netif_receive_skb(skb);
 }
 
 /* Handle a received packet.  Second half: Touches packet payload. */
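Editorial note: the sfc hunks above (efx.c, net_driver.h, rx.c) together
implement list-batched receive: packets delivered during one NAPI poll are
collected on a per-channel list and handed to the stack in a single
netif_receive_skb_list() call. A condensed sketch of the flow, with
do_rx_work() standing in for the real event processing:

	static int poll_sketch(struct efx_channel *channel, int budget)
	{
		LIST_HEAD(rx_list);
		int spent;

		channel->rx_list = &rx_list;   /* efx_rx_deliver() queues here */
		spent = do_rx_work(channel, budget);  /* hypothetical helper */

		netif_receive_skb_list(channel->rx_list);  /* one handoff */
		channel->rx_list = NULL;
		return spent;
	}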
index 949aaef390b67bbf9a21c15a24f912898b422229..15c62c160953308b3f4018e8bd7973dee9fa59cd 100644 (file)
@@ -321,7 +321,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        static int card_idx = -1;
        void __iomem *ioaddr;
        int chip_idx = (int) ent->driver_data;
-       int irq;
        struct net_device *dev;
        struct epic_private *ep;
        int i, ret, option = 0, duplex = 0;
@@ -338,7 +337,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        ret = pci_enable_device(pdev);
        if (ret)
                goto out;
-       irq = pdev->irq;
 
        if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
                dev_err(&pdev->dev, "no PCI region space\n");
index e080d3e7c582ff7df2a2fe0ecf6074ac4a306470..01589b6982e4d433d4936a4bb6fa2c2510d278c0 100644 (file)
@@ -780,11 +780,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
 static int netsec_napi_poll(struct napi_struct *napi, int budget)
 {
        struct netsec_priv *priv;
-       struct net_device *ndev;
        int tx, rx, done, todo;
 
        priv = container_of(napi, struct netsec_priv, napi);
-       ndev = priv->ndev;
 
        todo = budget;
        do {
index cb5b0f58c395c2bdbf32e7283d91cf8c4ac5dbe9..edf20361ea5f15c7ddee617f899e31b92d7e261e 100644 (file)
@@ -111,7 +111,7 @@ config DWMAC_ROCKCHIP
 config DWMAC_SOCFPGA
        tristate "SOCFPGA dwmac support"
        default ARCH_SOCFPGA
-       depends on OF && (ARCH_SOCFPGA || COMPILE_TEST)
+       depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
        select MFD_SYSCON
        help
          Support for ethernet controller on Altera SOCFPGA
index f08625a02cea03f8dcf55ca3f9cc0f06fa4e3ca5..7b923362ee5509d6fdedc9c15d09cd760182fab2 100644 (file)
@@ -61,6 +61,7 @@ struct rk_priv_data {
        struct clk *mac_clk_tx;
        struct clk *clk_mac_ref;
        struct clk *clk_mac_refout;
+       struct clk *clk_mac_speed;
        struct clk *aclk_mac;
        struct clk *pclk_mac;
        struct clk *clk_phy;
@@ -83,6 +84,64 @@ struct rk_priv_data {
        (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
         ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
 
+#define PX30_GRF_GMAC_CON1             0x0904
+
+/* PX30_GRF_GMAC_CON1 */
+#define PX30_GMAC_PHY_INTF_SEL_RMII    (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \
+                                        GRF_BIT(6))
+#define PX30_GMAC_SPEED_10M            GRF_CLR_BIT(2)
+#define PX30_GMAC_SPEED_100M           GRF_BIT(2)
+
+static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+       struct device *dev = &bsp_priv->pdev->dev;
+
+       if (IS_ERR(bsp_priv->grf)) {
+               dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+               return;
+       }
+
+       regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+                    PX30_GMAC_PHY_INTF_SEL_RMII);
+}
+
+static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+       struct device *dev = &bsp_priv->pdev->dev;
+       int ret;
+
+       if (IS_ERR(bsp_priv->clk_mac_speed)) {
+               dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
+               return;
+       }
+
+       if (speed == 10) {
+               regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+                            PX30_GMAC_SPEED_10M);
+
+               ret = clk_set_rate(bsp_priv->clk_mac_speed, 2500000);
+               if (ret)
+                       dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
+                               __func__, ret);
+       } else if (speed == 100) {
+               regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+                            PX30_GMAC_SPEED_100M);
+
+               ret = clk_set_rate(bsp_priv->clk_mac_speed, 25000000);
+               if (ret)
+                       dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
+                               __func__, ret);
+
+       } else {
+               dev_err(dev, "unknown speed value for RMII! speed=%d\n", speed);
+       }
+}
+
+static const struct rk_gmac_ops px30_ops = {
+       .set_to_rmii = px30_set_to_rmii,
+       .set_rmii_speed = px30_set_rmii_speed,
+};
+
 #define RK3128_GRF_MAC_CON0    0x0168
 #define RK3128_GRF_MAC_CON1    0x016c
 
@@ -1042,6 +1101,10 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
                }
        }
 
+       bsp_priv->clk_mac_speed = devm_clk_get(dev, "clk_mac_speed");
+       if (IS_ERR(bsp_priv->clk_mac_speed))
+               dev_err(dev, "cannot get clock %s\n", "clk_mac_speed");
+
        if (bsp_priv->clock_input) {
                dev_info(dev, "clock input from PHY\n");
        } else {
@@ -1094,6 +1157,9 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
                        if (!IS_ERR(bsp_priv->mac_clk_tx))
                                clk_prepare_enable(bsp_priv->mac_clk_tx);
 
+                       if (!IS_ERR(bsp_priv->clk_mac_speed))
+                               clk_prepare_enable(bsp_priv->clk_mac_speed);
+
                        /**
                         * if (!IS_ERR(bsp_priv->clk_mac))
                         *      clk_prepare_enable(bsp_priv->clk_mac);
@@ -1118,6 +1184,8 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
                        clk_disable_unprepare(bsp_priv->pclk_mac);
 
                        clk_disable_unprepare(bsp_priv->mac_clk_tx);
+
+                       clk_disable_unprepare(bsp_priv->clk_mac_speed);
                        /**
                         * if (!IS_ERR(bsp_priv->clk_mac))
                         *      clk_disable_unprepare(bsp_priv->clk_mac);
@@ -1414,6 +1482,7 @@ static int rk_gmac_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
 
 static const struct of_device_id rk_gmac_dwmac_match[] = {
+       { .compatible = "rockchip,px30-gmac",   .data = &px30_ops   },
        { .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
        { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
        { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
index 6e359572b9f0ea53ed46b553fb1cb51273415f57..5b3b06a0a3bf53e1eac9572ae8d14add0c3835e7 100644 (file)
@@ -55,6 +55,7 @@ struct socfpga_dwmac {
        struct  device *dev;
        struct regmap *sys_mgr_base_addr;
        struct reset_control *stmmac_rst;
+       struct reset_control *stmmac_ocp_rst;
        void __iomem *splitter_base;
        bool f2h_ptp_ref_clk;
        struct tse_pcs pcs;
@@ -262,8 +263,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
                val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
 
        /* Assert reset to the enet controller before changing the phy mode */
-       if (dwmac->stmmac_rst)
-               reset_control_assert(dwmac->stmmac_rst);
+       reset_control_assert(dwmac->stmmac_ocp_rst);
+       reset_control_assert(dwmac->stmmac_rst);
 
        regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
        ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
@@ -288,8 +289,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
        /* Deassert reset for the phy configuration to be sampled by
         * the enet controller, and operation to start in requested mode
         */
-       if (dwmac->stmmac_rst)
-               reset_control_deassert(dwmac->stmmac_rst);
+       reset_control_deassert(dwmac->stmmac_ocp_rst);
+       reset_control_deassert(dwmac->stmmac_rst);
        if (phymode == PHY_INTERFACE_MODE_SGMII) {
                if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
                        dev_err(dwmac->dev, "Unable to initialize TSE PCS");
@@ -324,6 +325,15 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
                goto err_remove_config_dt;
        }
 
+       dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp");
+       if (IS_ERR(dwmac->stmmac_ocp_rst)) {
+               ret = PTR_ERR(dwmac->stmmac_ocp_rst);
+               dev_err(dev, "error getting reset control of ocp %d\n", ret);
+               goto err_remove_config_dt;
+       }
+
+       reset_control_deassert(dwmac->stmmac_ocp_rst);
+
        ret = socfpga_dwmac_parse_data(dwmac, dev);
        if (ret) {
                dev_err(dev, "Unable to parse OF data\n");
index d37f17ca62fecf66a6b5af1c9aa105923310a341..edb6053bd9802574ee5b5ec9a1cea4de7678214e 100644 (file)
@@ -407,6 +407,29 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
        }
 }
 
+static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
+{
+       u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+       mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
+       if (qmode != MTL_QUEUE_AVB)
+               mtl_tx_op |= MTL_OP_MODE_TXQEN;
+       else
+               mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
+
+       writel(mtl_tx_op, ioaddr +  MTL_CHAN_TX_OP_MODE(channel));
+}
+
+static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+{
+       u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
+       value &= ~DMA_RBSZ_MASK;
+       value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
+
+       writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+}
+
 const struct stmmac_dma_ops dwmac4_dma_ops = {
        .reset = dwmac4_dma_reset,
        .init = dwmac4_dma_init,
@@ -431,6 +454,8 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
        .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
        .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
        .enable_tso = dwmac4_enable_tso,
+       .qmode = dwmac4_qmode,
+       .set_bfsize = dwmac4_set_bfsize,
 };
 
 const struct stmmac_dma_ops dwmac410_dma_ops = {
@@ -457,4 +482,6 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
        .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
        .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
        .enable_tso = dwmac4_enable_tso,
+       .qmode = dwmac4_qmode,
+       .set_bfsize = dwmac4_set_bfsize,
 };
index c63c1fe3f26b9e4d5cb714ea3ceed56bf103b17e..22a4a6dbb1a4af42d3d7467e3ebca50efef57986 100644 (file)
 
 /* DMA Rx Channel X Control register defines */
 #define DMA_CONTROL_SR                 BIT(0)
+#define DMA_RBSZ_MASK                  GENMASK(14, 1)
+#define DMA_RBSZ_SHIFT                 1
 
 /* Interrupt status per channel */
 #define DMA_CHAN_STATUS_REB            GENMASK(21, 19)
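Editorial note: dwmac4_set_bfsize() above is a plain read-modify-write of
the new RBSZ field, bits 14:1 of the per-channel Rx control register. A
worked example of the packing, assuming a 1536-byte receive buffer:

	u32 value = 0;                     /* stands in for the readl() */

	value &= ~0x7ffeu;                 /* DMA_RBSZ_MASK = GENMASK(14, 1) */
	value |= (1536u << 1) & 0x7ffeu;   /* bfsize 1536 -> 0x0c00 */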
index e44e7b26ce829be0eff000c6a68b064139d532b8..79911eefc2a7249ec347260e4026d5ebdbfe499a 100644 (file)
@@ -183,6 +183,8 @@ struct stmmac_dma_ops {
        void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
        void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
        void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
+       void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
+       void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
 };
 
 #define stmmac_reset(__priv, __args...) \
@@ -235,6 +237,10 @@ struct stmmac_dma_ops {
        stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
 #define stmmac_enable_tso(__priv, __args...) \
        stmmac_do_void_callback(__priv, dma, enable_tso, __args)
+#define stmmac_dma_qmode(__priv, __args...) \
+       stmmac_do_void_callback(__priv, dma, qmode, __args)
+#define stmmac_set_dma_bfsize(__priv, __args...) \
+       stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
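Editorial note: the stmmac_dma_qmode/stmmac_set_dma_bfsize wrappers use the
driver's stmmac_do_void_callback() dispatcher, which guards the indirect
call behind a NULL check on the ops pointer. Roughly (hedged; see the real
macro in hwif.h):

	#define do_void_callback_sketch(priv, mod, fn, ...)		\
	({								\
		int __ret = -EINVAL;					\
		if ((priv)->hw->mod && (priv)->hw->mod->fn) {		\
			(priv)->hw->mod->fn(__VA_ARGS__);		\
			__ret = 0;					\
		}							\
		__ret;							\
	})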
 
 struct mac_device_info;
 struct net_device;
@@ -441,17 +447,22 @@ struct stmmac_mode_ops {
 
 struct stmmac_priv;
 struct tc_cls_u32_offload;
+struct tc_cbs_qopt_offload;
 
 struct stmmac_tc_ops {
        int (*init)(struct stmmac_priv *priv);
        int (*setup_cls_u32)(struct stmmac_priv *priv,
                             struct tc_cls_u32_offload *cls);
+       int (*setup_cbs)(struct stmmac_priv *priv,
+                        struct tc_cbs_qopt_offload *qopt);
 };
 
 #define stmmac_tc_init(__priv, __args...) \
        stmmac_do_callback(__priv, tc, init, __args)
 #define stmmac_tc_setup_cls_u32(__priv, __args...) \
        stmmac_do_callback(__priv, tc, setup_cls_u32, __args)
+#define stmmac_tc_setup_cbs(__priv, __args...) \
+       stmmac_do_callback(__priv, tc, setup_cbs, __args)
 
 struct stmmac_regs_off {
        u32 ptp_off;
index e79b0d7b388a16d524917b0dfed1b4dd2f079c2f..d9e60cfd8a85b56725bc9e054c1b8a4fcdf048a5 100644 (file)
@@ -928,6 +928,7 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
 static int stmmac_init_phy(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       u32 tx_cnt = priv->plat->tx_queues_to_use;
        struct phy_device *phydev;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
        char bus_id[MII_BUS_ID_SIZE];
@@ -968,6 +969,15 @@ static int stmmac_init_phy(struct net_device *dev)
                phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
                                         SUPPORTED_1000baseT_Full);
 
+       /*
+        * Half-duplex mode is not supported with multiqueue;
+        * half-duplex can only work with a single queue.
+        */
+       if (tx_cnt > 1)
+               phydev->supported &= ~(SUPPORTED_1000baseT_Half |
+                                      SUPPORTED_100baseT_Half |
+                                      SUPPORTED_10baseT_Half);
+
        /*
         * Broken HW is sometimes missing the pull-up resistor on the
         * MDIO line, which results in reads to non-existent devices returning
@@ -1794,6 +1804,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 
                stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
                                rxfifosz, qmode);
+               stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
+                               chan);
        }
 
        for (chan = 0; chan < tx_channels_count; chan++) {
@@ -3766,7 +3778,7 @@ static int stmmac_setup_tc_block(struct stmmac_priv *priv,
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
-                               priv, priv);
+                               priv, priv, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
                return 0;
@@ -3783,6 +3795,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
        switch (type) {
        case TC_SETUP_BLOCK:
                return stmmac_setup_tc_block(priv, type_data);
+       case TC_SETUP_QDISC_CBS:
+               return stmmac_tc_setup_cbs(priv, priv, type_data);
        default:
                return -EOPNOTSUPP;
        }
index 2258cd8cc84413f22c19a9339a3b6193427c464e..1a96dd9c1091e6c515753132c4a3fd0128f41bdf 100644 (file)
@@ -289,7 +289,67 @@ static int tc_init(struct stmmac_priv *priv)
        return 0;
 }
 
+static int tc_setup_cbs(struct stmmac_priv *priv,
+                       struct tc_cbs_qopt_offload *qopt)
+{
+       u32 tx_queues_count = priv->plat->tx_queues_to_use;
+       u32 queue = qopt->queue;
+       u32 ptr, speed_div;
+       u32 mode_to_use;
+       u64 value;
+       int ret;
+
+       /* Queue 0 is not AVB capable */
+       if (queue <= 0 || queue >= tx_queues_count)
+               return -EINVAL;
+       if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
+               return -EOPNOTSUPP;
+
+       mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+       if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
+               ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
+               if (ret)
+                       return ret;
+
+               priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+       } else if (!qopt->enable) {
+               return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
+       }
+
+       /* Port Transmit Rate and Speed Divider */
+       ptr = (priv->speed == SPEED_100) ? 4 : 8;
+       speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000;
+
+       /* Final adjustments for HW */
+       value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
+       priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
+
+       value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
+       priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
+
+       value = qopt->hicredit * 1024ll * 8;
+       priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);
+
+       value = qopt->locredit * 1024ll * 8;
+       priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);
+
+       ret = stmmac_config_cbs(priv, priv->hw,
+                               priv->plat->tx_queues_cfg[queue].send_slope,
+                               priv->plat->tx_queues_cfg[queue].idle_slope,
+                               priv->plat->tx_queues_cfg[queue].high_credit,
+                               priv->plat->tx_queues_cfg[queue].low_credit,
+                               queue);
+       if (ret)
+               return ret;
+
+       dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
+                       queue, qopt->sendslope, qopt->idleslope,
+                       qopt->hicredit, qopt->locredit);
+       return 0;
+}
+
 const struct stmmac_tc_ops dwmac510_tc_ops = {
        .init = tc_init,
        .setup_cls_u32 = tc_setup_cls_u32,
+       .setup_cbs = tc_setup_cbs,
 };
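Editorial note on the credit arithmetic in tc_setup_cbs(): the slopes
arrive from the CBS qdisc in kbit/s and are scaled by 1024 * ptr /
speed_div into the hardware's fixed-point units. A worked example,
assuming a 1 Gb/s link (ptr = 8, speed_div = 1000000) and an idleslope of
20000 kbit/s:

	long long idleslope = 20000;                 /* kbit/s from tc */
	unsigned int ptr = 8, speed_div = 1000000;   /* SPEED_1000 case */

	long long idle_slope = idleslope * 1024ll * ptr / speed_div;
	/* 20000 * 1024 * 8 / 1000000 = 163 (truncated); this is the
	 * value programmed via stmmac_config_cbs() above. */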
index a5dd627fe2f9237a1af445c9ce2409fd0976c76c..d42f47f6c632fe8618348d40fc609bfed5deef4a 100644 (file)
@@ -101,7 +101,8 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb,
 }
 
 static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
-                           void *accel_priv, select_queue_fallback_t fallback)
+                           struct net_device *sb_dev,
+                           select_queue_fallback_t fallback)
 {
        struct vnet_port *port = netdev_priv(dev);
 
index 88c12474a0c38cc10f539d7eff0b81a0cc9a4d7a..9319d84bf49f07e9a9cf8514783dacb32b78fee7 100644 (file)
@@ -1225,25 +1225,9 @@ static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
 
        bmsr = err;
        if (bmsr & BMSR_LSTATUS) {
-               u16 adv, lpa;
-
-               err = mii_read(np, np->phy_addr, MII_ADVERTISE);
-               if (err < 0)
-                       goto out;
-               adv = err;
-
-               err = mii_read(np, np->phy_addr, MII_LPA);
-               if (err < 0)
-                       goto out;
-               lpa = err;
-
-               err = mii_read(np, np->phy_addr, MII_ESTATUS);
-               if (err < 0)
-                       goto out;
                link_up = 1;
                current_speed = SPEED_1000;
                current_duplex = DUPLEX_FULL;
-
        }
        lp->active_speed = current_speed;
        lp->active_duplex = current_duplex;
index 7a16d40a72d13cf1d522e8a3a396c826fe76f9b9..b9221fc1674dfa0ef17a43f8ff86d700a1ae514f 100644 (file)
@@ -60,8 +60,7 @@
 #include <linux/sungem_phy.h>
 #include "sungem.h"
 
-/* Stripping FCS is causing problems, disabled for now */
-#undef STRIP_FCS
+#define STRIP_FCS
 
 #define DEFAULT_MSG    (NETIF_MSG_DRV          | \
                         NETIF_MSG_PROBE        | \
@@ -435,7 +434,7 @@ static int gem_rxmac_reset(struct gem *gp)
        writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
        writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
        val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
-              ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+              (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
        writel(val, gp->regs + RXDMA_CFG);
        if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
                writel(((5 & RXDMA_BLANK_IPKTS) |
@@ -760,7 +759,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
        struct net_device *dev = gp->dev;
        int entry, drops, work_done = 0;
        u32 done;
-       __sum16 csum;
 
        if (netif_msg_rx_status(gp))
                printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
@@ -855,9 +853,13 @@ static int gem_rx(struct gem *gp, int work_to_do)
                        skb = copy_skb;
                }
 
-               csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
-               skb->csum = csum_unfold(csum);
-               skb->ip_summed = CHECKSUM_COMPLETE;
+               if (likely(dev->features & NETIF_F_RXCSUM)) {
+                       __sum16 csum;
+
+                       csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+                       skb->csum = csum_unfold(csum);
+                       skb->ip_summed = CHECKSUM_COMPLETE;
+               }
                skb->protocol = eth_type_trans(skb, gp->dev);
 
                napi_gro_receive(&gp->napi, skb);
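Editorial note: CHECKSUM_COMPLETE tells the stack that skb->csum already
holds the hardware-computed ones'-complement sum over the packet; the GEM
stores it inverted in the descriptor status word, hence the ^ 0xffff
before csum_unfold(). The unfold step itself is only a type widening.
A hedged sketch:

	/* sketch of csum_unfold(), cf. <asm-generic/checksum.h> */
	static inline __wsum csum_unfold_sketch(__sum16 n)
	{
		return (__force __wsum)n;
	}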
@@ -1761,7 +1763,7 @@ static void gem_init_dma(struct gem *gp)
        writel(0, gp->regs + TXDMA_KICK);
 
        val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
-              ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+              (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
        writel(val, gp->regs + RXDMA_CFG);
 
        writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
@@ -2985,8 +2987,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_drvdata(pdev, dev);
 
        /* We can do scatter/gather and HW checksum */
-       dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
-       dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+       dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+       dev->features = dev->hw_features;
        if (pci_using_dac)
                dev->features |= NETIF_F_HIGHDMA;
 
index a94f50442613e9f77cec6aff24fbf19a5a33756b..12539b357a78402dfc80a4a654761051a2fa6409 100644 (file)
@@ -234,7 +234,8 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
 }
 
 static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            void *accel_priv, select_queue_fallback_t fallback)
+                            struct net_device *sb_dev,
+                            select_queue_fallback_t fallback)
 {
        struct vnet *vp = netdev_priv(dev);
        struct vnet_port *port = __tx_port_find(vp, skb);
index 163d8d16bc245b48a10390d7706b21fd603489a4..dc966ddb6d815038b487f2edd05279e9397c30e7 100644 (file)
@@ -1151,7 +1151,6 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
        struct rx_map *dm;
        struct rxf_fifo *f;
        struct rxdb *db;
-       struct sk_buff *skb;
        int delta;
 
        ENTER;
@@ -1161,7 +1160,6 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
        DBG("db=%p f=%p\n", db, f);
        dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
        DBG("dm=%p\n", dm);
-       skb = dm->skb;
        rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
        rxfd->info = CPU_CHIP_SWAP32(0x10003);  /* INFO=1 BC=3 */
        rxfd->va_lo = rxdd->va_lo;
index 358edab9e72eeee18b9c17d74e66f2de92d5cc87..00761fe59848de7ccc90f91f45128ceb61927905 100644 (file)
@@ -253,23 +253,24 @@ struct cpsw_ss_regs {
 #define RX_DSCP_PRI_MAP7    0x4c /* Rx DSCP Priority to Rx Packet Mapping */
 
 /* Bit definitions for the CPSW2_CONTROL register */
-#define PASS_PRI_TAGGED     (1<<24) /* Pass Priority Tagged */
-#define VLAN_LTYPE2_EN      (1<<21) /* VLAN LTYPE 2 enable */
-#define VLAN_LTYPE1_EN      (1<<20) /* VLAN LTYPE 1 enable */
-#define DSCP_PRI_EN         (1<<16) /* DSCP Priority Enable */
-#define TS_320              (1<<14) /* Time Sync Dest Port 320 enable */
-#define TS_319              (1<<13) /* Time Sync Dest Port 319 enable */
-#define TS_132              (1<<12) /* Time Sync Dest IP Addr 132 enable */
-#define TS_131              (1<<11) /* Time Sync Dest IP Addr 131 enable */
-#define TS_130              (1<<10) /* Time Sync Dest IP Addr 130 enable */
-#define TS_129              (1<<9)  /* Time Sync Dest IP Addr 129 enable */
-#define TS_TTL_NONZERO      (1<<8)  /* Time Sync Time To Live Non-zero enable */
-#define TS_ANNEX_F_EN       (1<<6)  /* Time Sync Annex F enable */
-#define TS_ANNEX_D_EN       (1<<4)  /* Time Sync Annex D enable */
-#define TS_LTYPE2_EN        (1<<3)  /* Time Sync LTYPE 2 enable */
-#define TS_LTYPE1_EN        (1<<2)  /* Time Sync LTYPE 1 enable */
-#define TS_TX_EN            (1<<1)  /* Time Sync Transmit Enable */
-#define TS_RX_EN            (1<<0)  /* Time Sync Receive Enable */
+#define PASS_PRI_TAGGED     BIT(24) /* Pass Priority Tagged */
+#define VLAN_LTYPE2_EN      BIT(21) /* VLAN LTYPE 2 enable */
+#define VLAN_LTYPE1_EN      BIT(20) /* VLAN LTYPE 1 enable */
+#define DSCP_PRI_EN         BIT(16) /* DSCP Priority Enable */
+#define TS_107              BIT(15) /* Time Sync Dest IP Address 107 */
+#define TS_320              BIT(14) /* Time Sync Dest Port 320 enable */
+#define TS_319              BIT(13) /* Time Sync Dest Port 319 enable */
+#define TS_132              BIT(12) /* Time Sync Dest IP Addr 132 enable */
+#define TS_131              BIT(11) /* Time Sync Dest IP Addr 131 enable */
+#define TS_130              BIT(10) /* Time Sync Dest IP Addr 130 enable */
+#define TS_129              BIT(9)  /* Time Sync Dest IP Addr 129 enable */
+#define TS_TTL_NONZERO      BIT(8)  /* Time Sync Time To Live Non-zero enable */
+#define TS_ANNEX_F_EN       BIT(6)  /* Time Sync Annex F enable */
+#define TS_ANNEX_D_EN       BIT(4)  /* Time Sync Annex D enable */
+#define TS_LTYPE2_EN        BIT(3)  /* Time Sync LTYPE 2 enable */
+#define TS_LTYPE1_EN        BIT(2)  /* Time Sync LTYPE 1 enable */
+#define TS_TX_EN            BIT(1)  /* Time Sync Transmit Enable */
+#define TS_RX_EN            BIT(0)  /* Time Sync Receive Enable */
 
 #define CTRL_V2_TS_BITS \
        (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
@@ -281,7 +282,7 @@ struct cpsw_ss_regs {
 
 
 #define CTRL_V3_TS_BITS \
-       (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+       (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
         TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
         TS_LTYPE1_EN)
 
@@ -2927,7 +2928,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
                dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
                         priv_sl2->mac_addr);
        } else {
-               random_ether_addr(priv_sl2->mac_addr);
+               eth_random_addr(priv_sl2->mac_addr);
                dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
                         priv_sl2->mac_addr);
        }
index 6f63c8729afca1cf4e621bd1b18763850f33ffd3..b4ea58dc8caf878c231e5df845cbfc27357580c4 100644 (file)
@@ -114,7 +114,10 @@ static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
                        dev_consume_skb_any(skb);
                        dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
                                mtype, seqid);
-               } else if (time_after(jiffies, skb_cb->tmo)) {
+                       break;
+               }
+
+               if (time_after(jiffies, skb_cb->tmo)) {
                        /* timeout any expired skbs over 1s */
                        dev_dbg(cpts->dev,
                                "expiring tx timestamp mtype %u seqid %04x\n",
index cdbddf16dd2931ba66df103c064705d5f0aef350..4f1267477aa4b56b7f3e1d19420302728da56e7d 100644 (file)
@@ -205,7 +205,7 @@ static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
  * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
  * abstract out these details
  */
-int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
+static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
 {
        struct cpdma_params *cpdma_params = &ctlr->params;
        struct cpdma_desc_pool *pool;
index 06d7c9e4dcda92deb027522dc04b34326c9fdc8a..f270beebb4289326baff5e86b33f47eae2eaa49b 100644 (file)
@@ -1385,6 +1385,15 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
                return -EOPNOTSUPP;
 }
 
+static int match_first_device(struct device *dev, void *data)
+{
+       if (dev->parent && dev->parent->of_node)
+               return of_device_is_compatible(dev->parent->of_node,
+                                              "ti,davinci_mdio");
+
+       return !strncmp(dev_name(dev), "davinci_mdio", 12);
+}
+
 /**
  * emac_dev_open - EMAC device open
  * @ndev: The DaVinci EMAC network adapter
@@ -1484,8 +1493,14 @@ static int emac_dev_open(struct net_device *ndev)
 
        /* use the first phy on the bus if pdata did not give us a phy id */
        if (!phydev && !priv->phy_id) {
-               phy = bus_find_device_by_name(&mdio_bus_type, NULL,
-                                             "davinci_mdio");
+               /* NOTE: we can't use bus_find_device_by_name() here because
+                * the device name is not guaranteed to be 'davinci_mdio'. On
+                * some systems it can be 'davinci_mdio.0' so we need to use
+                * strncmp() against the first part of the string to correctly
+                * match it.
+                */
+               phy = bus_find_device(&mdio_bus_type, NULL, NULL,
+                                     match_first_device);
                if (phy) {
                        priv->phy_id = dev_name(phy);
                        if (!priv->phy_id || !*priv->phy_id)
index e40aa3e31af2345489a4dbc896593c7dc0f574cd..a1d335a3c5e43884348b5d96a9bc9eaed92f0bcb 100644 (file)
@@ -1889,13 +1889,6 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
        return err;
 }
 
-static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
-                             void *accel_priv,
-                             select_queue_fallback_t fallback)
-{
-       return 0;
-}
-
 static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
                          void *type_data)
 {
@@ -1972,7 +1965,7 @@ static const struct net_device_ops netcp_netdev_ops = {
        .ndo_vlan_rx_add_vid    = netcp_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = netcp_rx_kill_vid,
        .ndo_tx_timeout         = netcp_ndo_tx_timeout,
-       .ndo_select_queue       = netcp_select_queue,
+       .ndo_select_queue       = dev_pick_tx_zero,
        .ndo_setup_tc           = netcp_setup_tc,
 };
 
@@ -2052,7 +2045,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
                if (is_valid_ether_addr(efuse_mac_addr))
                        ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
                else
-                       random_ether_addr(ndev->dev_addr);
+                       eth_random_addr(ndev->dev_addr);
 
                devm_iounmap(dev, efuse);
                devm_release_mem_region(dev, res.start, size);
@@ -2061,7 +2054,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
                if (mac_addr)
                        ether_addr_copy(ndev->dev_addr, mac_addr);
                else
-                       random_ether_addr(ndev->dev_addr);
+                       eth_random_addr(ndev->dev_addr);
        }
 
        ret = of_property_read_string(node_interface, "rx-channel",
index 2a0c06e0f730c35e5e7d40d9d9893bf36cf9c6fa..42f1f518dad6939300905f8b6218f4f31fabd212 100644 (file)
@@ -70,7 +70,8 @@
 #define XEL_TSR_XMIT_IE_MASK    0x00000008     /* Tx interrupt enable bit */
 #define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000    /* Buffer is active, SW bit
                                                 * only. This is not documented
-                                                * in the HW spec */
+                                                * in the HW spec
+                                                */
 
 /* Define for programming the MAC address into the EmacLite */
 #define XEL_TSR_PROG_MAC_ADDR  (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)
 
 
 
-#define TX_TIMEOUT             (60*HZ)         /* Tx timeout is 60 seconds. */
+#define TX_TIMEOUT             (60 * HZ)       /* Tx timeout is 60 seconds. */
 #define ALIGNMENT              4
 
 /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
-#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
+#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32)adr)) % ALIGNMENT)
 
 #ifdef __BIG_ENDIAN
 #define xemaclite_readl                ioread32be
@@ -238,8 +239,8 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
 
                /* Set up to output the remaining data */
                align_buffer = 0;
-               to_u8_ptr = (u8 *) &align_buffer;
-               from_u8_ptr = (u8 *) from_u16_ptr;
+               to_u8_ptr = (u8 *)&align_buffer;
+               from_u8_ptr = (u8 *)from_u16_ptr;
 
                /* Output the remaining data */
                for (; length > 0; length--)
@@ -272,7 +273,7 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
        u32 align_buffer;
 
        from_u32_ptr = src_ptr;
-       to_u16_ptr = (u16 *) dest_ptr;
+       to_u16_ptr = (u16 *)dest_ptr;
 
        for (; length > 3; length -= 4) {
                /* Copy each word into the temporary buffer */
@@ -288,9 +289,9 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
                u8 *to_u8_ptr, *from_u8_ptr;
 
                /* Set up to read the remaining data */
-               to_u8_ptr = (u8 *) to_u16_ptr;
+               to_u8_ptr = (u8 *)to_u16_ptr;
                align_buffer = *from_u32_ptr++;
-               from_u8_ptr = (u8 *) &align_buffer;
+               from_u8_ptr = (u8 *)&align_buffer;
 
                /* Read the remaining data */
                for (; length > 0; length--)
@@ -336,7 +337,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
                        drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
        } else if (drvdata->tx_ping_pong != 0) {
                /* If the expected buffer is full, try the other buffer,
-                * if it is configured in HW */
+                * if it is configured in HW
+                */
 
                addr = (void __iomem __force *)((u32 __force)addr ^
                                                 XEL_BUFFER_OFFSET);
@@ -349,7 +351,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
                return -1; /* Buffer was full, return failure */
 
        /* Write the frame to the buffer */
-       xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
+       xemaclite_aligned_write(data, (u32 __force *)addr, byte_count);
 
        xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
                         addr + XEL_TPLR_OFFSET);
@@ -357,7 +359,8 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
        /* Update the Tx Status Register to indicate that there is a
         * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
         * is used by the interrupt handler to check whether a frame
-        * has been transmitted */
+        * has been transmitted
+        */
        reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
        reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
        xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
@@ -369,6 +372,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
  * xemaclite_recv_data - Receive a frame
  * @drvdata:   Pointer to the Emaclite device private data
  * @data:      Address where the data is to be received
+ * @maxlen:    Maximum supported ethernet packet length
  *
  * This function is intended to be called from the interrupt context or
  * with a wrapper which waits for the receive frame to be available.
@@ -394,7 +398,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
                /* The instance is out of sync, try other buffer if other
                 * buffer is configured, return 0 otherwise. If the instance is
                 * out of sync, do not update the 'next_rx_buf_to_use' since it
-                * will correct on subsequent calls */
+                * will correct on subsequent calls
+                */
                if (drvdata->rx_ping_pong != 0)
                        addr = (void __iomem __force *)((u32 __force)addr ^
                                                         XEL_BUFFER_OFFSET);
@@ -408,13 +413,15 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
                        return 0;       /* No data was available */
        }
 
-       /* Get the protocol type of the ethernet frame that arrived */
+       /* Get the protocol type of the ethernet frame that arrived
+        */
        proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
                        XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
                        XEL_RPLR_LENGTH_MASK);
 
        /* Check if received ethernet frame is a raw ethernet frame
-        * or an IP packet or an ARP packet */
+        * or an IP packet or an ARP packet
+        */
        if (proto_type > ETH_DATA_LEN) {
 
                if (proto_type == ETH_P_IP) {
@@ -430,7 +437,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
                        length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
                else
                        /* Field contains type other than IP or ARP, use max
-                        * frame size and let user parse it */
+                        * frame size and let user parse it
+                        */
                        length = ETH_FRAME_LEN + ETH_FCS_LEN;
        } else
                /* Use the length in the frame, plus the header and trailer */
@@ -440,7 +448,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
                length = maxlen;
 
        /* Read from the EmacLite device */
-       xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
+       xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET),
                                data, length);
 
        /* Acknowledge the frame */
@@ -471,7 +479,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
        /* Determine the expected Tx buffer address */
        addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;
 
-       xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
+       xemaclite_aligned_write(address_ptr, (u32 __force *)addr, ETH_ALEN);
 
        xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
 
@@ -488,7 +496,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
 /**
  * xemaclite_set_mac_address - Set the MAC address for this device
  * @dev:       Pointer to the network device instance
- * @addr:      Void pointer to the sockaddr structure
+ * @address:   Void pointer to the sockaddr structure
  *
 * This function copies the HW address from the sockaddr structure to the
  * net_device structure and updates the address in HW.
@@ -564,19 +572,19 @@ static void xemaclite_tx_handler(struct net_device *dev)
        struct net_local *lp = netdev_priv(dev);
 
        dev->stats.tx_packets++;
-       if (lp->deferred_skb) {
-               if (xemaclite_send_data(lp,
-                                       (u8 *) lp->deferred_skb->data,
-                                       lp->deferred_skb->len) != 0)
-                       return;
-               else {
-                       dev->stats.tx_bytes += lp->deferred_skb->len;
-                       dev_kfree_skb_irq(lp->deferred_skb);
-                       lp->deferred_skb = NULL;
-                       netif_trans_update(dev); /* prevent tx timeout */
-                       netif_wake_queue(dev);
-               }
-       }
+
+       if (!lp->deferred_skb)
+               return;
+
+       if (xemaclite_send_data(lp, (u8 *)lp->deferred_skb->data,
+                               lp->deferred_skb->len))
+               return;
+
+       dev->stats.tx_bytes += lp->deferred_skb->len;
+       dev_kfree_skb_irq(lp->deferred_skb);
+       lp->deferred_skb = NULL;
+       netif_trans_update(dev); /* prevent tx timeout */
+       netif_wake_queue(dev);
 }
 
 /**
@@ -602,18 +610,18 @@ static void xemaclite_rx_handler(struct net_device *dev)
                return;
        }
 
-       /*
-        * A new skb should have the data halfword aligned, but this code is
+       /* A new skb should have the data halfword aligned, but this code is
         * here just in case that isn't true. Calculate how many
         * bytes we should reserve to get the data to start on a word
-        * boundary */
+        * boundary
+        */
        align = BUFFER_ALIGN(skb->data);
        if (align)
                skb_reserve(skb, align);
 
        skb_reserve(skb, 2);
 
-       len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
+       len = xemaclite_recv_data(lp, (u8 *)skb->data, len);
 
        if (!len) {
                dev->stats.rx_errors++;
@@ -639,6 +647,8 @@ static void xemaclite_rx_handler(struct net_device *dev)
  * @dev_id:    Void pointer to the network device instance used as callback
  *             reference
  *
+ * Return:     IRQ_HANDLED
+ *
  * This function handles the Tx and Rx interrupts of the EmacLite device.
  */
 static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
@@ -706,8 +716,8 @@ static int xemaclite_mdio_wait(struct net_local *lp)
        unsigned long end = jiffies + 2;
 
        /* wait for the MDIO interface to not be busy or timeout
-          after some time.
-       */
+        * after some time.
+        */
        while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
                        XEL_MDIOCTRL_MDIOSTS_MASK) {
                if (time_before_eq(end, jiffies)) {
@@ -757,7 +767,7 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
        rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
 
        dev_dbg(&lp->ndev->dev,
-               "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
+               "%s(phy_id=%i, reg=%x) == %x\n", __func__,
                phy_id, reg, rc);
 
        return rc;
@@ -772,6 +782,8 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
  *
  * This function waits till the device is ready to accept a new MDIO
  * request and then writes the val to the MDIO Write Data register.
+ *
+ * Return:      0 upon success or a negative error upon failure
  */
 static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
                                u16 val)
@@ -780,7 +792,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
        u32 ctrl_reg;
 
        dev_dbg(&lp->ndev->dev,
-               "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
+               "%s(phy_id=%i, reg=%x, val=%x)\n", __func__,
                phy_id, reg, val);
 
        if (xemaclite_mdio_wait(lp))
@@ -805,7 +817,7 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
 /**
  * xemaclite_mdio_setup - Register mii_bus for the Emaclite device
  * @lp:                Pointer to the Emaclite device private data
- * @ofdev:     Pointer to OF device structure
+ * @dev:       Pointer to OF device structure
  *
  * This function enables MDIO bus in the Emaclite device and registers a
  * mii_bus.
@@ -905,6 +917,9 @@ static void xemaclite_adjust_link(struct net_device *ndev)
  * This function sets the MAC address, requests an IRQ and enables interrupts
  * for the Emaclite device and starts the Tx queue.
  * It also connects to the phy device, if MDIO is included in Emaclite device.
+ *
+ * Return:     0 on success. -ENODEV, if PHY cannot be connected.
+ *             Non-zero error value on failure.
  */
 static int xemaclite_open(struct net_device *dev)
 {
@@ -975,6 +990,8 @@ static int xemaclite_open(struct net_device *dev)
  * This function stops the Tx queue, disables interrupts and frees the IRQ for
  * the Emaclite device.
  * It also disconnects the phy device associated with the Emaclite device.
+ *
+ * Return:     0, always.
  */
 static int xemaclite_close(struct net_device *dev)
 {
@@ -1017,10 +1034,11 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
        new_skb = orig_skb;
 
        spin_lock_irqsave(&lp->reset_lock, flags);
-       if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) {
+       if (xemaclite_send_data(lp, (u8 *)new_skb->data, len) != 0) {
                /* If the Emaclite Tx buffer is busy, stop the Tx queue and
                 * defer the skb for transmission during the ISR, after the
-                * current transmission is complete */
+                * current transmission is complete
+                */
                netif_stop_queue(dev);
                lp->deferred_skb = new_skb;
                /* Take the time stamp now, since we can't do this in an ISR. */
@@ -1052,13 +1070,12 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
 {
        u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL);
 
-       if (p) {
-               return (bool)*p;
-       } else {
-               dev_warn(&ofdev->dev, "Parameter %s not found,"
-                       "defaulting to false\n", s);
+       if (!p) {
+               dev_warn(&ofdev->dev, "Parameter %s not found, defaulting to false\n", s);
                return false;
        }
+
+       return (bool)*p;
 }
 
 static const struct net_device_ops xemaclite_netdev_ops;
@@ -1066,7 +1083,6 @@ static const struct net_device_ops xemaclite_netdev_ops;
 /**
  * xemaclite_of_probe - Probe method for the Emaclite device.
  * @ofdev:     Pointer to OF device structure
- * @match:     Pointer to the structure used for matching a device
  *
  * This function probes for the Emaclite device in the device tree.
  * It initializes the driver data structure and the hardware, sets the MAC
index 750954be5a7403d0a6c752649866b43546354f88..d3eae123904575aba430883ab7a0ba4d28b8b07c 100644 (file)
@@ -1395,8 +1395,8 @@ static void fjes_watch_unshare_task(struct work_struct *work)
 
        while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
               (wait_time < 3000)) {
-               for (epidx = 0; epidx < hw->max_epid; epidx++) {
-                       if (epidx == hw->my_epid)
+               for (epidx = 0; epidx < max_epid; epidx++) {
+                       if (epidx == my_epid)
                                continue;
 
                        is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
@@ -1453,8 +1453,8 @@ static void fjes_watch_unshare_task(struct work_struct *work)
        }
 
        if (hw->hw_info.buffer_unshare_reserve_bit) {
-               for (epidx = 0; epidx < hw->max_epid; epidx++) {
-                       if (epidx == hw->my_epid)
+               for (epidx = 0; epidx < max_epid; epidx++) {
+                       if (epidx == my_epid)
                                continue;
 
                        if (test_bit(epidx,
index 750eaa53bf0ce59429d524ba0658ad6f488a4ba0..6acb6b5718b94c1050fdbef60a4a556e27c2275e 100644 (file)
@@ -236,7 +236,8 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
                }
                /* Update tunnel dst according to Geneve options. */
                ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
-                                       gnvh->options, gnvh->opt_len * 4);
+                                       gnvh->options, gnvh->opt_len * 4,
+                                       TUNNEL_GENEVE_OPT);
        } else {
                /* Drop packets w/ critical options,
                 * since we don't support any...
@@ -418,11 +419,12 @@ static int geneve_hlen(struct genevehdr *gh)
        return sizeof(*gh) + gh->opt_len * 4;
 }
 
-static struct sk_buff **geneve_gro_receive(struct sock *sk,
-                                          struct sk_buff **head,
-                                          struct sk_buff *skb)
+static struct sk_buff *geneve_gro_receive(struct sock *sk,
+                                         struct list_head *head,
+                                         struct sk_buff *skb)
 {
-       struct sk_buff *p, **pp = NULL;
+       struct sk_buff *pp = NULL;
+       struct sk_buff *p;
        struct genevehdr *gh, *gh2;
        unsigned int hlen, gh_len, off_gnv;
        const struct packet_offload *ptype;
@@ -449,7 +451,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
                        goto out;
        }
 
-       for (p = *head; p; p = p->next) {
+       list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;
 
@@ -476,7 +478,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
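Editorial note: these geneve hunks track the core GRO conversion from
open-coded skb->next chains to list_head: gro_receive handlers now take a
struct list_head *, iterate with list_for_each_entry(), and finish through
skb_gro_flush_final(), which folds the flush bit into the GRO control
block. The new handler shape, sketched under those assumptions:

	static struct sk_buff *gro_receive_sketch(struct sock *sk,
						  struct list_head *head,
						  struct sk_buff *skb)
	{
		struct sk_buff *p, *pp = NULL;
		int flush = 1;

		list_for_each_entry(p, head, list) {
			if (!NAPI_GRO_CB(p)->same_flow)
				continue;
			/* header comparisons set same_flow/pp here */
		}

		skb_gro_flush_final(skb, pp, flush);
		return pp;
	}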
@@ -674,7 +676,8 @@ static void geneve_build_header(struct genevehdr *geneveh,
        geneveh->proto_type = htons(ETH_P_TEB);
        geneveh->rsvd2 = 0;
 
-       ip_tunnel_info_opts_get(geneveh->options, info);
+       if (info->key.tun_flags & TUNNEL_GENEVE_OPT)
+               ip_tunnel_info_opts_get(geneveh->options, info);
 }
 
 static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
index 32f49c4ce45783aa507b8473f4a5ab60e495fa8f..d79a69dd2146d347b3f44892e2534fe06290ca4a 100644 (file)
@@ -878,10 +878,8 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
 
 static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
 {
-       unsigned char channel;
        int actual;
 
-       channel = cmd & SIXP_CHN_MASK;
        if ((cmd & SIXP_PRIO_DATA_MASK) != 0) {     /* idle ? */
 
        /* RX and DCD flags can only be set in the same prio command,
@@ -933,10 +931,9 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
 
 static void decode_std_command(struct sixpack *sp, unsigned char cmd)
 {
-       unsigned char checksum = 0, rest = 0, channel;
+       unsigned char checksum = 0, rest = 0;
        short i;
 
-       channel = cmd & SIXP_CHN_MASK;
        switch (cmd & SIXP_CMD_MASK) {     /* normal command */
        case SIXP_SEOF:
                if ((sp->rx_count == 0) && (sp->rx_count_cooked == 0)) {
index f347fd9c5b28370f6452f042bb7f59c0ec8a3cd3..777fa59f5e0cd5abdfb8390ac358d09cf77636a1 100644 (file)
 static const char banner[] __initconst = KERN_INFO \
        "AX.25: bpqether driver version 004\n";
 
-static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
-
-static char bpq_eth_addr[6];
-
 static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
 static int bpq_device_event(struct notifier_block *, unsigned long, void *);
 
@@ -501,8 +497,8 @@ static int bpq_new_device(struct net_device *edev)
        bpq->ethdev = edev;
        bpq->axdev = ndev;
 
-       memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr));
-       memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr));
+       eth_broadcast_addr(bpq->dest_addr);
+       eth_broadcast_addr(bpq->acpt_addr);
 
        err = register_netdevice(ndev);
        if (err)
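Editorial note: eth_broadcast_addr() replaces the open-coded copies from
the static bcast_addr array (which also used the stylistically mismatched
sizeof(bpq_eth_addr) as the length). The helper is effectively a memset,
sketched:

	/* sketch of the <linux/etherdevice.h> helper */
	static inline void eth_broadcast_addr_sketch(u8 *addr)
	{
		memset(addr, 0xff, 6 /* ETH_ALEN */);
	}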
index 1a924b867b0742b0aa3e5a15f4da3e6885173e74..4b6e308199d270cd455b7df0de20a8458f6b7941 100644 (file)
@@ -210,7 +210,7 @@ int netvsc_recv_callback(struct net_device *net,
 void netvsc_channel_cb(void *context);
 int netvsc_poll(struct napi_struct *napi, int budget);
 
-void rndis_set_subchannel(struct work_struct *w);
+int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
 int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
index 5d5bd513847fff4ff353e7c58d9967a354d06955..8e9d0ee1572b97120546171f2487240e6e21bc43 100644 (file)
@@ -65,6 +65,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
                               VM_PKT_DATA_INBAND, 0);
 }
 
+/* Worker to set up sub-channels during initial setup.
+ * The initial hotplug event occurs in softirq context
+ * and can't wait for the channels.
+ */
+static void netvsc_subchan_work(struct work_struct *w)
+{
+       struct netvsc_device *nvdev =
+               container_of(w, struct netvsc_device, subchan_work);
+       struct rndis_device *rdev;
+       int i, ret;
+
+       /* Avoid deadlock with device removal already under RTNL */
+       if (!rtnl_trylock()) {
+               schedule_work(w);
+               return;
+       }
+
+       rdev = nvdev->extension;
+       if (rdev) {
+               ret = rndis_set_subchannel(rdev->ndev, nvdev);
+               if (ret == 0) {
+                       netif_device_attach(rdev->ndev);
+               } else {
+                       /* fallback to only primary channel */
+                       for (i = 1; i < nvdev->num_chn; i++)
+                               netif_napi_del(&nvdev->chan_table[i].napi);
+
+                       nvdev->max_chn = 1;
+                       nvdev->num_chn = 1;
+               }
+       }
+
+       rtnl_unlock();
+}
+
 static struct netvsc_device *alloc_net_device(void)
 {
        struct netvsc_device *net_device;
@@ -81,7 +116,7 @@ static struct netvsc_device *alloc_net_device(void)
 
        init_completion(&net_device->channel_init_wait);
        init_waitqueue_head(&net_device->subchan_open);
-       INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
+       INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
 
        return net_device;
 }
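
netvsc_subchan_work() above relies on the trylock-or-requeue idiom: device
removal may already hold RTNL, so the worker must not block on rtnl_lock()
and instead reschedules itself. The idiom in isolation, as a kernel-style
sketch (names are illustrative):

        static void example_work(struct work_struct *w)
        {
                if (!rtnl_trylock()) {
                        schedule_work(w);       /* RTNL busy: try again later */
                        return;
                }
                /* ... RTNL-protected setup ... */
                rtnl_unlock();
        }
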
index fe2256bf1d137fea6b76c5e3a564b191e2b5da7c..cf4f40a04194a7fd31e619f41b56a0878186dab1 100644 (file)
@@ -329,7 +329,7 @@ static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
 }
 
 static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
-                              void *accel_priv,
+                              struct net_device *sb_dev,
                               select_queue_fallback_t fallback)
 {
        struct net_device_context *ndc = netdev_priv(ndev);
@@ -343,9 +343,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 
                if (vf_ops->ndo_select_queue)
                        txq = vf_ops->ndo_select_queue(vf_netdev, skb,
-                                                      accel_priv, fallback);
+                                                      sb_dev, fallback);
                else
-                       txq = fallback(vf_netdev, skb);
+                       txq = fallback(vf_netdev, skb, NULL);
 
                /* Record the queue selected by VF so that it can be
                 * used for common case where VF has more queues than
@@ -905,8 +905,20 @@ static int netvsc_attach(struct net_device *ndev,
        if (IS_ERR(nvdev))
                return PTR_ERR(nvdev);
 
-       /* Note: enable and attach happen when sub-channels setup */
+       if (nvdev->num_chn > 1) {
+               ret = rndis_set_subchannel(ndev, nvdev);
+
+               /* if unavailable, just proceed with one queue */
+               if (ret) {
+                       nvdev->max_chn = 1;
+                       nvdev->num_chn = 1;
+               }
+       }
+
+       /* In any case device is now ready */
+       netif_device_attach(ndev);
 
+       /* Note: enable and attach happen when sub-channels setup */
        netif_carrier_off(ndev);
 
        if (netif_running(ndev)) {
@@ -2089,6 +2101,9 @@ static int netvsc_probe(struct hv_device *dev,
 
        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
+       if (nvdev->num_chn > 1)
+               schedule_work(&nvdev->subchan_work);
+
        /* hw_features computed in rndis_netdev_set_hwcaps() */
        net->features = net->hw_features |
                NETIF_F_HIGHDMA | NETIF_F_SG |
index 5428bb26110262fdfb66daaac8463c91e7981d42..9b4e3c3787e5d33ff200a9711b1373b765b6052e 100644 (file)
@@ -1062,29 +1062,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
  * This breaks overlap of processing the host message for the
  * new primary channel with the initialization of sub-channels.
  */
-void rndis_set_subchannel(struct work_struct *w)
+int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
 {
-       struct netvsc_device *nvdev
-               = container_of(w, struct netvsc_device, subchan_work);
        struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
-       struct net_device_context *ndev_ctx;
-       struct rndis_device *rdev;
-       struct net_device *ndev;
-       struct hv_device *hv_dev;
+       struct net_device_context *ndev_ctx = netdev_priv(ndev);
+       struct hv_device *hv_dev = ndev_ctx->device_ctx;
+       struct rndis_device *rdev = nvdev->extension;
        int i, ret;
 
-       if (!rtnl_trylock()) {
-               schedule_work(w);
-               return;
-       }
-
-       rdev = nvdev->extension;
-       if (!rdev)
-               goto unlock;    /* device was removed */
-
-       ndev = rdev->ndev;
-       ndev_ctx = netdev_priv(ndev);
-       hv_dev = ndev_ctx->device_ctx;
+       ASSERT_RTNL();
 
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
@@ -1100,13 +1086,13 @@ void rndis_set_subchannel(struct work_struct *w)
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret) {
                netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
-               goto failed;
+               return ret;
        }
 
        wait_for_completion(&nvdev->channel_init_wait);
        if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
                netdev_err(ndev, "sub channel request failed\n");
-               goto failed;
+               return -EIO;
        }
 
        nvdev->num_chn = 1 +
@@ -1125,21 +1111,7 @@ void rndis_set_subchannel(struct work_struct *w)
        for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
                ndev_ctx->tx_table[i] = i % nvdev->num_chn;
 
-       netif_device_attach(ndev);
-       rtnl_unlock();
-       return;
-
-failed:
-       /* fallback to only primary channel */
-       for (i = 1; i < nvdev->num_chn; i++)
-               netif_napi_del(&nvdev->chan_table[i].napi);
-
-       nvdev->max_chn = 1;
-       nvdev->num_chn = 1;
-
-       netif_device_attach(ndev);
-unlock:
-       rtnl_unlock();
+       return 0;
 }
 
 static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
@@ -1360,21 +1332,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
                netif_napi_add(net, &net_device->chan_table[i].napi,
                               netvsc_poll, NAPI_POLL_WEIGHT);
 
-       if (net_device->num_chn > 1)
-               schedule_work(&net_device->subchan_work);
+       return net_device;
 
 out:
-       /* if unavailable, just proceed with one queue */
-       if (ret) {
-               net_device->max_chn = 1;
-               net_device->num_chn = 1;
-       }
-
-       /* No sub channels, device is ready */
-       if (net_device->num_chn == 1)
-               netif_device_attach(net);
-
-       return net_device;
+       /* setting up multiple channels failed */
+       net_device->max_chn = 1;
+       net_device->num_chn = 1;
 
 err_dev_remv:
        rndis_filter_device_remove(dev, net_device);
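
After this rework rndis_set_subchannel() is a plain synchronous helper: it
asserts RTNL instead of taking it, reports failure through its return value,
and leaves the single-queue fallback and the netif_device_attach() call to
its callers. A condensed sketch of the resulting calling convention, mirroring
netvsc_attach() and netvsc_subchan_work() above (the wrapper function name is
hypothetical):

        static void attach_with_subchannels(struct net_device *ndev,
                                            struct netvsc_device *nvdev)
        {
                int ret;

                rtnl_lock();
                ret = rndis_set_subchannel(ndev, nvdev);
                if (ret) {                      /* fall back to one queue */
                        nvdev->max_chn = 1;
                        nvdev->num_chn = 1;
                }
                netif_device_attach(ndev);      /* ready either way */
                rtnl_unlock();
        }
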
index 4377c26f714d0522ebf5d1de6ac774b6e42024ea..4a949569ec4c51668fe7b795caef7ece5d61854b 100644 (file)
@@ -75,10 +75,23 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
 {
        struct ipvl_dev *ipvlan;
        struct net_device *mdev = port->dev;
-       int err = 0;
+       unsigned int flags;
+       int err;
 
        ASSERT_RTNL();
        if (port->mode != nval) {
+               list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+                       flags = ipvlan->dev->flags;
+                       if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
+                               err = dev_change_flags(ipvlan->dev,
+                                                      flags | IFF_NOARP);
+                       } else {
+                               err = dev_change_flags(ipvlan->dev,
+                                                      flags & ~IFF_NOARP);
+                       }
+                       if (unlikely(err))
+                               goto fail;
+               }
                if (nval == IPVLAN_MODE_L3S) {
                        /* New mode is L3S */
                        err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
@@ -86,21 +99,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
                                mdev->l3mdev_ops = &ipvl_l3mdev_ops;
                                mdev->priv_flags |= IFF_L3MDEV_MASTER;
                        } else
-                               return err;
+                               goto fail;
                } else if (port->mode == IPVLAN_MODE_L3S) {
                        /* Old mode was L3S */
                        mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
                        ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
                        mdev->l3mdev_ops = NULL;
                }
-               list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
-                       if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S)
-                               ipvlan->dev->flags |= IFF_NOARP;
-                       else
-                               ipvlan->dev->flags &= ~IFF_NOARP;
-               }
                port->mode = nval;
        }
+       return 0;
+
+fail:
+       /* Undo the flags changes that have been done so far. */
+       list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) {
+               flags = ipvlan->dev->flags;
+               if (port->mode == IPVLAN_MODE_L3 ||
+                   port->mode == IPVLAN_MODE_L3S)
+                       dev_change_flags(ipvlan->dev, flags | IFF_NOARP);
+               else
+                       dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP);
+       }
+
        return err;
 }
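
The ipvlan hunk above turns direct flag writes into dev_change_flags() calls,
which can fail, and so adds an unwind path:
list_for_each_entry_continue_reverse() walks back over the devices whose
flags were already changed and restores them. The apply-then-unwind pattern
in plain, compilable C:

        #include <stdio.h>

        static int apply(int i) { return i == 2 ? -1 : 0; }     /* fail on #2 */
        static void undo(int i) { printf("undo %d\n", i); }

        int main(void)
        {
                int i;

                for (i = 0; i < 4; i++)
                        if (apply(i) < 0)
                                goto fail;
                return 0;
        fail:
                while (--i >= 0)        /* walk back over what succeeded */
                        undo(i);
                return 1;
        }
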
 
@@ -594,7 +614,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
        ipvlan->phy_dev = phy_dev;
        ipvlan->dev = dev;
        ipvlan->sfeatures = IPVLAN_FEATURES;
-       ipvlan_adjust_mtu(ipvlan, phy_dev);
+       if (!tb[IFLA_MTU])
+               ipvlan_adjust_mtu(ipvlan, phy_dev);
        INIT_LIST_HEAD(&ipvlan->addrs);
        spin_lock_init(&ipvlan->addrs_lock);
 
@@ -693,6 +714,7 @@ void ipvlan_link_setup(struct net_device *dev)
 {
        ether_setup(dev);
 
+       dev->max_mtu = ETH_MAX_MTU;
        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
        dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
        dev->netdev_ops = &ipvlan_netdev_ops;
index adde8fc45588ba12c82bd79cf830e3ed99b907e3..cfda146f3b3bbb799532a48e2705c2fd4b5f2661 100644 (file)
@@ -514,7 +514,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
        const struct macvlan_dev *vlan = netdev_priv(dev);
        const struct macvlan_port *port = vlan->port;
        const struct macvlan_dev *dest;
-       void *accel_priv = NULL;
 
        if (vlan->mode == MACVLAN_MODE_BRIDGE) {
                const struct ethhdr *eth = (void *)skb->data;
@@ -533,15 +532,10 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
                        return NET_XMIT_SUCCESS;
                }
        }
-
-       /* For packets that are non-multicast and not bridged we will pass
-        * the necessary information so that the lowerdev can distinguish
-        * the source of the packets via the accel_priv value.
-        */
-       accel_priv = vlan->accel_priv;
 xmit_world:
        skb->dev = vlan->lowerdev;
-       return dev_queue_xmit_accel(skb, accel_priv);
+       return dev_queue_xmit_accel(skb,
+                                   netdev_get_sb_channel(dev) ? dev : NULL);
 }
 
 static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
@@ -1647,6 +1641,7 @@ static int macvlan_device_event(struct notifier_block *unused,
 
        switch (event) {
        case NETDEV_UP:
+       case NETDEV_DOWN:
        case NETDEV_CHANGE:
                list_for_each_entry(vlan, &port->vlans, list)
                        netif_stacked_transfer_operstate(vlan->lowerdev,
index 83f7420ddea569126db0cc25719940892d760075..d00d42c845b76207afa21fe5d45686fd91323b30 100644 (file)
@@ -115,7 +115,8 @@ static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb,
 }
 
 static u16 net_failover_select_queue(struct net_device *dev,
-                                    struct sk_buff *skb, void *accel_priv,
+                                    struct sk_buff *skb,
+                                    struct net_device *sb_dev,
                                     select_queue_fallback_t fallback)
 {
        struct net_failover_info *nfo_info = netdev_priv(dev);
@@ -128,9 +129,9 @@ static u16 net_failover_select_queue(struct net_device *dev,
 
                if (ops->ndo_select_queue)
                        txq = ops->ndo_select_queue(primary_dev, skb,
-                                                   accel_priv, fallback);
+                                                   sb_dev, fallback);
                else
-                       txq = fallback(primary_dev, skb);
+                       txq = fallback(primary_dev, skb, NULL);
 
                qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
 
@@ -527,7 +528,7 @@ static int net_failover_slave_register(struct net_device *slave_dev,
 
        netif_addr_lock_bh(failover_dev);
        dev_uc_sync_multiple(slave_dev, failover_dev);
-       dev_uc_sync_multiple(slave_dev, failover_dev);
+       dev_mc_sync_multiple(slave_dev, failover_dev);
        netif_addr_unlock_bh(failover_dev);
 
        err = vlan_vids_add_by_dev(slave_dev, failover_dev);
index 449b2a1a18007e49c8d55993433cf865db893577..0fee1d06c0848f274baedb51ef30594cf4358332 100644 (file)
@@ -13,3 +13,7 @@ endif
 ifneq ($(CONFIG_NET_DEVLINK),)
 netdevsim-objs += devlink.o fib.o
 endif
+
+ifneq ($(CONFIG_XFRM_OFFLOAD),)
+netdevsim-objs += ipsec.o
+endif
index 75c25306d2347d34ee7f6847df1cd9e2dc866a40..c36d2a768202a2bb91f344b1dc9eb5e87e26ea69 100644 (file)
@@ -92,7 +92,7 @@ static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
 
 static bool nsim_xdp_offload_active(struct netdevsim *ns)
 {
-       return ns->xdp_prog_mode == XDP_ATTACHED_HW;
+       return ns->xdp_hw.prog;
 }
 
 static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
@@ -195,14 +195,14 @@ static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
        return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns));
 }
 
-static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
+static int
+nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
+                 struct xdp_attachment_info *xdp)
 {
        int err;
 
-       if (ns->xdp_prog && (bpf->flags ^ ns->xdp_flags) & XDP_FLAGS_MODES) {
-               NSIM_EA(bpf->extack, "program loaded with different flags");
+       if (!xdp_attachment_flags_ok(xdp, bpf))
                return -EBUSY;
-       }
 
        if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
                NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
@@ -219,18 +219,7 @@ static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
                        return err;
        }
 
-       if (ns->xdp_prog)
-               bpf_prog_put(ns->xdp_prog);
-
-       ns->xdp_prog = bpf->prog;
-       ns->xdp_flags = bpf->flags;
-
-       if (!bpf->prog)
-               ns->xdp_prog_mode = XDP_ATTACHED_NONE;
-       else if (bpf->command == XDP_SETUP_PROG)
-               ns->xdp_prog_mode = XDP_ATTACHED_DRV;
-       else
-               ns->xdp_prog_mode = XDP_ATTACHED_HW;
+       xdp_attachment_setup(xdp, bpf);
 
        return 0;
 }
@@ -290,10 +279,6 @@ static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
                NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
                return -EINVAL;
        }
-       if (nsim_xdp_offload_active(ns)) {
-               NSIM_EA(bpf->extack, "xdp offload active, can't load drv prog");
-               return -EBUSY;
-       }
        return 0;
 }
 
@@ -567,22 +552,21 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
                nsim_bpf_destroy_prog(bpf->offload.prog);
                return 0;
        case XDP_QUERY_PROG:
-               bpf->prog_attached = ns->xdp_prog_mode;
-               bpf->prog_id = ns->xdp_prog ? ns->xdp_prog->aux->id : 0;
-               bpf->prog_flags = ns->xdp_prog ? ns->xdp_flags : 0;
-               return 0;
+               return xdp_attachment_query(&ns->xdp, bpf);
+       case XDP_QUERY_PROG_HW:
+               return xdp_attachment_query(&ns->xdp_hw, bpf);
        case XDP_SETUP_PROG:
                err = nsim_setup_prog_checks(ns, bpf);
                if (err)
                        return err;
 
-               return nsim_xdp_set_prog(ns, bpf);
+               return nsim_xdp_set_prog(ns, bpf, &ns->xdp);
        case XDP_SETUP_PROG_HW:
                err = nsim_setup_prog_hw_checks(ns, bpf);
                if (err)
                        return err;
 
-               return nsim_xdp_set_prog(ns, bpf);
+               return nsim_xdp_set_prog(ns, bpf, &ns->xdp_hw);
        case BPF_OFFLOAD_MAP_ALLOC:
                if (!ns->bpf_map_accept)
                        return -EOPNOTSUPP;
@@ -637,6 +621,7 @@ void nsim_bpf_uninit(struct netdevsim *ns)
 {
        WARN_ON(!list_empty(&ns->bpf_bound_progs));
        WARN_ON(!list_empty(&ns->bpf_bound_maps));
-       WARN_ON(ns->xdp_prog);
+       WARN_ON(ns->xdp.prog);
+       WARN_ON(ns->xdp_hw.prog);
        WARN_ON(ns->bpf_offloaded);
 }
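
The nsim_bpf.c hunks above drop the driver's hand-rolled XDP bookkeeping
(xdp_prog, xdp_flags, xdp_prog_mode) in favour of the generic
xdp_attachment_info helpers from <net/xdp.h>, and track driver and hardware
programs separately so XDP_QUERY_PROG and XDP_QUERY_PROG_HW can be answered
independently. A condensed sketch of the helper flow in a driver's ndo_bpf
(function name illustrative):

        static int example_xdp_set(struct xdp_attachment_info *info,
                                   struct netdev_bpf *bpf)
        {
                if (!xdp_attachment_flags_ok(info, bpf))
                        return -EBUSY;  /* attach flags would change */
                /* ... install or remove bpf->prog in the datapath ... */
                xdp_attachment_setup(info, bpf);        /* record prog+flags */
                return 0;
        }
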
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
new file mode 100644 (file)
index 0000000..2dcf6cc
--- /dev/null
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */
+
+#include <crypto/aead.h>
+#include <linux/debugfs.h>
+#include <net/xfrm.h>
+
+#include "netdevsim.h"
+
+#define NSIM_IPSEC_AUTH_BITS   128
+
+static ssize_t nsim_dbg_netdev_ops_read(struct file *filp,
+                                       char __user *buffer,
+                                       size_t count, loff_t *ppos)
+{
+       struct netdevsim *ns = filp->private_data;
+       struct nsim_ipsec *ipsec = &ns->ipsec;
+       size_t bufsize;
+       char *buf, *p;
+       int len;
+       int i;
+
+       /* the buffer needed is
+        * (num SAs * 3 lines each * ~60 bytes per line) + one more line,
+        * rounded up here to 4 lines per SA for headroom
+        */
+       bufsize = (ipsec->count * 4 * 60) + 60;
+       buf = kzalloc(bufsize, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       p = buf;
+       p += snprintf(p, bufsize - (p - buf),
+                     "SA count=%u tx=%u\n",
+                     ipsec->count, ipsec->tx);
+
+       for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) {
+               struct nsim_sa *sap = &ipsec->sa[i];
+
+               if (!sap->used)
+                       continue;
+
+               p += snprintf(p, bufsize - (p - buf),
+                             "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n",
+                             i, (sap->rx ? 'r' : 't'), sap->ipaddr[0],
+                             sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]);
+               p += snprintf(p, bufsize - (p - buf),
+                             "sa[%i]    spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n",
+                             i, be32_to_cpu(sap->xs->id.spi),
+                             sap->xs->id.proto, sap->salt, sap->crypt);
+               p += snprintf(p, bufsize - (p - buf),
+                             "sa[%i]    key=0x%08x %08x %08x %08x\n",
+                             i, sap->key[0], sap->key[1],
+                             sap->key[2], sap->key[3]);
+       }
+
+       len = simple_read_from_buffer(buffer, count, ppos, buf, p - buf);
+
+       kfree(buf);
+       return len;
+}
+
+static const struct file_operations ipsec_dbg_fops = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .read = nsim_dbg_netdev_ops_read,
+};
+
+static int nsim_ipsec_find_empty_idx(struct nsim_ipsec *ipsec)
+{
+       u32 i;
+
+       if (ipsec->count == NSIM_IPSEC_MAX_SA_COUNT)
+               return -ENOSPC;
+
+       /* search sa table */
+       for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) {
+               if (!ipsec->sa[i].used)
+                       return i;
+       }
+
+       return -ENOSPC;
+}
+
+static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
+                                      u32 *mykey, u32 *mysalt)
+{
+       const char aes_gcm_name[] = "rfc4106(gcm(aes))";
+       struct net_device *dev = xs->xso.dev;
+       unsigned char *key_data;
+       char *alg_name = NULL;
+       int key_len;
+
+       if (!xs->aead) {
+               netdev_err(dev, "Unsupported IPsec algorithm\n");
+               return -EINVAL;
+       }
+
+       if (xs->aead->alg_icv_len != NSIM_IPSEC_AUTH_BITS) {
+               netdev_err(dev, "IPsec offload requires %d bit authentication\n",
+                          NSIM_IPSEC_AUTH_BITS);
+               return -EINVAL;
+       }
+
+       key_data = &xs->aead->alg_key[0];
+       key_len = xs->aead->alg_key_len;
+       alg_name = xs->aead->alg_name;
+
+       if (strcmp(alg_name, aes_gcm_name)) {
+               netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
+                          aes_gcm_name);
+               return -EINVAL;
+       }
+
+       /* 160 accounts for 16 byte key and 4 byte salt */
+       if (key_len > NSIM_IPSEC_AUTH_BITS) {
+               *mysalt = ((u32 *)key_data)[4];
+       } else if (key_len == NSIM_IPSEC_AUTH_BITS) {
+               *mysalt = 0;
+       } else {
+               netdev_err(dev, "IPsec hw offload only supports 128 bit keys with optional 32 bit salt\n");
+               return -EINVAL;
+       }
+       memcpy(mykey, key_data, 16);
+
+       return 0;
+}
+
+static int nsim_ipsec_add_sa(struct xfrm_state *xs)
+{
+       struct nsim_ipsec *ipsec;
+       struct net_device *dev;
+       struct netdevsim *ns;
+       struct nsim_sa sa;
+       u16 sa_idx;
+       int ret;
+
+       dev = xs->xso.dev;
+       ns = netdev_priv(dev);
+       ipsec = &ns->ipsec;
+
+       if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
+               netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
+                          xs->id.proto);
+               return -EINVAL;
+       }
+
+       if (xs->calg) {
+               netdev_err(dev, "Compression offload not supported\n");
+               return -EINVAL;
+       }
+
+       /* find the first unused index */
+       ret = nsim_ipsec_find_empty_idx(ipsec);
+       if (ret < 0) {
+               netdev_err(dev, "No space for SA in Rx table!\n");
+               return ret;
+       }
+       sa_idx = (u16)ret;
+
+       memset(&sa, 0, sizeof(sa));
+       sa.used = true;
+       sa.xs = xs;
+
+       if (sa.xs->id.proto & IPPROTO_ESP)
+               sa.crypt = xs->ealg || xs->aead;
+
+       /* get the key and salt */
+       ret = nsim_ipsec_parse_proto_keys(xs, sa.key, &sa.salt);
+       if (ret) {
+               netdev_err(dev, "Failed to get key data for SA table\n");
+               return ret;
+       }
+
+       if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+               sa.rx = true;
+
+               if (xs->props.family == AF_INET6)
+                       memcpy(sa.ipaddr, &xs->id.daddr.a6, 16);
+               else
+                       memcpy(&sa.ipaddr[3], &xs->id.daddr.a4, 4);
+       }
+
+       /* the preparations worked, so save the info */
+       memcpy(&ipsec->sa[sa_idx], &sa, sizeof(sa));
+
+       /* the XFRM stack doesn't like offload_handle == 0,
+        * so add a bitflag in case our array index is 0
+        */
+       xs->xso.offload_handle = sa_idx | NSIM_IPSEC_VALID;
+       ipsec->count++;
+
+       return 0;
+}
+
+static void nsim_ipsec_del_sa(struct xfrm_state *xs)
+{
+       struct netdevsim *ns = netdev_priv(xs->xso.dev);
+       struct nsim_ipsec *ipsec = &ns->ipsec;
+       u16 sa_idx;
+
+       sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID;
+       if (!ipsec->sa[sa_idx].used) {
+               netdev_err(ns->netdev, "Invalid SA for delete sa_idx=%d\n",
+                          sa_idx);
+               return;
+       }
+
+       memset(&ipsec->sa[sa_idx], 0, sizeof(struct nsim_sa));
+       ipsec->count--;
+}
+
+static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+{
+       struct netdevsim *ns = netdev_priv(xs->xso.dev);
+       struct nsim_ipsec *ipsec = &ns->ipsec;
+
+       ipsec->ok++;
+
+       return true;
+}
+
+static const struct xfrmdev_ops nsim_xfrmdev_ops = {
+       .xdo_dev_state_add      = nsim_ipsec_add_sa,
+       .xdo_dev_state_delete   = nsim_ipsec_del_sa,
+       .xdo_dev_offload_ok     = nsim_ipsec_offload_ok,
+};
+
+bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
+{
+       struct nsim_ipsec *ipsec = &ns->ipsec;
+       struct xfrm_state *xs;
+       struct nsim_sa *tsa;
+       u32 sa_idx;
+
+       /* do we even need to check this packet? */
+       if (!skb->sp)
+               return true;
+
+       if (unlikely(!skb->sp->len)) {
+               netdev_err(ns->netdev, "no xfrm state len = %d\n",
+                          skb->sp->len);
+               return false;
+       }
+
+       xs = xfrm_input_state(skb);
+       if (unlikely(!xs)) {
+               netdev_err(ns->netdev, "no xfrm_input_state() xs = %p\n", xs);
+               return false;
+       }
+
+       sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID;
+       if (unlikely(sa_idx >= NSIM_IPSEC_MAX_SA_COUNT)) {
+               netdev_err(ns->netdev, "bad sa_idx=%d max=%d\n",
+                          sa_idx, NSIM_IPSEC_MAX_SA_COUNT);
+               return false;
+       }
+
+       tsa = &ipsec->sa[sa_idx];
+       if (unlikely(!tsa->used)) {
+               netdev_err(ns->netdev, "unused sa_idx=%d\n", sa_idx);
+               return false;
+       }
+
+       if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
+               netdev_err(ns->netdev, "unexpected proto=%d\n", xs->id.proto);
+               return false;
+       }
+
+       ipsec->tx++;
+
+       return true;
+}
+
+void nsim_ipsec_init(struct netdevsim *ns)
+{
+       ns->netdev->xfrmdev_ops = &nsim_xfrmdev_ops;
+
+#define NSIM_ESP_FEATURES      (NETIF_F_HW_ESP | \
+                                NETIF_F_HW_ESP_TX_CSUM | \
+                                NETIF_F_GSO_ESP)
+
+       ns->netdev->features |= NSIM_ESP_FEATURES;
+       ns->netdev->hw_enc_features |= NSIM_ESP_FEATURES;
+
+       ns->ipsec.pfile = debugfs_create_file("ipsec", 0400, ns->ddir, ns,
+                                             &ipsec_dbg_fops);
+}
+
+void nsim_ipsec_teardown(struct netdevsim *ns)
+{
+       struct nsim_ipsec *ipsec = &ns->ipsec;
+
+       if (ipsec->count)
+               netdev_err(ns->netdev, "tearing down IPsec offload with %d SAs left\n",
+                          ipsec->count);
+       debugfs_remove_recursive(ipsec->pfile);
+}
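
One detail of the new file worth calling out: the XFRM core treats
xso.offload_handle == 0 as "no offload", so the driver cannot return a bare
array index, which may legitimately be 0. nsim_ipsec_add_sa() therefore ORs
in NSIM_IPSEC_VALID (bit 31) and the lookup paths mask it off again. The
encoding in isolation, as compilable C:

        #include <assert.h>
        #include <stdint.h>

        #define VALID_BIT (1u << 31)

        int main(void)
        {
                uint32_t sa_idx = 0;                    /* array slot 0 */
                uint32_t handle = sa_idx | VALID_BIT;   /* never zero */

                assert(handle != 0);
                assert((handle & ~VALID_BIT) == sa_idx);/* recover slot */
                return 0;
        }
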
index ec68f38213d9c31e4049893020e1dc2bb97eea3b..a7b179f0d95451ddb8f01300880077d019e717b3 100644 (file)
@@ -171,6 +171,8 @@ static int nsim_init(struct net_device *dev)
        if (err)
                goto err_unreg_dev;
 
+       nsim_ipsec_init(ns);
+
        return 0;
 
 err_unreg_dev:
@@ -186,6 +188,7 @@ static void nsim_uninit(struct net_device *dev)
 {
        struct netdevsim *ns = netdev_priv(dev);
 
+       nsim_ipsec_teardown(ns);
        nsim_devlink_teardown(ns);
        debugfs_remove_recursive(ns->ddir);
        nsim_bpf_uninit(ns);
@@ -203,11 +206,15 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct netdevsim *ns = netdev_priv(dev);
 
+       if (!nsim_ipsec_tx(ns, skb))
+               goto out;
+
        u64_stats_update_begin(&ns->syncp);
        ns->tx_packets++;
        ns->tx_bytes += skb->len;
        u64_stats_update_end(&ns->syncp);
 
+out:
        dev_kfree_skb(skb);
 
        return NETDEV_TX_OK;
@@ -221,8 +228,7 @@ static int nsim_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct netdevsim *ns = netdev_priv(dev);
 
-       if (ns->xdp_prog_mode == XDP_ATTACHED_DRV &&
-           new_mtu > NSIM_XDP_MAX_MTU)
+       if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU)
                return -EBUSY;
 
        dev->mtu = new_mtu;
@@ -260,7 +266,7 @@ nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f)
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, nsim_setup_tc_block_cb,
-                                            ns, ns);
+                                            ns, ns, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, nsim_setup_tc_block_cb, ns);
                return 0;
index 8ca50b72c3287f41130beb61da42cf1bec360177..0aeabbe81cc6fbe616188238a2ec3244766c4ccd 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/u64_stats_sync.h>
+#include <net/xdp.h>
 
 #define DRV_NAME       "netdevsim"
 
@@ -29,6 +30,27 @@ struct bpf_prog;
 struct dentry;
 struct nsim_vf_config;
 
+#define NSIM_IPSEC_MAX_SA_COUNT                33
+#define NSIM_IPSEC_VALID               BIT(31)
+
+struct nsim_sa {
+       struct xfrm_state *xs;
+       __be32 ipaddr[4];
+       u32 key[4];
+       u32 salt;
+       bool used;
+       bool crypt;
+       bool rx;
+};
+
+struct nsim_ipsec {
+       struct nsim_sa sa[NSIM_IPSEC_MAX_SA_COUNT];
+       struct dentry *pfile;
+       u32 count;
+       u32 tx;
+       u32 ok;
+};
+
 struct netdevsim {
        struct net_device *netdev;
 
@@ -46,9 +68,8 @@ struct netdevsim {
        struct bpf_prog *bpf_offloaded;
        u32 bpf_offloaded_id;
 
-       u32 xdp_flags;
-       int xdp_prog_mode;
-       struct bpf_prog *xdp_prog;
+       struct xdp_attachment_info xdp;
+       struct xdp_attachment_info xdp_hw;
 
        u32 prog_id_gen;
 
@@ -67,6 +88,7 @@ struct netdevsim {
 #if IS_ENABLED(CONFIG_NET_DEVLINK)
        struct devlink *devlink;
 #endif
+       struct nsim_ipsec ipsec;
 };
 
 extern struct dentry *nsim_ddir;
@@ -148,6 +170,25 @@ static inline void nsim_devlink_exit(void)
 }
 #endif
 
+#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
+void nsim_ipsec_init(struct netdevsim *ns);
+void nsim_ipsec_teardown(struct netdevsim *ns);
+bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb);
+#else
+static inline void nsim_ipsec_init(struct netdevsim *ns)
+{
+}
+
+static inline void nsim_ipsec_teardown(struct netdevsim *ns)
+{
+}
+
+static inline bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
+{
+       return true;
+}
+#endif
+
 static inline struct netdevsim *to_nsim(struct device *ptr)
 {
        return container_of(ptr, struct netdevsim, dev);
index 9f6f7ccd44f775686dd4807d52b409be407026b9..b12023bc2cab5feb15ceedbe2fc357dfcf37627e 100644 (file)
@@ -430,7 +430,7 @@ static int ntb_netdev_probe(struct device *client_dev)
        ndev->hw_features = ndev->features;
        ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);
 
-       random_ether_addr(ndev->perm_addr);
+       eth_random_addr(ndev->perm_addr);
        memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
 
        ndev->netdev_ops = &ntb_netdev_ops;
index 343989f9f9d981e201bedf66520f4be97567d6af..9beac427f9e87301c1c67d0b7e53fca7df4b516b 100644 (file)
@@ -92,7 +92,8 @@ config MDIO_CAVIUM
 
 config MDIO_GPIO
        tristate "GPIO lib-based bitbanged MDIO buses"
-       depends on MDIO_BITBANG && GPIOLIB
+       depends on MDIO_BITBANG
+       depends on GPIOLIB || COMPILE_TEST
        ---help---
          Supports GPIO lib-based MDIO busses.
 
@@ -214,6 +215,7 @@ config SFP
        tristate "SFP cage support"
        depends on I2C && PHYLINK
        select MDIO_I2C
+       imply HWMON
 
 config AMD_PHY
        tristate "AMD PHYs"
index 081d99aa39853097e7d486e813f344fb895598aa..78cad134a79ea3cff76683ff3d2e951d8792ea7d 100644 (file)
@@ -21,6 +21,7 @@
 #define MII_DP83811_SGMII_CTRL 0x09
 #define MII_DP83811_INT_STAT1  0x12
 #define MII_DP83811_INT_STAT2  0x13
+#define MII_DP83811_INT_STAT3  0x18
 #define MII_DP83811_RESET_CTRL 0x1f
 
 #define DP83811_HW_RESET       BIT(15)
 #define DP83811_OVERVOLTAGE_INT_EN     BIT(6)
 #define DP83811_UNDERVOLTAGE_INT_EN    BIT(7)
 
+/* INT_STAT3 bits */
+#define DP83811_LPS_INT_EN     BIT(0)
+#define DP83811_NO_FRAME_INT_EN        BIT(3)
+#define DP83811_POR_DONE_INT_EN        BIT(4)
+
 #define MII_DP83811_RXSOP1     0x04a5
 #define MII_DP83811_RXSOP2     0x04a6
 #define MII_DP83811_RXSOP3     0x04a7
@@ -81,6 +87,10 @@ static int dp83811_ack_interrupt(struct phy_device *phydev)
        if (err < 0)
                return err;
 
+       err = phy_read(phydev, MII_DP83811_INT_STAT3);
+       if (err < 0)
+               return err;
+
        return 0;
 }
 
@@ -216,13 +226,29 @@ static int dp83811_config_intr(struct phy_device *phydev)
                                DP83811_UNDERVOLTAGE_INT_EN);
 
                err = phy_write(phydev, MII_DP83811_INT_STAT2, misr_status);
+               if (err < 0)
+                       return err;
+
+               misr_status = phy_read(phydev, MII_DP83811_INT_STAT3);
+               if (misr_status < 0)
+                       return misr_status;
+
+               misr_status |= (DP83811_LPS_INT_EN |
+                               DP83811_NO_FRAME_INT_EN |
+                               DP83811_POR_DONE_INT_EN);
+
+               err = phy_write(phydev, MII_DP83811_INT_STAT3, misr_status);
 
        } else {
                err = phy_write(phydev, MII_DP83811_INT_STAT1, 0);
                if (err < 0)
                        return err;
 
-               err = phy_write(phydev, MII_DP83811_INT_STAT1, 0);
+               err = phy_write(phydev, MII_DP83811_INT_STAT2, 0);
+               if (err < 0)
+                       return err;
+
+               err = phy_write(phydev, MII_DP83811_INT_STAT3, 0);
        }
 
        return err;
@@ -258,21 +284,19 @@ static int dp83811_config_init(struct phy_device *phydev)
        if (err < 0)
                return err;
 
+       value = phy_read(phydev, MII_DP83811_SGMII_CTRL);
        if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
-               value = phy_read(phydev, MII_DP83811_SGMII_CTRL);
-               if (!(value & DP83811_SGMII_EN)) {
-                       err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
+               err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
                                        (DP83811_SGMII_EN | value));
-                       if (err < 0)
-                               return err;
-               } else {
-                       err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
-                                       (~DP83811_SGMII_EN & value));
-                       if (err < 0)
-                               return err;
-               }
+       } else {
+               err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
+                               (~DP83811_SGMII_EN & value));
        }
 
+       if (err < 0)
+               return err;
+
        value = DP83811_WOL_MAGIC_EN | DP83811_WOL_SECURE_ON | DP83811_WOL_EN;
 
        return phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
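
Besides wiring up INT_STAT3, the hunks above fix a copy-paste bug in the
interrupt-disable path: INT_STAT1 was cleared twice while INT_STAT2 was never
touched. The INT_STAT3 enable follows the usual read-modify-write idiom for
PHY registers, sketched here in isolation (kernel context assumed, helper
name illustrative):

        static int enable_irq_bits(struct phy_device *phydev, int reg, int bits)
        {
                int val = phy_read(phydev, reg);

                if (val < 0)
                        return val;     /* propagate the MDIO error */
                return phy_write(phydev, reg, val | bits);
        }
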
index 001fe1df75572687e527c34eb608370d446bc670..67b260877f305a33c8a8b4aa37d5862478fe7835 100644 (file)
@@ -259,10 +259,8 @@ static int __init fixed_mdio_bus_init(void)
        int ret;
 
        pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0);
-       if (IS_ERR(pdev)) {
-               ret = PTR_ERR(pdev);
-               goto err_pdev;
-       }
+       if (IS_ERR(pdev))
+               return PTR_ERR(pdev);
 
        fmb->mii_bus = mdiobus_alloc();
        if (fmb->mii_bus == NULL) {
@@ -287,7 +285,6 @@ static int __init fixed_mdio_bus_init(void)
        mdiobus_free(fmb->mii_bus);
 err_mdiobus_reg:
        platform_device_unregister(pdev);
-err_pdev:
        return ret;
 }
 module_init(fixed_mdio_bus_init);
index 082ffef0dec4e31e6f92f7b11ad020a7b9a16376..bc90764a8b8dcd7dd7140c20f39e8538d47de23e 100644 (file)
 struct mdio_mux_gpio_state {
        struct gpio_descs *gpios;
        void *mux_handle;
+       int values[];
 };
 
 static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
                                   void *data)
 {
        struct mdio_mux_gpio_state *s = data;
-       int values[s->gpios->ndescs];
        unsigned int n;
 
        if (current_child == desired_child)
                return 0;
 
        for (n = 0; n < s->gpios->ndescs; n++)
-               values[n] = (desired_child >> n) & 1;
+               s->values[n] = (desired_child >> n) & 1;
 
        gpiod_set_array_value_cansleep(s->gpios->ndescs, s->gpios->desc,
-                                      values);
+                                      s->values);
 
        return 0;
 }
@@ -44,15 +44,21 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
 static int mdio_mux_gpio_probe(struct platform_device *pdev)
 {
        struct mdio_mux_gpio_state *s;
+       struct gpio_descs *gpios;
        int r;
 
-       s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
-       if (!s)
+       gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW);
+       if (IS_ERR(gpios))
+               return PTR_ERR(gpios);
+
+       s = devm_kzalloc(&pdev->dev, struct_size(s, values, gpios->ndescs),
+                        GFP_KERNEL);
+       if (!s) {
+               gpiod_put_array(gpios);
                return -ENOMEM;
+       }
 
-       s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW);
-       if (IS_ERR(s->gpios))
-               return PTR_ERR(s->gpios);
+       s->gpios = gpios;
 
        r = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
                          mdio_mux_gpio_switch_fn, &s->mux_handle, s, NULL);
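
The mdio-mux-gpio hunks above remove a variable-length array from the stack
by turning it into a flexible array member sized at allocation time with
struct_size(), part of the kernel-wide VLA removal. The allocation pattern in
plain C, with the size computation struct_size() performs expanded by hand
(without its overflow checking):

        #include <stdlib.h>

        struct state {
                int ndescs;
                int values[];   /* flexible array member */
        };

        static struct state *state_alloc(int ndescs)
        {
                struct state *s = calloc(1, sizeof(*s) + sizeof(int) * ndescs);

                if (s)
                        s->ndescs = ndescs;
                return s;
        }
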
index 537297d2b4b4309adeacd1e66541b2aa1830b8b8..d2baedc4ea91fb6f7221984e1e54b7d7f94b20de 100644 (file)
@@ -467,6 +467,14 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
 }
 EXPORT_SYMBOL(phy_mii_ioctl);
 
+static int phy_config_aneg(struct phy_device *phydev)
+{
+       if (phydev->drv->config_aneg)
+               return phydev->drv->config_aneg(phydev);
+       else
+               return genphy_config_aneg(phydev);
+}
+
 /**
  * phy_start_aneg_priv - start auto-negotiation for this PHY device
  * @phydev: the phy_device struct
@@ -493,10 +501,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
        /* Invalidate LP advertising flags */
        phydev->lp_advertising = 0;
 
-       if (phydev->drv->config_aneg)
-               err = phydev->drv->config_aneg(phydev);
-       else
-               err = genphy_config_aneg(phydev);
+       err = phy_config_aneg(phydev);
        if (err < 0)
                goto out_unlock;
 
@@ -546,6 +551,84 @@ int phy_start_aneg(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(phy_start_aneg);
 
+static int phy_poll_aneg_done(struct phy_device *phydev)
+{
+       unsigned int retries = 100;
+       int ret;
+
+       do {
+               msleep(100);
+               ret = phy_aneg_done(phydev);
+       } while (!ret && --retries);
+
+       if (!ret)
+               return -ETIMEDOUT;
+
+       return ret < 0 ? ret : 0;
+}
+
+/**
+ * phy_speed_down - set speed to lowest speed supported by both link partners
+ * @phydev: the phy_device struct
+ * @sync: perform action synchronously
+ *
+ * Description: Typically used to save energy when waiting for a WoL packet
+ *
+ * WARNING: Setting sync to false may leave the system unable to suspend
+ * if the PHY generates an interrupt when finishing the autonegotiation.
+ * This interrupt may wake up the system immediately after suspend.
+ * Therefore use sync = false only if you're sure it's safe with the respective
+ * network chip.
+ */
+int phy_speed_down(struct phy_device *phydev, bool sync)
+{
+       u32 adv = phydev->lp_advertising & phydev->supported;
+       u32 adv_old = phydev->advertising;
+       int ret;
+
+       if (phydev->autoneg != AUTONEG_ENABLE)
+               return 0;
+
+       if (adv & PHY_10BT_FEATURES)
+               phydev->advertising &= ~(PHY_100BT_FEATURES |
+                                        PHY_1000BT_FEATURES);
+       else if (adv & PHY_100BT_FEATURES)
+               phydev->advertising &= ~PHY_1000BT_FEATURES;
+
+       if (phydev->advertising == adv_old)
+               return 0;
+
+       ret = phy_config_aneg(phydev);
+       if (ret)
+               return ret;
+
+       return sync ? phy_poll_aneg_done(phydev) : 0;
+}
+EXPORT_SYMBOL_GPL(phy_speed_down);
+
+/**
+ * phy_speed_up - (re)set advertised speeds to all supported speeds
+ * @phydev: the phy_device struct
+ *
+ * Description: Used to revert the effect of phy_speed_down
+ */
+int phy_speed_up(struct phy_device *phydev)
+{
+       u32 mask = PHY_10BT_FEATURES | PHY_100BT_FEATURES | PHY_1000BT_FEATURES;
+       u32 adv_old = phydev->advertising;
+
+       if (phydev->autoneg != AUTONEG_ENABLE)
+               return 0;
+
+       phydev->advertising = (adv_old & ~mask) | (phydev->supported & mask);
+
+       if (phydev->advertising == adv_old)
+               return 0;
+
+       return phy_config_aneg(phydev);
+}
+EXPORT_SYMBOL_GPL(phy_speed_up);
+
 /**
  * phy_start_machine - start PHY state machine tracking
  * @phydev: the phy_device struct
index 082fb40c656dc8b9da48cda1d65b58802ca0259f..7fc8508b5231d94beab4c45bf7666d15d4ef786f 100644 (file)
@@ -37,6 +37,9 @@
 #define RTL8201F_ISR                           0x1e
 #define RTL8201F_IER                           0x13
 
+#define RTL8366RB_POWER_SAVE                   0x15
+#define RTL8366RB_POWER_SAVE_ON                        BIT(12)
+
 MODULE_DESCRIPTION("Realtek PHY driver");
 MODULE_AUTHOR("Johnson Leung");
 MODULE_LICENSE("GPL");
@@ -128,6 +131,37 @@ static int rtl8211f_config_intr(struct phy_device *phydev)
        return phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
 }
 
+static int rtl8211_config_aneg(struct phy_device *phydev)
+{
+       int ret;
+
+       ret = genphy_config_aneg(phydev);
+       if (ret < 0)
+               return ret;
+
+       /* Quirk was copied from vendor driver. Unfortunately it includes no
+        * description of the magic numbers.
+        */
+       if (phydev->speed == SPEED_100 && phydev->autoneg == AUTONEG_DISABLE) {
+               phy_write(phydev, 0x17, 0x2138);
+               phy_write(phydev, 0x0e, 0x0260);
+       } else {
+               phy_write(phydev, 0x17, 0x2108);
+               phy_write(phydev, 0x0e, 0x0000);
+       }
+
+       return 0;
+}
+
+static int rtl8211c_config_init(struct phy_device *phydev)
+{
+       /* RTL8211C has an issue when operating in Gigabit slave mode */
+       phy_set_bits(phydev, MII_CTRL1000,
+                    CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
+
+       return genphy_config_init(phydev);
+}
+
 static int rtl8211f_config_init(struct phy_device *phydev)
 {
        int ret;
@@ -159,6 +193,24 @@ static int rtl8211b_resume(struct phy_device *phydev)
        return genphy_resume(phydev);
 }
 
+static int rtl8366rb_config_init(struct phy_device *phydev)
+{
+       int ret;
+
+       ret = genphy_config_init(phydev);
+       if (ret < 0)
+               return ret;
+
+       ret = phy_set_bits(phydev, RTL8366RB_POWER_SAVE,
+                          RTL8366RB_POWER_SAVE_ON);
+       if (ret) {
+               dev_err(&phydev->mdio.dev,
+                       "error enabling power management\n");
+       }
+
+       return ret;
+}
+
 static struct phy_driver realtek_drvs[] = {
        {
                .phy_id         = 0x00008201,
@@ -178,6 +230,14 @@ static struct phy_driver realtek_drvs[] = {
                .resume         = genphy_resume,
                .read_page      = rtl821x_read_page,
                .write_page     = rtl821x_write_page,
+       }, {
+               .phy_id         = 0x001cc910,
+               .name           = "RTL8211 Gigabit Ethernet",
+               .phy_id_mask    = 0x001fffff,
+               .features       = PHY_GBIT_FEATURES,
+               .config_aneg    = rtl8211_config_aneg,
+               .read_mmd       = &genphy_read_mmd_unsupported,
+               .write_mmd      = &genphy_write_mmd_unsupported,
        }, {
                .phy_id         = 0x001cc912,
                .name           = "RTL8211B Gigabit Ethernet",
@@ -190,6 +250,14 @@ static struct phy_driver realtek_drvs[] = {
                .write_mmd      = &genphy_write_mmd_unsupported,
                .suspend        = rtl8211b_suspend,
                .resume         = rtl8211b_resume,
+       }, {
+               .phy_id         = 0x001cc913,
+               .name           = "RTL8211C Gigabit Ethernet",
+               .phy_id_mask    = 0x001fffff,
+               .features       = PHY_GBIT_FEATURES,
+               .config_init    = rtl8211c_config_init,
+               .read_mmd       = &genphy_read_mmd_unsupported,
+               .write_mmd      = &genphy_write_mmd_unsupported,
        }, {
                .phy_id         = 0x001cc914,
                .name           = "RTL8211DN Gigabit Ethernet",
@@ -223,6 +291,15 @@ static struct phy_driver realtek_drvs[] = {
                .resume         = genphy_resume,
                .read_page      = rtl821x_read_page,
                .write_page     = rtl821x_write_page,
+       }, {
+               .phy_id         = 0x001cc961,
+               .name           = "RTL8366RB Gigabit Ethernet",
+               .phy_id_mask    = 0x001fffff,
+               .features       = PHY_GBIT_FEATURES,
+               .flags          = PHY_HAS_INTERRUPT,
+               .config_init    = &rtl8366rb_config_init,
+               .suspend        = genphy_suspend,
+               .resume         = genphy_resume,
        },
 };
 
@@ -230,10 +307,13 @@ module_phy_driver(realtek_drvs);
 
 static struct mdio_device_id __maybe_unused realtek_tbl[] = {
        { 0x001cc816, 0x001fffff },
+       { 0x001cc910, 0x001fffff },
        { 0x001cc912, 0x001fffff },
+       { 0x001cc913, 0x001fffff },
        { 0x001cc914, 0x001fffff },
        { 0x001cc915, 0x001fffff },
        { 0x001cc916, 0x001fffff },
+       { 0x001cc961, 0x001fffff },
        { }
 };
 
index c4c92db86dfa8449e4416cdf8b1e0cc70f2c2d39..5661226cf75b129b15877747ced78be732240779 100644 (file)
@@ -1,5 +1,7 @@
+#include <linux/ctype.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
+#include <linux/hwmon.h>
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
 #include <linux/jiffies.h>
@@ -131,6 +133,12 @@ struct sfp {
        unsigned int sm_retries;
 
        struct sfp_eeprom_id id;
+#if IS_ENABLED(CONFIG_HWMON)
+       struct sfp_diag diag;
+       struct device *hwmon_dev;
+       char *hwmon_name;
+#endif
 };
 
 static bool sff_module_supported(const struct sfp_eeprom_id *id)
@@ -316,6 +324,719 @@ static unsigned int sfp_check(void *buf, size_t len)
        return check;
 }
 
+/* hwmon */
+#if IS_ENABLED(CONFIG_HWMON)
+static umode_t sfp_hwmon_is_visible(const void *data,
+                                   enum hwmon_sensor_types type,
+                                   u32 attr, int channel)
+{
+       const struct sfp *sfp = data;
+
+       switch (type) {
+       case hwmon_temp:
+               switch (attr) {
+               case hwmon_temp_input:
+               case hwmon_temp_min_alarm:
+               case hwmon_temp_max_alarm:
+               case hwmon_temp_lcrit_alarm:
+               case hwmon_temp_crit_alarm:
+               case hwmon_temp_min:
+               case hwmon_temp_max:
+               case hwmon_temp_lcrit:
+               case hwmon_temp_crit:
+                       return 0444;
+               default:
+                       return 0;
+               }
+       case hwmon_in:
+               switch (attr) {
+               case hwmon_in_input:
+               case hwmon_in_min_alarm:
+               case hwmon_in_max_alarm:
+               case hwmon_in_lcrit_alarm:
+               case hwmon_in_crit_alarm:
+               case hwmon_in_min:
+               case hwmon_in_max:
+               case hwmon_in_lcrit:
+               case hwmon_in_crit:
+                       return 0444;
+               default:
+                       return 0;
+               }
+       case hwmon_curr:
+               switch (attr) {
+               case hwmon_curr_input:
+               case hwmon_curr_min_alarm:
+               case hwmon_curr_max_alarm:
+               case hwmon_curr_lcrit_alarm:
+               case hwmon_curr_crit_alarm:
+               case hwmon_curr_min:
+               case hwmon_curr_max:
+               case hwmon_curr_lcrit:
+               case hwmon_curr_crit:
+                       return 0444;
+               default:
+                       return 0;
+               }
+       case hwmon_power:
+               /* External calibration of receive power requires
+                * floating point arithmetic. Doing that in the kernel
+                * is not easy, so just skip it. If the module does
+                * not require external calibration, we can however
+                * show receiver power, since FP is then not needed.
+                */
+               if (sfp->id.ext.diagmon & SFP_DIAGMON_EXT_CAL &&
+                   channel == 1)
+                       return 0;
+               switch (attr) {
+               case hwmon_power_input:
+               case hwmon_power_min_alarm:
+               case hwmon_power_max_alarm:
+               case hwmon_power_lcrit_alarm:
+               case hwmon_power_crit_alarm:
+               case hwmon_power_min:
+               case hwmon_power_max:
+               case hwmon_power_lcrit:
+               case hwmon_power_crit:
+                       return 0444;
+               default:
+                       return 0;
+               }
+       default:
+               return 0;
+       }
+}
+
+static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value)
+{
+       __be16 val;
+       int err;
+
+       err = sfp_read(sfp, true, reg, &val, sizeof(val));
+       if (err < 0)
+               return err;
+
+       *value = be16_to_cpu(val);
+
+       return 0;
+}
+
+static void sfp_hwmon_to_rx_power(long *value)
+{
+       *value = DIV_ROUND_CLOSEST(*value, 100);
+}
+
+static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset,
+                               long *value)
+{
+       if (sfp->id.ext.diagmon & SFP_DIAGMON_EXT_CAL)
+               *value = DIV_ROUND_CLOSEST(*value * slope, 256) + offset;
+}
+
+static void sfp_hwmon_calibrate_temp(struct sfp *sfp, long *value)
+{
+       sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_t_slope),
+                           be16_to_cpu(sfp->diag.cal_t_offset), value);
+
+       if (*value >= 0x8000)
+               *value -= 0x10000;
+
+       *value = DIV_ROUND_CLOSEST(*value * 1000, 256);
+}
+
+static void sfp_hwmon_calibrate_vcc(struct sfp *sfp, long *value)
+{
+       sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_v_slope),
+                           be16_to_cpu(sfp->diag.cal_v_offset), value);
+
+       *value = DIV_ROUND_CLOSEST(*value, 10);
+}
+
+static void sfp_hwmon_calibrate_bias(struct sfp *sfp, long *value)
+{
+       sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_txi_slope),
+                           be16_to_cpu(sfp->diag.cal_txi_offset), value);
+
+       *value = DIV_ROUND_CLOSEST(*value, 500);
+}
+
+static void sfp_hwmon_calibrate_tx_power(struct sfp *sfp, long *value)
+{
+       sfp_hwmon_calibrate(sfp, be16_to_cpu(sfp->diag.cal_txpwr_slope),
+                           be16_to_cpu(sfp->diag.cal_txpwr_offset), value);
+
+       *value = DIV_ROUND_CLOSEST(*value, 10);
+}
+
+static int sfp_hwmon_read_temp(struct sfp *sfp, int reg, long *value)
+{
+       int err;
+
+       err = sfp_hwmon_read_sensor(sfp, reg, value);
+       if (err < 0)
+               return err;
+
+       sfp_hwmon_calibrate_temp(sfp, value);
+
+       return 0;
+}
+
+static int sfp_hwmon_read_vcc(struct sfp *sfp, int reg, long *value)
+{
+       int err;
+
+       err = sfp_hwmon_read_sensor(sfp, reg, value);
+       if (err < 0)
+               return err;
+
+       sfp_hwmon_calibrate_vcc(sfp, value);
+
+       return 0;
+}
+
+static int sfp_hwmon_read_bias(struct sfp *sfp, int reg, long *value)
+{
+       int err;
+
+       err = sfp_hwmon_read_sensor(sfp, reg, value);
+       if (err < 0)
+               return err;
+
+       sfp_hwmon_calibrate_bias(sfp, value);
+
+       return 0;
+}
+
+static int sfp_hwmon_read_tx_power(struct sfp *sfp, int reg, long *value)
+{
+       int err;
+
+       err = sfp_hwmon_read_sensor(sfp, reg, value);
+       if (err < 0)
+               return err;
+
+       sfp_hwmon_calibrate_tx_power(sfp, value);
+
+       return 0;
+}
+
+static int sfp_hwmon_read_rx_power(struct sfp *sfp, int reg, long *value)
+{
+       int err;
+
+       err = sfp_hwmon_read_sensor(sfp, reg, value);
+       if (err < 0)
+               return err;
+
+       sfp_hwmon_to_rx_power(value);
+
+       return 0;
+}
+
+static int sfp_hwmon_temp(struct sfp *sfp, u32 attr, long *value)
+{
+       u8 status;
+       int err;
+
+       switch (attr) {
+       case hwmon_temp_input:
+               return sfp_hwmon_read_temp(sfp, SFP_TEMP, value);
+
+       case hwmon_temp_lcrit:
+               *value = be16_to_cpu(sfp->diag.temp_low_alarm);
+               sfp_hwmon_calibrate_temp(sfp, value);
+               return 0;
+
+       case hwmon_temp_min:
+               *value = be16_to_cpu(sfp->diag.temp_low_warn);
+               sfp_hwmon_calibrate_temp(sfp, value);
+               return 0;
+       case hwmon_temp_max:
+               *value = be16_to_cpu(sfp->diag.temp_high_warn);
+               sfp_hwmon_calibrate_temp(sfp, value);
+               return 0;
+
+       case hwmon_temp_crit:
+               *value = be16_to_cpu(sfp->diag.temp_high_alarm);
+               sfp_hwmon_calibrate_temp(sfp, value);
+               return 0;
+
+       case hwmon_temp_lcrit_alarm:
+               err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_ALARM0_TEMP_LOW);
+               return 0;
+
+       case hwmon_temp_min_alarm:
+               err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_WARN0_TEMP_LOW);
+               return 0;
+
+       case hwmon_temp_max_alarm:
+               err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_WARN0_TEMP_HIGH);
+               return 0;
+
+       case hwmon_temp_crit_alarm:
+               err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_ALARM0_TEMP_HIGH);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return -EOPNOTSUPP;
+}
+
+static int sfp_hwmon_vcc(struct sfp *sfp, u32 attr, long *value)
+{
+       u8 status;
+       int err;
+
+       switch (attr) {
+       case hwmon_in_input:
+               return sfp_hwmon_read_vcc(sfp, SFP_VCC, value);
+
+       case hwmon_in_lcrit:
+               *value = be16_to_cpu(sfp->diag.volt_low_alarm);
+               sfp_hwmon_calibrate_vcc(sfp, value);
+               return 0;
+
+       case hwmon_in_min:
+               *value = be16_to_cpu(sfp->diag.volt_low_warn);
+               sfp_hwmon_calibrate_vcc(sfp, value);
+               return 0;
+
+       case hwmon_in_max:
+               *value = be16_to_cpu(sfp->diag.volt_high_warn);
+               sfp_hwmon_calibrate_vcc(sfp, value);
+               return 0;
+
+       case hwmon_in_crit:
+               *value = be16_to_cpu(sfp->diag.volt_high_alarm);
+               sfp_hwmon_calibrate_vcc(sfp, value);
+               return 0;
+
+       case hwmon_in_lcrit_alarm:
+               err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_ALARM0_VCC_LOW);
+               return 0;
+
+       case hwmon_in_min_alarm:
+               err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_WARN0_VCC_LOW);
+               return 0;
+
+       case hwmon_in_max_alarm:
+               err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_WARN0_VCC_HIGH);
+               return 0;
+
+       case hwmon_in_crit_alarm:
+               err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_ALARM0_VCC_HIGH);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return -EOPNOTSUPP;
+}
+
+static int sfp_hwmon_bias(struct sfp *sfp, u32 attr, long *value)
+{
+       u8 status;
+       int err;
+
+       switch (attr) {
+       case hwmon_curr_input:
+               return sfp_hwmon_read_bias(sfp, SFP_TX_BIAS, value);
+
+       case hwmon_curr_lcrit:
+               *value = be16_to_cpu(sfp->diag.bias_low_alarm);
+               sfp_hwmon_calibrate_bias(sfp, value);
+               return 0;
+
+       case hwmon_curr_min:
+               *value = be16_to_cpu(sfp->diag.bias_low_warn);
+               sfp_hwmon_calibrate_bias(sfp, value);
+               return 0;
+
+       case hwmon_curr_max:
+               *value = be16_to_cpu(sfp->diag.bias_high_warn);
+               sfp_hwmon_calibrate_bias(sfp, value);
+               return 0;
+
+       case hwmon_curr_crit:
+               *value = be16_to_cpu(sfp->diag.bias_high_alarm);
+               sfp_hwmon_calibrate_bias(sfp, value);
+               return 0;
+
+       case hwmon_curr_lcrit_alarm:
+               err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_ALARM0_TX_BIAS_LOW);
+               return 0;
+
+       case hwmon_curr_min_alarm:
+               err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_WARN0_TX_BIAS_LOW);
+               return 0;
+
+       case hwmon_curr_max_alarm:
+               err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_WARN0_TX_BIAS_HIGH);
+               return 0;
+
+       case hwmon_curr_crit_alarm:
+               err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_ALARM0_TX_BIAS_HIGH);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return -EOPNOTSUPP;
+}
+
+static int sfp_hwmon_tx_power(struct sfp *sfp, u32 attr, long *value)
+{
+       u8 status;
+       int err;
+
+       switch (attr) {
+       case hwmon_power_input:
+               return sfp_hwmon_read_tx_power(sfp, SFP_TX_POWER, value);
+
+       case hwmon_power_lcrit:
+               *value = be16_to_cpu(sfp->diag.txpwr_low_alarm);
+               sfp_hwmon_calibrate_tx_power(sfp, value);
+               return 0;
+
+       case hwmon_power_min:
+               *value = be16_to_cpu(sfp->diag.txpwr_low_warn);
+               sfp_hwmon_calibrate_tx_power(sfp, value);
+               return 0;
+
+       case hwmon_power_max:
+               *value = be16_to_cpu(sfp->diag.txpwr_high_warn);
+               sfp_hwmon_calibrate_tx_power(sfp, value);
+               return 0;
+
+       case hwmon_power_crit:
+               *value = be16_to_cpu(sfp->diag.txpwr_high_alarm);
+               sfp_hwmon_calibrate_tx_power(sfp, value);
+               return 0;
+
+       case hwmon_power_lcrit_alarm:
+               err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_ALARM0_TXPWR_LOW);
+               return 0;
+
+       case hwmon_power_min_alarm:
+               err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_WARN0_TXPWR_LOW);
+               return 0;
+
+       case hwmon_power_max_alarm:
+               err = sfp_read(sfp, true, SFP_WARN0, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_WARN0_TXPWR_HIGH);
+               return 0;
+
+       case hwmon_power_crit_alarm:
+               err = sfp_read(sfp, true, SFP_ALARM0, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_ALARM0_TXPWR_HIGH);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return -EOPNOTSUPP;
+}
+
+static int sfp_hwmon_rx_power(struct sfp *sfp, u32 attr, long *value)
+{
+       u8 status;
+       int err;
+
+       switch (attr) {
+       case hwmon_power_input:
+               return sfp_hwmon_read_rx_power(sfp, SFP_RX_POWER, value);
+
+       case hwmon_power_lcrit:
+               *value = be16_to_cpu(sfp->diag.rxpwr_low_alarm);
+               sfp_hwmon_to_rx_power(value);
+               return 0;
+
+       case hwmon_power_min:
+               *value = be16_to_cpu(sfp->diag.rxpwr_low_warn);
+               sfp_hwmon_to_rx_power(value);
+               return 0;
+
+       case hwmon_power_max:
+               *value = be16_to_cpu(sfp->diag.rxpwr_high_warn);
+               sfp_hwmon_to_rx_power(value);
+               return 0;
+
+       case hwmon_power_crit:
+               *value = be16_to_cpu(sfp->diag.rxpwr_high_alarm);
+               sfp_hwmon_to_rx_power(value);
+               return 0;
+
+       case hwmon_power_lcrit_alarm:
+               err = sfp_read(sfp, true, SFP_ALARM1, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_ALARM1_RXPWR_LOW);
+               return 0;
+
+       case hwmon_power_min_alarm:
+               err = sfp_read(sfp, true, SFP_WARN1, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_WARN1_RXPWR_LOW);
+               return 0;
+
+       case hwmon_power_max_alarm:
+               err = sfp_read(sfp, true, SFP_WARN1, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_WARN1_RXPWR_HIGH);
+               return 0;
+
+       case hwmon_power_crit_alarm:
+               err = sfp_read(sfp, true, SFP_ALARM1, &status, sizeof(status));
+               if (err < 0)
+                       return err;
+
+               *value = !!(status & SFP_ALARM1_RXPWR_HIGH);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return -EOPNOTSUPP;
+}
+
+static int sfp_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+                         u32 attr, int channel, long *value)
+{
+       struct sfp *sfp = dev_get_drvdata(dev);
+
+       switch (type) {
+       case hwmon_temp:
+               return sfp_hwmon_temp(sfp, attr, value);
+       case hwmon_in:
+               return sfp_hwmon_vcc(sfp, attr, value);
+       case hwmon_curr:
+               return sfp_hwmon_bias(sfp, attr, value);
+       case hwmon_power:
+               switch (channel) {
+               case 0:
+                       return sfp_hwmon_tx_power(sfp, attr, value);
+               case 1:
+                       return sfp_hwmon_rx_power(sfp, attr, value);
+               default:
+                       return -EOPNOTSUPP;
+               }
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static const struct hwmon_ops sfp_hwmon_ops = {
+       .is_visible = sfp_hwmon_is_visible,
+       .read = sfp_hwmon_read,
+};
+
+static u32 sfp_hwmon_chip_config[] = {
+       HWMON_C_REGISTER_TZ,
+       0,
+};
+
+static const struct hwmon_channel_info sfp_hwmon_chip = {
+       .type = hwmon_chip,
+       .config = sfp_hwmon_chip_config,
+};
+
+static u32 sfp_hwmon_temp_config[] = {
+       HWMON_T_INPUT |
+       HWMON_T_MAX | HWMON_T_MIN |
+       HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
+       HWMON_T_CRIT | HWMON_T_LCRIT |
+       HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM,
+       0,
+};
+
+static const struct hwmon_channel_info sfp_hwmon_temp_channel_info = {
+       .type = hwmon_temp,
+       .config = sfp_hwmon_temp_config,
+};
+
+static u32 sfp_hwmon_vcc_config[] = {
+       HWMON_I_INPUT |
+       HWMON_I_MAX | HWMON_I_MIN |
+       HWMON_I_MAX_ALARM | HWMON_I_MIN_ALARM |
+       HWMON_I_CRIT | HWMON_I_LCRIT |
+       HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM,
+       0,
+};
+
+static const struct hwmon_channel_info sfp_hwmon_vcc_channel_info = {
+       .type = hwmon_in,
+       .config = sfp_hwmon_vcc_config,
+};
+
+static u32 sfp_hwmon_bias_config[] = {
+       HWMON_C_INPUT |
+       HWMON_C_MAX | HWMON_C_MIN |
+       HWMON_C_MAX_ALARM | HWMON_C_MIN_ALARM |
+       HWMON_C_CRIT | HWMON_C_LCRIT |
+       HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM,
+       0,
+};
+
+static const struct hwmon_channel_info sfp_hwmon_bias_channel_info = {
+       .type = hwmon_curr,
+       .config = sfp_hwmon_bias_config,
+};
+
+static u32 sfp_hwmon_power_config[] = {
+       /* Transmit power */
+       HWMON_P_INPUT |
+       HWMON_P_MAX | HWMON_P_MIN |
+       HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
+       HWMON_P_CRIT | HWMON_P_LCRIT |
+       HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM,
+       /* Receive power */
+       HWMON_P_INPUT |
+       HWMON_P_MAX | HWMON_P_MIN |
+       HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
+       HWMON_P_CRIT | HWMON_P_LCRIT |
+       HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM,
+       0,
+};
+
+static const struct hwmon_channel_info sfp_hwmon_power_channel_info = {
+       .type = hwmon_power,
+       .config = sfp_hwmon_power_config,
+};
+
+static const struct hwmon_channel_info *sfp_hwmon_info[] = {
+       &sfp_hwmon_chip,
+       &sfp_hwmon_vcc_channel_info,
+       &sfp_hwmon_temp_channel_info,
+       &sfp_hwmon_bias_channel_info,
+       &sfp_hwmon_power_channel_info,
+       NULL,
+};
+
+static const struct hwmon_chip_info sfp_hwmon_chip_info = {
+       .ops = &sfp_hwmon_ops,
+       .info = sfp_hwmon_info,
+};
+
+static int sfp_hwmon_insert(struct sfp *sfp)
+{
+       int err, i;
+
+       if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE)
+               return 0;
+
+       if (!(sfp->id.ext.diagmon & SFP_DIAGMON_DDM))
+               return 0;
+
+       if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE) {
+               /* This driver in general does not support address
+                * change.
+                */
+               return 0;
+       }
+
+       err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag));
+       if (err < 0)
+               return err;
+
+       sfp->hwmon_name = kstrdup(dev_name(sfp->dev), GFP_KERNEL);
+       if (!sfp->hwmon_name)
+               return -ENOMEM;
+
+       for (i = 0; sfp->hwmon_name[i]; i++)
+               if (hwmon_is_bad_char(sfp->hwmon_name[i]))
+                       sfp->hwmon_name[i] = '_';
+
+       sfp->hwmon_dev = hwmon_device_register_with_info(sfp->dev,
+                                                        sfp->hwmon_name, sfp,
+                                                        &sfp_hwmon_chip_info,
+                                                        NULL);
+
+       return PTR_ERR_OR_ZERO(sfp->hwmon_dev);
+}
+
+static void sfp_hwmon_remove(struct sfp *sfp)
+{
+       hwmon_device_unregister(sfp->hwmon_dev);
+       kfree(sfp->hwmon_name);
+}
+#else
+static int sfp_hwmon_insert(struct sfp *sfp)
+{
+       return 0;
+}
+
+static void sfp_hwmon_remove(struct sfp *sfp)
+{
+}
+#endif
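
The channel-info tables above expose the SFP diagnostics through the standard hwmon sysfs ABI rather than a driver-private interface. As a rough user-space sketch (not part of this patch; the hwmon path is illustrative), the new attributes could be read like this. Per the hwmon ABI, temp1_input is in millidegrees Celsius, in0_input in millivolts, curr1_input in milliamps, and power1_input/power2_input (power channels 0/1, i.e. TX/RX) in microwatts:

    #include <stdio.h>

    /* Minimal sketch: dump the diagnostics this patch exports.
     * argv[1] is the hwmon directory, e.g. /sys/class/hwmon/hwmon3
     * (path illustrative; locate the right one via its "name" file).
     */
    static long read_attr(const char *dir, const char *attr)
    {
            char path[256];
            long val = -1;
            FILE *f;

            snprintf(path, sizeof(path), "%s/%s", dir, attr);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            if (fscanf(f, "%ld", &val) != 1)
                    val = -1;
            fclose(f);
            return val;
    }

    int main(int argc, char **argv)
    {
            if (argc != 2)
                    return 1;
            printf("temp:     %ld mdegC\n", read_attr(argv[1], "temp1_input"));
            printf("vcc:      %ld mV\n",    read_attr(argv[1], "in0_input"));
            printf("tx bias:  %ld mA\n",    read_attr(argv[1], "curr1_input"));
            printf("tx power: %ld uW\n",    read_attr(argv[1], "power1_input"));
            printf("rx power: %ld uW\n",    read_attr(argv[1], "power2_input"));
            return 0;
    }
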
+
 /* Helpers */
 static void sfp_module_tx_disable(struct sfp *sfp)
 {
@@ -636,6 +1357,10 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
                dev_warn(sfp->dev,
                         "module address swap to access page 0xA2 is not supported.\n");
 
+       ret = sfp_hwmon_insert(sfp);
+       if (ret < 0)
+               return ret;
+
        ret = sfp_module_insert(sfp->sfp_bus, &sfp->id);
        if (ret < 0)
                return ret;
@@ -647,6 +1372,8 @@ static void sfp_sm_mod_remove(struct sfp *sfp)
 {
        sfp_module_remove(sfp->sfp_bus);
 
+       sfp_hwmon_remove(sfp);
+
        if (sfp->mod_phy)
                sfp_sm_phy_detach(sfp);
 
index d9dd8fbfffc795d6e1023f542aa63e59a0b37b4c..fbf9ad429593ce6ab08a1d17ce7be726eecce814 100644 (file)
 #define PHY_ID_VSC8572                 0x000704d0
 #define PHY_ID_VSC8574                 0x000704a0
 #define PHY_ID_VSC8601                 0x00070420
+#define PHY_ID_VSC7385                 0x00070450
+#define PHY_ID_VSC7388                 0x00070480
+#define PHY_ID_VSC7395                 0x00070550
+#define PHY_ID_VSC7398                 0x00070580
 #define PHY_ID_VSC8662                 0x00070660
 #define PHY_ID_VSC8221                 0x000fc550
 #define PHY_ID_VSC8211                 0x000fc4b0
@@ -116,6 +120,137 @@ static int vsc824x_config_init(struct phy_device *phydev)
        return err;
 }
 
+#define VSC73XX_EXT_PAGE_ACCESS 0x1f
+
+static int vsc73xx_read_page(struct phy_device *phydev)
+{
+       return __phy_read(phydev, VSC73XX_EXT_PAGE_ACCESS);
+}
+
+static int vsc73xx_write_page(struct phy_device *phydev, int page)
+{
+       return __phy_write(phydev, VSC73XX_EXT_PAGE_ACCESS, page);
+}
+
+static void vsc73xx_config_init(struct phy_device *phydev)
+{
+       /* Receiver init */
+       phy_write(phydev, 0x1f, 0x2a30);
+       phy_modify(phydev, 0x0c, 0x0300, 0x0200);
+       phy_write(phydev, 0x1f, 0x0000);
+
+       /* Config LEDs 0x61 */
+       phy_modify(phydev, MII_TPISTATUS, 0xff00, 0x0061);
+}
+
+static int vsc738x_config_init(struct phy_device *phydev)
+{
+       u16 rev;
+       /* This magic sequence appears in the application note
+        * "VSC7385/7388 PHY Configuration".
+        *
+        * Maybe one day we will get to know what it all means.
+        */
+       phy_write(phydev, 0x1f, 0x2a30);
+       phy_modify(phydev, 0x08, 0x0200, 0x0200);
+       phy_write(phydev, 0x1f, 0x52b5);
+       phy_write(phydev, 0x10, 0xb68a);
+       phy_modify(phydev, 0x12, 0xff07, 0x0003);
+       phy_modify(phydev, 0x11, 0x00ff, 0x00a2);
+       phy_write(phydev, 0x10, 0x968a);
+       phy_write(phydev, 0x1f, 0x2a30);
+       phy_modify(phydev, 0x08, 0x0200, 0x0000);
+       phy_write(phydev, 0x1f, 0x0000);
+
+       /* Read revision */
+       rev = phy_read(phydev, MII_PHYSID2);
+       rev &= 0x0f;
+
+       /* Special quirk for revision 0 */
+       if (rev == 0) {
+               phy_write(phydev, 0x1f, 0x2a30);
+               phy_modify(phydev, 0x08, 0x0200, 0x0200);
+               phy_write(phydev, 0x1f, 0x52b5);
+               phy_write(phydev, 0x12, 0x0000);
+               phy_write(phydev, 0x11, 0x0689);
+               phy_write(phydev, 0x10, 0x8f92);
+               phy_write(phydev, 0x1f, 0x52b5);
+               phy_write(phydev, 0x12, 0x0000);
+               phy_write(phydev, 0x11, 0x0e35);
+               phy_write(phydev, 0x10, 0x9786);
+               phy_write(phydev, 0x1f, 0x2a30);
+               phy_modify(phydev, 0x08, 0x0200, 0x0000);
+               phy_write(phydev, 0x17, 0xff80);
+               phy_write(phydev, 0x17, 0x0000);
+       }
+
+       phy_write(phydev, 0x1f, 0x0000);
+       phy_write(phydev, 0x12, 0x0048);
+
+       if (rev == 0) {
+               phy_write(phydev, 0x1f, 0x2a30);
+               phy_write(phydev, 0x14, 0x6600);
+               phy_write(phydev, 0x1f, 0x0000);
+               phy_write(phydev, 0x18, 0xa24e);
+       } else {
+               phy_write(phydev, 0x1f, 0x2a30);
+               phy_modify(phydev, 0x16, 0x0fc0, 0x0240);
+               phy_modify(phydev, 0x14, 0x6000, 0x4000);
+               /* Bits 14-15 in extended register 0x14 control the DACG
+                * amplitude: 6 = -8%, 2 is the hardware default.
+                */
+               phy_write(phydev, 0x1f, 0x0001);
+               phy_modify(phydev, 0x14, 0xe000, 0x6000);
+               phy_write(phydev, 0x1f, 0x0000);
+       }
+
+       vsc73xx_config_init(phydev);
+
+       return genphy_config_init(phydev);
+}
+
+static int vsc739x_config_init(struct phy_device *phydev)
+{
+       /* This magic sequence appears in the VSC7395 SparX-G5e application
+        * note "VSC7395/VSC7398 PHY Configuration"
+        *
+        * Maybe one day we will get to know what it all means.
+        */
+       phy_write(phydev, 0x1f, 0x2a30);
+       phy_modify(phydev, 0x08, 0x0200, 0x0200);
+       phy_write(phydev, 0x1f, 0x52b5);
+       phy_write(phydev, 0x10, 0xb68a);
+       phy_modify(phydev, 0x12, 0xff07, 0x0003);
+       phy_modify(phydev, 0x11, 0x00ff, 0x00a2);
+       phy_write(phydev, 0x10, 0x968a);
+       phy_write(phydev, 0x1f, 0x2a30);
+       phy_modify(phydev, 0x08, 0x0200, 0x0000);
+       phy_write(phydev, 0x1f, 0x0000);
+
+       phy_write(phydev, 0x1f, 0x0000);
+       phy_write(phydev, 0x12, 0x0048);
+       phy_write(phydev, 0x1f, 0x2a30);
+       phy_modify(phydev, 0x16, 0x0fc0, 0x0240);
+       phy_modify(phydev, 0x14, 0x6000, 0x4000);
+       phy_write(phydev, 0x1f, 0x0001);
+       phy_modify(phydev, 0x14, 0xe000, 0x6000);
+       phy_write(phydev, 0x1f, 0x0000);
+
+       vsc73xx_config_init(phydev);
+
+       return genphy_config_init(phydev);
+}
+
+static int vsc73xx_config_aneg(struct phy_device *phydev)
+{
+       /* The VSC73xx switches do not like to be instructed to
+        * do autonegotiation in any way; they prefer that you just go
+        * with the power-on/reset defaults. Writing some registers will
+        * just make autonegotiation permanently fail.
+        */
+       return 0;
+}
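
With the read_page/write_page callbacks registered in the phy_driver entries below, callers can also use phylib's paged accessors instead of open-coding writes to register 0x1f. A hedged sketch, assuming the phy_read_paged() helper from phylib's paged-access API (the page and register values are only illustrative):

    /* Sketch: read extended register 0x14 from page 0x2a30. The helper
     * selects the page via vsc73xx_write_page(), performs the read and
     * restores the previous page, all under the MDIO bus lock.
     */
    static int vsc73xx_read_ext(struct phy_device *phydev)
    {
            return phy_read_paged(phydev, 0x2a30, 0x14);
    }
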
+
 /* This adds a skew for both TX and RX clocks, so the skew should only be
  * applied to "rgmii-id" interfaces. It may not work as expected
  * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. */
@@ -318,6 +453,42 @@ static struct phy_driver vsc82xx_driver[] = {
        .config_init    = &vsc8601_config_init,
        .ack_interrupt  = &vsc824x_ack_interrupt,
        .config_intr    = &vsc82xx_config_intr,
+}, {
+       .phy_id         = PHY_ID_VSC7385,
+       .name           = "Vitesse VSC7385",
+       .phy_id_mask    = 0x000ffff0,
+       .features       = PHY_GBIT_FEATURES,
+       .config_init    = vsc738x_config_init,
+       .config_aneg    = vsc73xx_config_aneg,
+       .read_page      = vsc73xx_read_page,
+       .write_page     = vsc73xx_write_page,
+}, {
+       .phy_id         = PHY_ID_VSC7388,
+       .name           = "Vitesse VSC7388",
+       .phy_id_mask    = 0x000ffff0,
+       .features       = PHY_GBIT_FEATURES,
+       .config_init    = vsc738x_config_init,
+       .config_aneg    = vsc73xx_config_aneg,
+       .read_page      = vsc73xx_read_page,
+       .write_page     = vsc73xx_write_page,
+}, {
+       .phy_id         = PHY_ID_VSC7395,
+       .name           = "Vitesse VSC7395",
+       .phy_id_mask    = 0x000ffff0,
+       .features       = PHY_GBIT_FEATURES,
+       .config_init    = vsc739x_config_init,
+       .config_aneg    = vsc73xx_config_aneg,
+       .read_page      = vsc73xx_read_page,
+       .write_page     = vsc73xx_write_page,
+}, {
+       .phy_id         = PHY_ID_VSC7398,
+       .name           = "Vitesse VSC7398",
+       .phy_id_mask    = 0x000ffff0,
+       .features       = PHY_GBIT_FEATURES,
+       .config_init    = vsc739x_config_init,
+       .config_aneg    = vsc73xx_config_aneg,
+       .read_page      = vsc73xx_read_page,
+       .write_page     = vsc73xx_write_page,
 }, {
        .phy_id         = PHY_ID_VSC8662,
        .name           = "Vitesse VSC8662",
@@ -358,6 +529,10 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
        { PHY_ID_VSC8514, 0x000ffff0 },
        { PHY_ID_VSC8572, 0x000ffff0 },
        { PHY_ID_VSC8574, 0x000ffff0 },
+       { PHY_ID_VSC7385, 0x000ffff0 },
+       { PHY_ID_VSC7388, 0x000ffff0 },
+       { PHY_ID_VSC7395, 0x000ffff0 },
+       { PHY_ID_VSC7398, 0x000ffff0 },
        { PHY_ID_VSC8662, 0x000ffff0 },
        { PHY_ID_VSC8221, 0x000ffff0 },
        { PHY_ID_VSC8211, 0x000ffff0 },
index 2e5150b0b8d52c5dd784a3df1818962d64972898..74a8782313cf5b0319a7f3bad936926c4f4481da 100644 (file)
@@ -33,17 +33,22 @@ struct gmii2rgmii {
        struct phy_device *phy_dev;
        struct phy_driver *phy_drv;
        struct phy_driver conv_phy_drv;
-       int addr;
+       struct mdio_device *mdio;
 };
 
 static int xgmiitorgmii_read_status(struct phy_device *phydev)
 {
        struct gmii2rgmii *priv = phydev->priv;
+       struct mii_bus *bus = priv->mdio->bus;
+       int addr = priv->mdio->addr;
        u16 val = 0;
+       int err;
 
-       priv->phy_drv->read_status(phydev);
+       err = priv->phy_drv->read_status(phydev);
+       if (err < 0)
+               return err;
 
-       val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
+       val = mdiobus_read(bus, addr, XILINX_GMII2RGMII_REG);
        val &= ~XILINX_GMII2RGMII_SPEED_MASK;
 
        if (phydev->speed == SPEED_1000)
@@ -53,7 +58,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
        else
                val |= BMCR_SPEED10;
 
-       mdiobus_write(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG, val);
+       mdiobus_write(bus, addr, XILINX_GMII2RGMII_REG, val);
 
        return 0;
 }
@@ -81,7 +86,12 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev)
                return -EPROBE_DEFER;
        }
 
-       priv->addr = mdiodev->addr;
+       if (!priv->phy_dev->drv) {
+               dev_info(dev, "Attached phy not ready\n");
+               return -EPROBE_DEFER;
+       }
+
+       priv->mdio = mdiodev;
        priv->phy_drv = priv->phy_dev->drv;
        memcpy(&priv->conv_phy_drv, priv->phy_dev->drv,
               sizeof(struct phy_driver));
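
The probe builds a shadow phy_driver by copying the attached PHY's ops; the part of the function elided by this hunk is where that copy would diverge from the original. Purely as an illustration of the pattern (these exact lines are not shown in the hunk and are hypothetical here):

    /* Hypothetical continuation: override read_status in the copy so
     * every link-status poll also fixes up the GMII2RGMII speed bits,
     * then point the PHY at the shadow driver.
     */
    priv->conv_phy_drv.read_status = xgmiitorgmii_read_status;
    priv->phy_dev->priv = priv;
    priv->phy_dev->drv = &priv->conv_phy_drv;
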
index de51e8f70f44ea6663b330d2ae41024e99865490..ce61231e96ea5fe27f512fbd0d80d4609997e508 100644 (file)
@@ -1107,7 +1107,7 @@ static const struct proto_ops pppoe_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = pppoe_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = sock_no_setsockopt,
index b070959737ffe744f08683926a486c66ee08bb4a..6a047d30e8c69f81cfb234113d66d03d216878ac 100644 (file)
 
 #define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
 
-static struct team_port *team_port_get_rcu(const struct net_device *dev)
-{
-       return rcu_dereference(dev->rx_handler_data);
-}
-
 static struct team_port *team_port_get_rtnl(const struct net_device *dev)
 {
        struct team_port *port = rtnl_dereference(dev->rx_handler_data);
@@ -1707,7 +1702,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            void *accel_priv, select_queue_fallback_t fallback)
+                            struct net_device *sb_dev,
+                            select_queue_fallback_t fallback)
 {
        /*
         * This helper function exists to help dev_pick_tx get the correct
index a192a017cc68878360505b93df151de3d0b9b730..7b5748f86c9c4544c32aab0db22372b3cd01f654 100644 (file)
@@ -607,7 +607,8 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 }
 
 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
-                           void *accel_priv, select_queue_fallback_t fallback)
+                           struct net_device *sb_dev,
+                           select_queue_fallback_t fallback)
 {
        struct tun_struct *tun = netdev_priv(dev);
        u16 ret;
@@ -1268,7 +1269,6 @@ static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
                return tun_xdp_set(dev, xdp->prog, xdp->extack);
        case XDP_QUERY_PROG:
                xdp->prog_id = tun_xdp_query(dev);
-               xdp->prog_attached = !!xdp->prog_id;
                return 0;
        default:
                return -EINVAL;
index 3d4f7959dabb9c39e17754df4f72013c89743d5a..8f41c6bda8e5a1fc5faf604cdc49914065be73b0 100644 (file)
@@ -691,24 +691,32 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
        u32 phyid;
        struct asix_common_private *priv;
 
-       usbnet_get_endpoints(dev,intf);
+       usbnet_get_endpoints(dev, intf);
 
-       /* Get the MAC address */
-       if (dev->driver_info->data & FLAG_EEPROM_MAC) {
-               for (i = 0; i < (ETH_ALEN >> 1); i++) {
-                       ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x04 + i,
-                                           0, 2, buf + i * 2, 0);
-                       if (ret < 0)
-                               break;
-               }
+       /* Maybe the boot loader passed the MAC address via device tree */
+       if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) {
+               netif_dbg(dev, ifup, dev->net,
+                         "MAC address read from device tree");
        } else {
-               ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
-                               0, 0, ETH_ALEN, buf, 0);
-       }
+               /* Try getting the MAC address from EEPROM */
+               if (dev->driver_info->data & FLAG_EEPROM_MAC) {
+                       for (i = 0; i < (ETH_ALEN >> 1); i++) {
+                               ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM,
+                                                   0x04 + i, 0, 2, buf + i * 2,
+                                                   0);
+                               if (ret < 0)
+                                       break;
+                       }
+               } else {
+                       ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
+                                           0, 0, ETH_ALEN, buf, 0);
+               }
 
-       if (ret < 0) {
-               netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
-               return ret;
+               if (ret < 0) {
+                       netdev_dbg(dev->net, "Failed to read MAC address: %d\n",
+                                  ret);
+                       return ret;
+               }
        }
 
        asix_set_netdev_dev_addr(dev, buf);
index 18d36dff97ea64d623547ebd0ceb3222bfa70163..424053bd8b21bdd000b1432d7f3d9425fbc2be09 100644 (file)
@@ -869,6 +869,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                default:
                        dev_warn(&intf->dev,
                                 "Couldn't detect memory size, assuming 32k\n");
+                       /* fall through */
                case 0x87654321:
                        catc_set_reg(catc, TxBufCount, 4);
                        catc_set_reg(catc, RxBufCount, 16);
index 288ecd9991713dcc0fb93372d39eefe3195db87e..78b16eb9e58c8ddb93e35ed965f00b1f173f3804 100644 (file)
@@ -99,6 +99,7 @@ static void tx_complete(struct urb *req)
        struct net_device *dev = skb->dev;
        struct usbpn_dev *pnd = netdev_priv(dev);
        int status = req->status;
+       unsigned long flags;
 
        switch (status) {
        case 0:
@@ -109,16 +110,17 @@ static void tx_complete(struct urb *req)
        case -ECONNRESET:
        case -ESHUTDOWN:
                dev->stats.tx_aborted_errors++;
+               /* fall through */
        default:
                dev->stats.tx_errors++;
                dev_dbg(&dev->dev, "TX error (%d)\n", status);
        }
        dev->stats.tx_packets++;
 
-       spin_lock(&pnd->tx_lock);
+       spin_lock_irqsave(&pnd->tx_lock, flags);
        pnd->tx_queue--;
        netif_wake_queue(dev);
-       spin_unlock(&pnd->tx_lock);
+       spin_unlock_irqrestore(&pnd->tx_lock, flags);
 
        dev_kfree_skb_any(skb);
        usb_free_urb(req);
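
The same irqsave conversion recurs in the hso, kaweth, r8152 and rtl8150 hunks below. URB completion handlers are not guaranteed to run with local interrupts disabled (they may execute in hard-irq, soft-irq or task context), so a plain spin_lock() there can deadlock against an interrupt handler taking the same lock. A minimal sketch of the safe pattern, with hypothetical names:

    static void example_complete(struct urb *urb)       /* hypothetical */
    {
            struct example_dev *dev = urb->context;     /* hypothetical */
            unsigned long flags;

            /* Save and restore the interrupt state rather than
             * assuming interrupts are already off in this context.
             */
            spin_lock_irqsave(&dev->lock, flags);
            /* ... update shared driver state ... */
            spin_unlock_irqrestore(&dev->lock, flags);
    }
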
index b0e8b9613054137215e2f502f9deeab3bbad8f80..1eaec648bd1f716db3d06622cdfb7834e64e4e38 100644 (file)
@@ -967,8 +967,7 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
 
        atomic_set(&ctx->stop, 1);
 
-       if (hrtimer_active(&ctx->tx_timer))
-               hrtimer_cancel(&ctx->tx_timer);
+       hrtimer_cancel(&ctx->tx_timer);
 
        tasklet_kill(&ctx->bh);
 
index e53883ad61073d428c431e795de1331db87c5070..184c24baca1527333d92ec927c48e958ad6c95f0 100644 (file)
@@ -999,6 +999,7 @@ static void read_bulk_callback(struct urb *urb)
        struct hso_net *odev = urb->context;
        struct net_device *net;
        int result;
+       unsigned long flags;
        int status = urb->status;
 
        /* is al ok?  (Filip: Who's Al ?) */
@@ -1028,11 +1029,11 @@ static void read_bulk_callback(struct urb *urb)
        if (urb->actual_length) {
                /* Handle the IP stream, add header and push it onto network
                 * stack if the packet is complete. */
-               spin_lock(&odev->net_lock);
+               spin_lock_irqsave(&odev->net_lock, flags);
                packetizeRx(odev, urb->transfer_buffer, urb->actual_length,
                            (urb->transfer_buffer_length >
                             urb->actual_length) ? 1 : 0);
-               spin_unlock(&odev->net_lock);
+               spin_unlock_irqrestore(&odev->net_lock, flags);
        }
 
        /* We are done with this URB, resubmit it. Prep the USB to wait for
@@ -1193,6 +1194,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
 {
        struct hso_serial *serial = urb->context;
        int status = urb->status;
+       unsigned long flags;
 
        hso_dbg(0x8, "--- Got serial_read_bulk callback %02x ---\n", status);
 
@@ -1216,10 +1218,10 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
        if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
                fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
        /* Valid data, handle RX data */
-       spin_lock(&serial->serial_lock);
+       spin_lock_irqsave(&serial->serial_lock, flags);
        serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
        put_rxbuf_data_and_resubmit_bulk_urb(serial);
-       spin_unlock(&serial->serial_lock);
+       spin_unlock_irqrestore(&serial->serial_lock, flags);
 }
 
 /*
@@ -1502,12 +1504,13 @@ static void tiocmget_intr_callback(struct urb *urb)
                DUMP(serial_state_notification,
                     sizeof(struct hso_serial_state_notification));
        } else {
+               unsigned long flags;
 
                UART_state_bitmap = le16_to_cpu(serial_state_notification->
                                                UART_state_bitmap);
                prev_UART_state_bitmap = tiocmget->prev_UART_state_bitmap;
                icount = &tiocmget->icount;
-               spin_lock(&serial->serial_lock);
+               spin_lock_irqsave(&serial->serial_lock, flags);
                if ((UART_state_bitmap & B_OVERRUN) !=
                   (prev_UART_state_bitmap & B_OVERRUN))
                        icount->parity++;
@@ -1530,7 +1533,7 @@ static void tiocmget_intr_callback(struct urb *urb)
                   (prev_UART_state_bitmap & B_RX_CARRIER))
                        icount->dcd++;
                tiocmget->prev_UART_state_bitmap = UART_state_bitmap;
-               spin_unlock(&serial->serial_lock);
+               spin_unlock_irqrestore(&serial->serial_lock, flags);
                tiocmget->intr_completed = 1;
                wake_up_interruptible(&tiocmget->waitq);
        }
@@ -1729,7 +1732,6 @@ static int hso_serial_ioctl(struct tty_struct *tty,
 /* starts a transmit */
 static void hso_kick_transmit(struct hso_serial *serial)
 {
-       u8 *temp;
        unsigned long flags;
        int res;
 
@@ -1745,14 +1747,12 @@ static void hso_kick_transmit(struct hso_serial *serial)
                goto out;
 
        /* Switch pointers around to avoid memcpy */
-       temp = serial->tx_buffer;
-       serial->tx_buffer = serial->tx_data;
-       serial->tx_data = temp;
+       swap(serial->tx_buffer, serial->tx_data);
        serial->tx_data_count = serial->tx_buffer_count;
        serial->tx_buffer_count = 0;
 
-       /* If temp is set, it means we switched buffers */
-       if (temp && serial->write_data) {
+       /* If serial->tx_data is set, it means we switched buffers */
+       if (serial->tx_data && serial->write_data) {
                res = serial->write_data(serial);
                if (res >= 0)
                        serial->tx_urb_used = 1;
@@ -1852,6 +1852,7 @@ static void intr_callback(struct urb *urb)
        struct hso_serial *serial;
        unsigned char *port_req;
        int status = urb->status;
+       unsigned long flags;
        int i;
 
        usb_mark_last_busy(urb->dev);
@@ -1879,7 +1880,7 @@ static void intr_callback(struct urb *urb)
                        if (serial != NULL) {
                                hso_dbg(0x1, "Pending read interrupt on port %d\n",
                                        i);
-                               spin_lock(&serial->serial_lock);
+                               spin_lock_irqsave(&serial->serial_lock, flags);
                                if (serial->rx_state == RX_IDLE &&
                                        serial->port.count > 0) {
                                        /* Setup and send a ctrl req read on
@@ -1893,7 +1894,8 @@ static void intr_callback(struct urb *urb)
                                        hso_dbg(0x1, "Already a read pending on port %d or port not open\n",
                                                i);
                                }
-                               spin_unlock(&serial->serial_lock);
+                               spin_unlock_irqrestore(&serial->serial_lock,
+                                                      flags);
                        }
                }
        }
@@ -1920,6 +1922,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
 {
        struct hso_serial *serial = urb->context;
        int status = urb->status;
+       unsigned long flags;
 
        /* sanity check */
        if (!serial) {
@@ -1927,9 +1930,9 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
                return;
        }
 
-       spin_lock(&serial->serial_lock);
+       spin_lock_irqsave(&serial->serial_lock, flags);
        serial->tx_urb_used = 0;
-       spin_unlock(&serial->serial_lock);
+       spin_unlock_irqrestore(&serial->serial_lock, flags);
        if (status) {
                handle_usb_error(status, __func__, serial->parent);
                return;
@@ -1971,14 +1974,15 @@ static void ctrl_callback(struct urb *urb)
        struct hso_serial *serial = urb->context;
        struct usb_ctrlrequest *req;
        int status = urb->status;
+       unsigned long flags;
 
        /* sanity check */
        if (!serial)
                return;
 
-       spin_lock(&serial->serial_lock);
+       spin_lock_irqsave(&serial->serial_lock, flags);
        serial->tx_urb_used = 0;
-       spin_unlock(&serial->serial_lock);
+       spin_unlock_irqrestore(&serial->serial_lock, flags);
        if (status) {
                handle_usb_error(status, __func__, serial->parent);
                return;
@@ -1994,9 +1998,9 @@ static void ctrl_callback(struct urb *urb)
            (USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) {
                /* response to a read command */
                serial->rx_urb_filled[0] = 1;
-               spin_lock(&serial->serial_lock);
+               spin_lock_irqsave(&serial->serial_lock, flags);
                put_rxbuf_data_and_resubmit_ctrl_urb(serial);
-               spin_unlock(&serial->serial_lock);
+               spin_unlock_irqrestore(&serial->serial_lock, flags);
        } else {
                hso_put_activity(serial->parent);
                tty_port_tty_wakeup(&serial->port);
index f1605833c5cf1d7eebe5f7189f0c4e43ba32e5ad..913e50bab0a2f6c039e30f8defe434284f5a487f 100644 (file)
@@ -587,7 +587,7 @@ static void kaweth_usb_receive(struct urb *urb)
        struct kaweth_device *kaweth = urb->context;
        struct net_device *net = kaweth->net;
        int status = urb->status;
-
+       unsigned long flags;
        int count = urb->actual_length;
        int count2 = urb->transfer_buffer_length;
 
@@ -619,12 +619,12 @@ static void kaweth_usb_receive(struct urb *urb)
                net->stats.rx_errors++;
                dev_dbg(dev, "Status was -EOVERFLOW.\n");
        }
-       spin_lock(&kaweth->device_lock);
+       spin_lock_irqsave(&kaweth->device_lock, flags);
        if (IS_BLOCKED(kaweth->status)) {
-               spin_unlock(&kaweth->device_lock);
+               spin_unlock_irqrestore(&kaweth->device_lock, flags);
                return;
        }
-       spin_unlock(&kaweth->device_lock);
+       spin_unlock_irqrestore(&kaweth->device_lock, flags);
 
        if(status && status != -EREMOTEIO && count != 1) {
                dev_err(&kaweth->intf->dev,
index 8dff87ec6d99c5dca122dcdb5d3697157564cfa2..6f2ea84bf0b27f525a6cd06096d5eea69708ca9c 100644 (file)
@@ -64,6 +64,7 @@
 #define DEFAULT_RX_CSUM_ENABLE         (true)
 #define DEFAULT_TSO_CSUM_ENABLE                (true)
 #define DEFAULT_VLAN_FILTER_ENABLE     (true)
+#define DEFAULT_VLAN_RX_OFFLOAD                (true)
 #define TX_OVERHEAD                    (8)
 #define RXW_PADDING                    2
 
@@ -1720,7 +1721,7 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
                                  "MAC address read from EEPROM");
                } else {
                        /* generate random MAC */
-                       random_ether_addr(addr);
+                       eth_random_addr(addr);
                        netif_dbg(dev, ifup, dev->net,
                                  "MAC address set to random addr");
                }
@@ -2298,7 +2299,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
        if ((ll_mtu % dev->maxpacket) == 0)
                return -EDOM;
 
-       ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+       ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
 
        netdev->mtu = new_mtu;
 
@@ -2364,6 +2365,11 @@ static int lan78xx_set_features(struct net_device *netdev,
        }
 
        if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
+       else
+               pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
+
+       if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
                pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
        else
                pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
@@ -2587,7 +2593,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
        buf |= FCT_TX_CTL_EN_;
        ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
 
-       ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+       ret = lan78xx_set_rx_max_frame_length(dev,
+                                             dev->net->mtu + VLAN_ETH_HLEN);
 
        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
        buf |= MAC_RX_RXEN_;
@@ -2975,6 +2982,12 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
        if (DEFAULT_TSO_CSUM_ENABLE)
                dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
 
+       if (DEFAULT_VLAN_RX_OFFLOAD)
+               dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+       if (DEFAULT_VLAN_FILTER_ENABLE)
+               dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
        dev->net->hw_features = dev->net->features;
 
        ret = lan78xx_setup_irq_domain(dev);
@@ -3039,8 +3052,13 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
                                    struct sk_buff *skb,
                                    u32 rx_cmd_a, u32 rx_cmd_b)
 {
+       /* HW Checksum offload appears to be flawed if used when not stripping
+        * VLAN headers. Drop back to S/W checksums under these conditions.
+        */
        if (!(dev->net->features & NETIF_F_RXCSUM) ||
-           unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
+           unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
+           ((rx_cmd_a & RX_CMD_A_FVTG_) &&
+            !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
                skb->ip_summed = CHECKSUM_NONE;
        } else {
                skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
@@ -3048,6 +3066,16 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
        }
 }
 
+static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
+                                   struct sk_buff *skb,
+                                   u32 rx_cmd_a, u32 rx_cmd_b)
+{
+       if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+           (rx_cmd_a & RX_CMD_A_FVTG_))
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                      (rx_cmd_b & 0xffff));
+}
+
 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
 {
        int             status;
@@ -3112,6 +3140,8 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
                        if (skb->len == size) {
                                lan78xx_rx_csum_offload(dev, skb,
                                                        rx_cmd_a, rx_cmd_b);
+                               lan78xx_rx_vlan_offload(dev, skb,
+                                                       rx_cmd_a, rx_cmd_b);
 
                                skb_trim(skb, skb->len - 4); /* remove fcs */
                                skb->truesize = size + sizeof(struct sk_buff);
@@ -3130,6 +3160,7 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
                        skb_set_tail_pointer(skb2, size);
 
                        lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
+                       lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
 
                        skb_trim(skb2, skb2->len - 4); /* remove fcs */
                        skb2->truesize = size + sizeof(struct sk_buff);
index 8e8b51f171f4fa227340e80009ce5c2c059db053..8fac8e132c5bd250f1a528a020adeb6eb2f75cd1 100644 (file)
@@ -1246,6 +1246,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81b3, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
        {QMI_FIXED_INTF(0x413c, 0x81b6, 8)},    /* Dell Wireless 5811e */
        {QMI_FIXED_INTF(0x413c, 0x81b6, 10)},   /* Dell Wireless 5811e */
+       {QMI_FIXED_INTF(0x413c, 0x81d7, 1)},    /* Dell Wireless 5821e */
        {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
        {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},    /* HP lt4120 Snapdragon X5 LTE */
        {QMI_FIXED_INTF(0x22de, 0x9061, 3)},    /* WeTelecom WPD-600N */
index 86f7196f9d91fbf55c791fff88687a43518d66d8..124211afb023fc8d729a8208aa43099b2b171690 100644 (file)
@@ -1252,6 +1252,7 @@ static void read_bulk_callback(struct urb *urb)
        int status = urb->status;
        struct rx_agg *agg;
        struct r8152 *tp;
+       unsigned long flags;
 
        agg = urb->context;
        if (!agg)
@@ -1281,9 +1282,9 @@ static void read_bulk_callback(struct urb *urb)
                if (urb->actual_length < ETH_ZLEN)
                        break;
 
-               spin_lock(&tp->rx_lock);
+               spin_lock_irqsave(&tp->rx_lock, flags);
                list_add_tail(&agg->list, &tp->rx_done);
-               spin_unlock(&tp->rx_lock);
+               spin_unlock_irqrestore(&tp->rx_lock, flags);
                napi_schedule(&tp->napi);
                return;
        case -ESHUTDOWN:
@@ -1311,6 +1312,7 @@ static void write_bulk_callback(struct urb *urb)
        struct net_device *netdev;
        struct tx_agg *agg;
        struct r8152 *tp;
+       unsigned long flags;
        int status = urb->status;
 
        agg = urb->context;
@@ -1332,9 +1334,9 @@ static void write_bulk_callback(struct urb *urb)
                stats->tx_bytes += agg->skb_len;
        }
 
-       spin_lock(&tp->tx_lock);
+       spin_lock_irqsave(&tp->tx_lock, flags);
        list_add_tail(&agg->list, &tp->tx_free);
-       spin_unlock(&tp->tx_lock);
+       spin_unlock_irqrestore(&tp->tx_lock, flags);
 
        usb_autopm_put_interface_async(tp->intf);
 
@@ -1374,6 +1376,7 @@ static void intr_callback(struct urb *urb)
        case -ECONNRESET:       /* unlink */
        case -ESHUTDOWN:
                netif_device_detach(tp->netdev);
+               /* fall through */
        case -ENOENT:
        case -EPROTO:
                netif_info(tp, intr, tp->netdev,
@@ -2739,6 +2742,7 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
                        r8152_mdio_write(tp, MII_BMCR, data);
 
                        data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
+                       /* fall through */
 
                default:
                        if (data != PHY_STAT_LAN_ON)
@@ -3962,7 +3966,8 @@ static int rtl8152_close(struct net_device *netdev)
 #ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&tp->pm_notifier);
 #endif
-       napi_disable(&tp->napi);
+       if (!test_bit(RTL8152_UNPLUG, &tp->flags))
+               napi_disable(&tp->napi);
        clear_bit(WORK_ENABLE, &tp->flags);
        usb_kill_urb(tp->intr_urb);
        cancel_delayed_work_sync(&tp->schedule);
index 5f565bd574da3bc7ce741e3b280a9ff5dece4352..0e81d4c441d9172f8b81c2f7bc0e9ed0d2c555c3 100644 (file)
@@ -391,6 +391,7 @@ static void read_bulk_callback(struct urb *urb)
        u16 rx_stat;
        int status = urb->status;
        int result;
+       unsigned long flags;
 
        dev = urb->context;
        if (!dev)
@@ -432,9 +433,9 @@ static void read_bulk_callback(struct urb *urb)
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += pkt_len;
 
-       spin_lock(&dev->rx_pool_lock);
+       spin_lock_irqsave(&dev->rx_pool_lock, flags);
        skb = pull_skb(dev);
-       spin_unlock(&dev->rx_pool_lock);
+       spin_unlock_irqrestore(&dev->rx_pool_lock, flags);
        if (!skb)
                goto resched;
 
index b6c9a2af37328d1037c3b0ba761256092556167e..2ff08bc103a9df8c114632ae6887226ec2dd24b6 100644 (file)
@@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644);
 /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
 #define VIRTIO_XDP_HEADROOM 256
 
+/* Separating two types of XDP xmit */
+#define VIRTIO_XDP_TX          BIT(0)
+#define VIRTIO_XDP_REDIR       BIT(1)
+
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
  * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -582,7 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
                                     struct receive_queue *rq,
                                     void *buf, void *ctx,
                                     unsigned int len,
-                                    bool *xdp_xmit)
+                                    unsigned int *xdp_xmit)
 {
        struct sk_buff *skb;
        struct bpf_prog *xdp_prog;
@@ -654,14 +658,14 @@ static struct sk_buff *receive_small(struct net_device *dev,
                                trace_xdp_exception(vi->dev, xdp_prog, act);
                                goto err_xdp;
                        }
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_TX;
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        err = xdp_do_redirect(dev, &xdp, xdp_prog);
                        if (err)
                                goto err_xdp;
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_REDIR;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
@@ -723,7 +727,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                         void *buf,
                                         void *ctx,
                                         unsigned int len,
-                                        bool *xdp_xmit)
+                                        unsigned int *xdp_xmit)
 {
        struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
        u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -818,7 +822,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                        put_page(xdp_page);
                                goto err_xdp;
                        }
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_TX;
                        if (unlikely(xdp_page != page))
                                put_page(page);
                        rcu_read_unlock();
@@ -830,7 +834,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                        put_page(xdp_page);
                                goto err_xdp;
                        }
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_REDIR;
                        if (unlikely(xdp_page != page))
                                put_page(page);
                        rcu_read_unlock();
@@ -939,7 +943,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 }
 
 static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
-                      void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
+                      void *buf, unsigned int len, void **ctx,
+                      unsigned int *xdp_xmit)
 {
        struct net_device *dev = vi->dev;
        struct sk_buff *skb;
@@ -1232,7 +1237,8 @@ static void refill_work(struct work_struct *work)
        }
 }
 
-static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
+static int virtnet_receive(struct receive_queue *rq, int budget,
+                          unsigned int *xdp_xmit)
 {
        struct virtnet_info *vi = rq->vq->vdev->priv;
        unsigned int len, received = 0, bytes = 0;
@@ -1321,7 +1327,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
        struct virtnet_info *vi = rq->vq->vdev->priv;
        struct send_queue *sq;
        unsigned int received, qp;
-       bool xdp_xmit = false;
+       unsigned int xdp_xmit = 0;
 
        virtnet_poll_cleantx(rq);
 
@@ -1331,12 +1337,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
        if (received < budget)
                virtqueue_napi_complete(napi, rq->vq, received);
 
-       if (xdp_xmit) {
+       if (xdp_xmit & VIRTIO_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (xdp_xmit & VIRTIO_XDP_TX) {
                qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
                     smp_processor_id();
                sq = &vi->sq[qp];
                virtqueue_kick(sq->vq);
-               xdp_do_flush_map();
        }
 
        return received;
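
Separating the two bits lets the poll loop finish each kind of pending XDP work exactly once: a redirect requires xdp_do_flush_map(), while locally queued frames only need a kick on the driver's own TX ring. Condensed from the hunks above (a fragment, not standalone code), the flow after this change is:

    unsigned int xdp_xmit = 0;

    /* RX handlers OR in the kind of work they generated */
    received = virtnet_receive(rq, budget, &xdp_xmit);

    if (xdp_xmit & VIRTIO_XDP_REDIR)
            xdp_do_flush_map();         /* flush redirect maps once */
    if (xdp_xmit & VIRTIO_XDP_TX)
            virtqueue_kick(sq->vq);     /* kick our TX queue once */
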
@@ -2335,7 +2343,6 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
                return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
        case XDP_QUERY_PROG:
                xdp->prog_id = virtnet_xdp_query(dev);
-               xdp->prog_attached = !!xdp->prog_id;
                return 0;
        default:
                return -EINVAL;
index aee0e60471f10d59c39ad39f8170eedea722455d..ababba37d735d62b7fe0500983f411d7806baa17 100644 (file)
@@ -568,11 +568,12 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
        return vh;
 }
 
-static struct sk_buff **vxlan_gro_receive(struct sock *sk,
-                                         struct sk_buff **head,
-                                         struct sk_buff *skb)
+static struct sk_buff *vxlan_gro_receive(struct sock *sk,
+                                        struct list_head *head,
+                                        struct sk_buff *skb)
 {
-       struct sk_buff *p, **pp = NULL;
+       struct sk_buff *pp = NULL;
+       struct sk_buff *p;
        struct vxlanhdr *vh, *vh2;
        unsigned int hlen, off_vx;
        int flush = 1;
@@ -607,7 +608,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
 
        skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
 
-       for (p = *head; p; p = p->next) {
+       list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;
 
@@ -623,9 +624,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
        flush = 0;
 
 out:
-       skb_gro_remcsum_cleanup(skb, &grc);
-       skb->remcsum_offload = 0;
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 
        return pp;
 }
@@ -638,8 +637,61 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
        return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
 }
 
-/* Add new entry to forwarding table -- assumes lock held */
+static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
+                                        const u8 *mac, __u16 state,
+                                        __be32 src_vni, __u8 ndm_flags)
+{
+       struct vxlan_fdb *f;
+
+       f = kmalloc(sizeof(*f), GFP_ATOMIC);
+       if (!f)
+               return NULL;
+       f->state = state;
+       f->flags = ndm_flags;
+       f->updated = f->used = jiffies;
+       f->vni = src_vni;
+       INIT_LIST_HEAD(&f->remotes);
+       memcpy(f->eth_addr, mac, ETH_ALEN);
+
+       return f;
+}
+
 static int vxlan_fdb_create(struct vxlan_dev *vxlan,
+                           const u8 *mac, union vxlan_addr *ip,
+                           __u16 state, __be16 port, __be32 src_vni,
+                           __be32 vni, __u32 ifindex, __u8 ndm_flags,
+                           struct vxlan_fdb **fdb)
+{
+       struct vxlan_rdst *rd = NULL;
+       struct vxlan_fdb *f;
+       int rc;
+
+       if (vxlan->cfg.addrmax &&
+           vxlan->addrcnt >= vxlan->cfg.addrmax)
+               return -ENOSPC;
+
+       netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
+       f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
+       if (!f)
+               return -ENOMEM;
+
+       rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
+       if (rc < 0) {
+               kfree(f);
+               return rc;
+       }
+
+       ++vxlan->addrcnt;
+       hlist_add_head_rcu(&f->hlist,
+                          vxlan_fdb_head(vxlan, mac, src_vni));
+
+       *fdb = f;
+
+       return 0;
+}
+
+/* Add new entry to forwarding table -- assumes lock held */
+static int vxlan_fdb_update(struct vxlan_dev *vxlan,
                            const u8 *mac, union vxlan_addr *ip,
                            __u16 state, __u16 flags,
                            __be16 port, __be32 src_vni, __be32 vni,
@@ -689,37 +741,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                if (!(flags & NLM_F_CREATE))
                        return -ENOENT;
 
-               if (vxlan->cfg.addrmax &&
-                   vxlan->addrcnt >= vxlan->cfg.addrmax)
-                       return -ENOSPC;
-
                /* Disallow replace to add a multicast entry */
                if ((flags & NLM_F_REPLACE) &&
                    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
                        return -EOPNOTSUPP;
 
                netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
-               f = kmalloc(sizeof(*f), GFP_ATOMIC);
-               if (!f)
-                       return -ENOMEM;
-
-               notify = 1;
-               f->state = state;
-               f->flags = ndm_flags;
-               f->updated = f->used = jiffies;
-               f->vni = src_vni;
-               INIT_LIST_HEAD(&f->remotes);
-               memcpy(f->eth_addr, mac, ETH_ALEN);
-
-               rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
-               if (rc < 0) {
-                       kfree(f);
+               rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
+                                     vni, ifindex, ndm_flags, &f);
+               if (rc < 0)
                        return rc;
-               }
-
-               ++vxlan->addrcnt;
-               hlist_add_head_rcu(&f->hlist,
-                                  vxlan_fdb_head(vxlan, mac, src_vni));
+               notify = 1;
        }
 
        if (notify) {
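
The refactor above splits the old vxlan_fdb_create into a pure constructor path (vxlan_fdb_alloc plus the new vxlan_fdb_create, which inserts without notifying) and the netlink-driven vxlan_fdb_update, so a caller can build an entry first and decide later whether userspace hears about it. A minimal userspace sketch of that split; fdb_create and fdb_update below are illustrations, not the kernel API:

#include <stdio.h>
#include <stdlib.h>

struct fdb_entry { char mac[18]; int state; };

/* Pure constructor: allocates and fills an entry, no side effects. */
static int fdb_create(const char *mac, int state, struct fdb_entry **out)
{
	struct fdb_entry *f = malloc(sizeof(*f));

	if (!f)
		return -1;
	snprintf(f->mac, sizeof(f->mac), "%s", mac);
	f->state = state;
	*out = f;
	return 0;
}

/* Update path: creates on miss, modifies on hit, owns the notification. */
static int fdb_update(struct fdb_entry **slot, const char *mac, int state)
{
	int notify = 0;

	if (!*slot) {
		if (fdb_create(mac, state, slot))
			return -1;
		notify = 1;
	} else if ((*slot)->state != state) {
		(*slot)->state = state;
		notify = 1;
	}
	if (notify)
		printf("notify: %s state=%d\n", (*slot)->mac, (*slot)->state);
	return 0;
}

int main(void)
{
	struct fdb_entry *e = NULL;

	fdb_update(&e, "00:11:22:33:44:55", 1);	/* create + notify */
	fdb_update(&e, "00:11:22:33:44:55", 1);	/* unchanged, silent */
	free(e);
	return 0;
}
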
@@ -743,13 +775,15 @@ static void vxlan_fdb_free(struct rcu_head *head)
        kfree(f);
 }
 
-static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
+static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
+                             bool do_notify)
 {
        netdev_dbg(vxlan->dev,
                    "delete %pM\n", f->eth_addr);
 
        --vxlan->addrcnt;
-       vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
+       if (do_notify)
+               vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
 
        hlist_del_rcu(&f->hlist);
        call_rcu(&f->rcu, vxlan_fdb_free);
@@ -865,7 +899,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                return -EAFNOSUPPORT;
 
        spin_lock_bh(&vxlan->hash_lock);
-       err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
+       err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
                               port, src_vni, vni, ifindex, ndm->ndm_flags);
        spin_unlock_bh(&vxlan->hash_lock);
 
@@ -899,7 +933,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
                goto out;
        }
 
-       vxlan_fdb_destroy(vxlan, f);
+       vxlan_fdb_destroy(vxlan, f, true);
 
 out:
        return 0;
@@ -1008,7 +1042,7 @@ static bool vxlan_snoop(struct net_device *dev,
 
                /* close off race between vxlan_flush and incoming packets */
                if (netif_running(dev))
-                       vxlan_fdb_create(vxlan, src_mac, src_ip,
+                       vxlan_fdb_update(vxlan, src_mac, src_ip,
                                         NUD_REACHABLE,
                                         NLM_F_EXCL|NLM_F_CREATE,
                                         vxlan->cfg.dst_port,
@@ -2121,7 +2155,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                vni = tunnel_id_to_key32(info->key.tun_id);
                ifindex = 0;
                dst_cache = &info->dst_cache;
-               if (info->options_len)
+               if (info->options_len &&
+                   info->key.tun_flags & TUNNEL_VXLAN_OPT)
                        md = ip_tunnel_info_opts(info);
                ttl = info->key.ttl;
                tos = info->key.tos;
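
The added TUNNEL_VXLAN_OPT test stops vxlan_xmit_one from treating tunnel metadata options as VXLAN metadata unless the producer tagged them as such; options_len only says bytes are present, not which tunnel type wrote them. A sketch of the same guard (the flag value below is illustrative, not the kernel constant):

#include <stdint.h>
#include <stdio.h>

#define TUN_OPT_VXLAN 0x1	/* illustrative stand-in for TUNNEL_VXLAN_OPT */

struct tun_info { uint16_t flags; int options_len; const void *opts; };

static const void *vxlan_opts(const struct tun_info *info)
{
	/* Length alone is not enough; the format flag must match too. */
	if (info->options_len && (info->flags & TUN_OPT_VXLAN))
		return info->opts;
	return NULL;
}

int main(void)
{
	struct tun_info foreign = { .flags = 0, .options_len = 8, .opts = "x" };

	puts(vxlan_opts(&foreign) ? "use options" : "ignore options");
	return 0;
}
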
@@ -2366,7 +2401,7 @@ static void vxlan_cleanup(struct timer_list *t)
                                           "garbage collect %pM\n",
                                           f->eth_addr);
                                f->state = NUD_STALE;
-                               vxlan_fdb_destroy(vxlan, f);
+                               vxlan_fdb_destroy(vxlan, f, true);
                        } else if (time_before(timeout, next_timer))
                                next_timer = timeout;
                }
@@ -2417,7 +2452,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
        spin_lock_bh(&vxlan->hash_lock);
        f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
        if (f)
-               vxlan_fdb_destroy(vxlan, f);
+               vxlan_fdb_destroy(vxlan, f, true);
        spin_unlock_bh(&vxlan->hash_lock);
 }
 
@@ -2471,7 +2506,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
                                continue;
                        /* the all_zeros_mac entry is deleted at vxlan_uninit */
                        if (!is_zero_ether_addr(f->eth_addr))
-                               vxlan_fdb_destroy(vxlan, f);
+                               vxlan_fdb_destroy(vxlan, f, true);
                }
        }
        spin_unlock_bh(&vxlan->hash_lock);
@@ -3162,6 +3197,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_fdb *f = NULL;
        int err;
 
        err = vxlan_dev_configure(net, dev, conf, false, extack);
@@ -3175,24 +3211,35 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
                err = vxlan_fdb_create(vxlan, all_zeros_mac,
                                       &vxlan->default_dst.remote_ip,
                                       NUD_REACHABLE | NUD_PERMANENT,
-                                      NLM_F_EXCL | NLM_F_CREATE,
                                       vxlan->cfg.dst_port,
                                       vxlan->default_dst.remote_vni,
                                       vxlan->default_dst.remote_vni,
                                       vxlan->default_dst.remote_ifindex,
-                                      NTF_SELF);
+                                      NTF_SELF, &f);
                if (err)
                        return err;
        }
 
        err = register_netdevice(dev);
+       if (err)
+               goto errout;
+
+       err = rtnl_configure_link(dev, NULL);
        if (err) {
-               vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
-               return err;
+               unregister_netdevice(dev);
+               goto errout;
        }
 
+       /* notify default fdb entry */
+       if (f)
+               vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
+
        list_add(&vxlan->next, &vn->vxlan_list);
        return 0;
+errout:
+       if (f)
+               vxlan_fdb_destroy(vxlan, f, false);
+       return err;
 }
 
 static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
@@ -3427,6 +3474,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
        struct vxlan_rdst *dst = &vxlan->default_dst;
        struct vxlan_rdst old_dst;
        struct vxlan_config conf;
+       struct vxlan_fdb *f = NULL;
        int err;
 
        err = vxlan_nl2conf(tb, data,
@@ -3455,16 +3503,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
                        err = vxlan_fdb_create(vxlan, all_zeros_mac,
                                               &dst->remote_ip,
                                               NUD_REACHABLE | NUD_PERMANENT,
-                                              NLM_F_CREATE | NLM_F_APPEND,
                                               vxlan->cfg.dst_port,
                                               dst->remote_vni,
                                               dst->remote_vni,
                                               dst->remote_ifindex,
-                                              NTF_SELF);
+                                              NTF_SELF, &f);
                        if (err) {
                                spin_unlock_bh(&vxlan->hash_lock);
                                return err;
                        }
+                       vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
                }
                spin_unlock_bh(&vxlan->hash_lock);
        }
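
Both __vxlan_dev_create and vxlan_changelink now follow one commit protocol for the default all-zeros FDB entry: create it silently, finish registering and configuring the link, then send RTM_NEWNEIGH once nothing can fail anymore; on error the entry is destroyed with do_notify false, so userspace never sees a neighbour flicker into and out of existence. A compact userspace sketch of that commit/rollback shape, with hypothetical stubs:

#include <stdio.h>

static int register_link(int fail) { return fail ? -1 : 0; }
static void notify_new(const char *what) { printf("RTM_NEWNEIGH %s\n", what); }
static void destroy_silently(const char *what) { (void)what; /* no event */ }

static int dev_create(int fail_register)
{
	const char *fdb = "default-fdb";
	int err = register_link(fail_register);

	if (err) {
		destroy_silently(fdb);	/* rollback, no notification */
		return err;
	}
	notify_new(fdb);		/* commit point reached */
	return 0;
}

int main(void)
{
	dev_create(0);	/* success: exactly one notification */
	dev_create(1);	/* failure: zero notifications */
	return 0;
}
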
index bd46b2552980878c1cb67d222428e320ae5d63ef..2a3f0f1a2b0a2c10ce4530f2e3d48c66a2b5d59f 100644
@@ -2134,7 +2134,6 @@ static void
 fst_openport(struct fst_port_info *port)
 {
        int signals;
-       int txq_length;
 
        /* Only init things if card is actually running. This allows open to
         * succeed for downloads etc.
@@ -2161,7 +2160,6 @@ fst_openport(struct fst_port_info *port)
                else
                        netif_carrier_off(port_to_dev(port));
 
-               txq_length = port->txqe - port->txqs;
                port->txqe = 0;
                port->txqs = 0;
        }
index 90a4ad9a2d081eb582570476a41a55f71e911dd6..093bd21f574d553db0f0cee44679540e5430fc47 100644
@@ -1491,7 +1491,6 @@ static int lmc_rx(struct net_device *dev)
     lmc_softc_t *sc = dev_to_sc(dev);
     int i;
     int rx_work_limit = LMC_RXDESCS;
-    unsigned int next_rx;
     int rxIntLoopCnt;          /* debug -baz */
     int localLengthErrCnt = 0;
     long stat;
@@ -1505,7 +1504,6 @@ static int lmc_rx(struct net_device *dev)
     rxIntLoopCnt = 0;          /* debug -baz */
 
     i = sc->lmc_next_rx % LMC_RXDESCS;
-    next_rx = sc->lmc_next_rx;
 
     while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
     {
index 4c417903e9be9f5bb74a7901f179488e812d8038..094cea775d0c0bd3090102cf5d511d08d718fef7 100644
@@ -566,13 +566,12 @@ static void i2400m_msg_ack_hook(struct i2400m *i2400m,
 {
        int result;
        struct device *dev = i2400m_dev(i2400m);
-       unsigned ack_type, ack_status;
+       unsigned int ack_type;
        char strerr[32];
 
        /* Chew on the message, we might need some information from
         * here */
        ack_type = le16_to_cpu(l3l4_hdr->type);
-       ack_status = le16_to_cpu(l3l4_hdr->status);
        switch (ack_type) {
        case I2400M_MT_CMD_ENTER_POWERSAVE:
                /* This is just left here for the sake of example, as
index a89b5685e68b36d5735bc9591f97f621440cb424..e9fc168bb734504e535be77067dd250a8197bffe 100644
@@ -1552,7 +1552,6 @@ int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags)
        int ret, itr;
        struct device *dev = i2400m_dev(i2400m);
        struct i2400m_fw *i2400m_fw;
-       const struct i2400m_bcf_hdr *bcf;       /* Firmware data */
        const struct firmware *fw;
        const char *fw_name;
 
@@ -1574,7 +1573,7 @@ int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags)
        }
 
        /* Load firmware files to memory. */
-       for (itr = 0, bcf = NULL, ret = -ENOENT; ; itr++) {
+       for (itr = 0, ret = -ENOENT; ; itr++) {
                fw_name = i2400m->bus_fw_names[itr];
                if (fw_name == NULL) {
                        dev_err(dev, "Could not find a usable firmware image\n");
index a654687b5fa2276602f9d6d097bf1d60c3f38fa4..9ab3f0fdfea43c2118cb45fcfa9404809160fac8 100644
@@ -535,14 +535,12 @@ void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb,
 {
        struct net_device *net_dev = i2400m->wimax_dev.net_dev;
        struct device *dev = i2400m_dev(i2400m);
-       int protocol;
 
        d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n",
                  i2400m, skb, skb->len, cs);
        switch(cs) {
        case I2400M_CS_IPV4_0:
        case I2400M_CS_IPV4:
-               protocol = ETH_P_IP;
                i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
                                          skb->data - ETH_HLEN,
                                          cpu_to_be16(ETH_P_IP));
index e60bea4604e4104dbcc837ddf23ecd890a299e2e..1665066f4e242a5a0b2441c7d74d591234906d02 100644
@@ -496,7 +496,7 @@ static void ath9k_hw_init_macaddr(struct ath_hw *ah)
        ath_err(common, "eeprom contains invalid mac address: %pM\n",
                common->macaddr);
 
-       random_ether_addr(common->macaddr);
+       eth_random_addr(common->macaddr);
        ath_err(common, "random mac address will be used: %pM\n",
                common->macaddr);
 
index 9d99eb42d9176f0f833048b3f87a906542c9e90c..6acba67bca07abd7d662466b4422295dff359a33 100644
@@ -60,7 +60,6 @@ config BRCMFMAC_PCIE
        bool "PCIE bus interface support for FullMAC driver"
        depends on BRCMFMAC
        depends on PCI
-       depends on HAS_DMA
        select BRCMFMAC_PROTO_MSGBUF
        select FW_LOADER
        ---help---
index 510f6b8e717d7f52eb2cdbb6e334e062f49053af..fa3e8ddfe9a93f78382aaa5031fcd15a29b7be09 100644
@@ -1279,7 +1279,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
 
 static u16
 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
-                               void *accel_priv, select_queue_fallback_t fallback)
+                               struct net_device *sb_dev,
+                               select_queue_fallback_t fallback)
 {
        skb->priority = cfg80211_classify8021d(skb, NULL);
        return mwifiex_1d_to_wmm_queue[skb->priority];
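
This hunk, like the matching xen-netback and xen-netfront hunks further down, tracks a tree-wide change to the .ndo_select_queue callback: the opaque accel_priv pointer becomes an explicit struct net_device *sb_dev, and select_queue_fallback_t grows the same parameter, which is why callers now write fallback(dev, skb, NULL). In outline, as implied by these hunks:

/* Before: opaque accelerator cookie. */
u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
			void *accel_priv, select_queue_fallback_t fallback);

/* After: explicit subordinate device, threaded through the fallback too. */
u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
			struct net_device *sb_dev,
			select_queue_fallback_t fallback);

/* Callers of the fallback gain a third argument: */
txq = fallback(dev, skb, NULL) % dev->real_num_tx_queues;
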
index 025fa6018550895ae529c7222d9595a1fb621748..8d1492a90bd135c09213f05d52ff85682a80de71 100644
@@ -7,7 +7,7 @@ config QTNFMAC
 config QTNFMAC_PEARL_PCIE
        tristate "Quantenna QSR10g PCIe support"
        default n
-       depends on HAS_DMA && PCI && CFG80211
+       depends on PCI && CFG80211
        select QTNFMAC
        select FW_LOADER
        select CRC32
index 39c817eddd78e9cf736fbbd440c6617867afad20..31bd6f714052c8fdff09e73b4bf0d5a50959b197 100644
@@ -1904,7 +1904,7 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
                 reject_agg, ctrl_agg_size, agg_size);
 
        rtlpriv->hw->max_rx_aggregation_subframes =
-               (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF);
+               (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT);
 }
 EXPORT_SYMBOL(rtl_rx_ampdu_apply);
 
index 78ebe494fef02b8d31505262f8c551e27aee7dc5..92274c2372008a57ba12ca960bafa84cd2eac7b3 100644
@@ -148,14 +148,14 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 }
 
 static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
-                              void *accel_priv,
+                              struct net_device *sb_dev,
                               select_queue_fallback_t fallback)
 {
        struct xenvif *vif = netdev_priv(dev);
        unsigned int size = vif->hash.size;
 
        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
-               return fallback(dev, skb) % dev->real_num_tx_queues;
+               return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
 
        xenvif_set_skb_hash(vif, skb);
 
index 922ce0abf5cf105a5394285b07356ebcad055d78..d67cd379d156df18ca224c9e15e173d676d84440 100644
@@ -545,7 +545,8 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
 }
 
 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
-                              void *accel_priv, select_queue_fallback_t fallback)
+                              struct net_device *sb_dev,
+                              select_queue_fallback_t fallback)
 {
        unsigned int num_queues = dev->real_num_tx_queues;
        u32 hash;
@@ -1810,7 +1811,7 @@ static int talk_to_netback(struct xenbus_device *dev,
        err = xen_net_read_mac(dev, info->netdev->dev_addr);
        if (err) {
                xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-               goto out;
+               goto out_unlocked;
        }
 
        rtnl_lock();
@@ -1925,6 +1926,7 @@ static int talk_to_netback(struct xenbus_device *dev,
        xennet_destroy_queues(info);
  out:
        rtnl_unlock();
+out_unlocked:
        device_unregister(&dev->dev);
        return err;
 }
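
The new out_unlocked label fixes an error path that jumped to out, and therefore to rtnl_unlock(), before rtnl_lock() had been taken. The usual idiom is one label per acquired resource, ordered so every exit releases exactly what it holds; roughly (read_config and do_locked_work are hypothetical helpers):

#include <linux/rtnetlink.h>

int read_config(void);		/* hypothetical */
int do_locked_work(void);	/* hypothetical */

int setup(void)
{
	int err;

	err = read_config();		/* nothing held yet */
	if (err)
		goto out_unlocked;	/* must not touch the lock */

	rtnl_lock();
	err = do_locked_work();
	if (err)
		goto out;		/* lock held: release it first */
out:
	rtnl_unlock();
out_unlocked:
	return err;
}
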
@@ -1950,10 +1952,6 @@ static int xennet_connect(struct net_device *dev)
        /* talk_to_netback() sets the correct number of queues */
        num_queues = dev->real_num_tx_queues;
 
-       rtnl_lock();
-       netdev_update_features(dev);
-       rtnl_unlock();
-
        if (dev->reg_state == NETREG_UNINITIALIZED) {
                err = register_netdev(dev);
                if (err) {
@@ -1963,6 +1961,10 @@ static int xennet_connect(struct net_device *dev)
                }
        }
 
+       rtnl_lock();
+       netdev_update_features(dev);
+       rtnl_unlock();
+
        /*
         * All public and private state should now be sane.  Get
         * ready to start sending and receiving packets and give the driver
index d5553c47014fade81a4f461903b3cb6c4372ccf5..5d823e965883b0f5f23db5ab39afc9f96a128267 100644
@@ -74,7 +74,7 @@ static void pn533_recv_response(struct urb *urb)
        struct sk_buff *skb = NULL;
 
        if (!urb->status) {
-               skb = alloc_skb(urb->actual_length, GFP_KERNEL);
+               skb = alloc_skb(urb->actual_length, GFP_ATOMIC);
                if (!skb) {
                        nfc_err(&phy->udev->dev, "failed to alloc memory\n");
                } else {
@@ -186,7 +186,7 @@ static int pn533_usb_send_frame(struct pn533 *dev,
 
        if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
                /* request for response for sent packet directly */
-               rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC);
+               rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
                if (rc)
                        goto error;
        } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) {
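
These two hunks swap allocation contexts that were backwards: pn533_recv_response is a URB completion callback, which can run in atomic (softirq) context, so its skb must come from GFP_ATOMIC, while pn533_usb_send_frame runs in process context and can use the more reliable, sleeping GFP_KERNEL. The rule of thumb, as a sketch rather than the driver's exact code paths:

#include <linux/skbuff.h>

/* Completion handlers may run in softirq context: never sleep here. */
static struct sk_buff *rx_alloc(unsigned int len)
{
	return alloc_skb(len, GFP_ATOMIC);
}

/* Process context with no spinlocks held: a sleeping allocation can
 * wait for memory instead of failing under pressure. */
static struct sk_buff *tx_alloc(unsigned int len)
{
	return alloc_skb(len, GFP_KERNEL);
}
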
index 68940356cad3f100f4cfbdd325d42235ea3c5da4..8b1fd7f1a224eedebf08cddfe2258949c50a6bcf 100644
@@ -414,7 +414,8 @@ static int pmem_attach_disk(struct device *dev,
        blk_queue_logical_block_size(q, pmem_sector_size(ndns));
        blk_queue_max_hw_sectors(q, UINT_MAX);
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-       blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+       if (pmem->pfn_flags & PFN_MAP)
+               blk_queue_flag_set(QUEUE_FLAG_DAX, q);
        q->queuedata = pmem;
 
        disk = alloc_disk_node(0, nid);
index 21710a7460c823bbc4f84134d7ecce70d3f993ba..46df030b2c3f74f33621dc7d4512b17fe40fd079 100644
@@ -1808,6 +1808,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                u32 max_segments =
                        (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
 
+               max_segments = min_not_zero(max_segments, ctrl->max_segments);
                blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
                blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
        }
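
min_not_zero() picks the smaller of two values while treating zero as "no limit set", so a controller that never fills in max_segments keeps the computed value. Its behaviour in miniature (a userspace rendition, not the kernel macro):

#include <stdio.h>

static unsigned int min_not_zero(unsigned int x, unsigned int y)
{
	if (x == 0)
		return y;
	if (y == 0)
		return x;
	return x < y ? x : y;
}

int main(void)
{
	printf("%u\n", min_not_zero(33, 0));	/* 33: no driver cap */
	printf("%u\n", min_not_zero(33, 127));	/* 33: computed is smaller */
	printf("%u\n", min_not_zero(200, 127));	/* 127: driver cap wins */
	return 0;
}
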
index b528a2f5826cbfe19b22aadd7e09e1ceff512cb6..41d45a1b5c628c143caac1886bd52f08a360f15e 100644
@@ -2790,6 +2790,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
        /* re-enable the admin_q so anything new can fast fail */
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
+       /* resume the io queues so that things will fast fail */
+       nvme_start_queues(&ctrl->ctrl);
+
        nvme_fc_ctlr_inactive_on_rport(ctrl);
 }
 
@@ -2804,9 +2807,6 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
         * waiting for io to terminate
         */
        nvme_fc_delete_association(ctrl);
-
-       /* resume the io queues so that things will fast fail */
-       nvme_start_queues(nctrl);
 }
 
 static void
index 231807cbc849869afcbc16fce2e3389539ce2684..0c4a33df3b2f3bb9d8a710556dab3f8c55ed2302 100644
@@ -170,6 +170,7 @@ struct nvme_ctrl {
        u64 cap;
        u32 page_size;
        u32 max_hw_sectors;
+       u32 max_segments;
        u16 oncs;
        u16 oacs;
        u16 nssa;
index fc33804662e7bd35cfbacd93a26101bf23b3f43d..ba943f211687c638cab9708dbb5011b7e3d53b2f 100644
 
 #define SGES_PER_PAGE  (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 
+/*
+ * These can be higher, but we need to ensure that any command doesn't
+ * require an sg allocation that needs more than a page of data.
+ */
+#define NVME_MAX_KB_SZ 4096
+#define NVME_MAX_SEGS  127
+
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
@@ -100,6 +107,8 @@ struct nvme_dev {
        struct nvme_ctrl ctrl;
        struct completion ioq_wait;
 
+       mempool_t *iod_mempool;
+
        /* shadow doorbell buffer support: */
        u32 *dbbuf_dbs;
        dma_addr_t dbbuf_dbs_dma_addr;
@@ -477,10 +486,7 @@ static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
        iod->use_sgl = nvme_pci_use_sgls(dev, rq);
 
        if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
-               size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
-                               iod->use_sgl);
-
-               iod->sg = kmalloc(alloc_size, GFP_ATOMIC);
+               iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
                if (!iod->sg)
                        return BLK_STS_RESOURCE;
        } else {
@@ -526,7 +532,7 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
        }
 
        if (iod->sg != iod->inline_sg)
-               kfree(iod->sg);
+               mempool_free(iod->sg, dev->iod_mempool);
 }
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
@@ -2280,6 +2286,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
                blk_put_queue(dev->ctrl.admin_q);
        kfree(dev->queues);
        free_opal_dev(dev->ctrl.opal_dev);
+       mempool_destroy(dev->iod_mempool);
        kfree(dev);
 }
 
@@ -2289,6 +2296,7 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
 
        nvme_get_ctrl(&dev->ctrl);
        nvme_dev_disable(dev, false);
+       nvme_kill_queues(&dev->ctrl);
        if (!queue_work(nvme_wq, &dev->remove_work))
                nvme_put_ctrl(&dev->ctrl);
 }
@@ -2333,6 +2341,13 @@ static void nvme_reset_work(struct work_struct *work)
        if (result)
                goto out;
 
+       /*
+        * Limit the max command size to prevent iod->sg allocations going
+        * over a single page.
+        */
+       dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
+       dev->ctrl.max_segments = NVME_MAX_SEGS;
+
        result = nvme_init_identify(&dev->ctrl);
        if (result)
                goto out;
@@ -2405,7 +2420,6 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
        struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
        struct pci_dev *pdev = to_pci_dev(dev->dev);
 
-       nvme_kill_queues(&dev->ctrl);
        if (pci_get_drvdata(pdev))
                device_release_driver(&pdev->dev);
        nvme_put_ctrl(&dev->ctrl);
@@ -2509,6 +2523,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        int node, result = -ENOMEM;
        struct nvme_dev *dev;
        unsigned long quirks = id->driver_data;
+       size_t alloc_size;
 
        node = dev_to_node(&pdev->dev);
        if (node == NUMA_NO_NODE)
@@ -2546,6 +2561,23 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (result)
                goto release_pools;
 
+       /*
+        * Double check that our mempool alloc size will cover the biggest
+        * command we support.
+        */
+       alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ,
+                                               NVME_MAX_SEGS, true);
+       WARN_ON_ONCE(alloc_size > PAGE_SIZE);
+
+       dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
+                                               mempool_kfree,
+                                               (void *) alloc_size,
+                                               GFP_KERNEL, node);
+       if (!dev->iod_mempool) {
+               result = -ENOMEM;
+               goto release_pools;
+       }
+
        dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
        nvme_get_ctrl(&dev->ctrl);
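
The iod_mempool changes replace a bare kmalloc(GFP_ATOMIC) in the I/O path with a mempool sized for the largest supported command (NVME_MAX_KB_SZ, NVME_MAX_SEGS), so a scatterlist allocation falls back to a preallocated element instead of failing under memory pressure; the WARN_ON_ONCE above double-checks that one element really covers the worst case. The pattern in outline, a sketch built from the same mempool calls:

#include <linux/errno.h>
#include <linux/mempool.h>

static mempool_t *pool;

static int pool_init(size_t worst_case, int node)
{
	/* One preallocated element guarantees forward progress. */
	pool = mempool_create_node(1, mempool_kmalloc, mempool_kfree,
				   (void *)worst_case, GFP_KERNEL, node);
	return pool ? 0 : -ENOMEM;
}

static void *iod_alloc(void)
{
	/* Uses the reserved element whenever kmalloc cannot satisfy this. */
	return mempool_alloc(pool, GFP_ATOMIC);
}

static void iod_free(void *sg)
{
	mempool_free(sg, pool);
}
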
index c9424da0d23e3cbbdd0e2b5209d9eddca9f1591f..518c5b09038c1e9041a89590a6d79101995650d4 100644
@@ -560,12 +560,6 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
        if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
                return;
 
-       if (nvme_rdma_queue_idx(queue) == 0) {
-               nvme_rdma_free_qe(queue->device->dev,
-                       &queue->ctrl->async_event_sqe,
-                       sizeof(struct nvme_command), DMA_TO_DEVICE);
-       }
-
        nvme_rdma_destroy_queue_ib(queue);
        rdma_destroy_id(queue->cm_id);
 }
@@ -698,7 +692,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
                set = &ctrl->tag_set;
                memset(set, 0, sizeof(*set));
                set->ops = &nvme_rdma_mq_ops;
-               set->queue_depth = nctrl->opts->queue_size;
+               set->queue_depth = nctrl->sqsize + 1;
                set->reserved_tags = 1; /* fabric connect */
                set->numa_node = NUMA_NO_NODE;
                set->flags = BLK_MQ_F_SHOULD_MERGE;
@@ -734,11 +728,15 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
 static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
                bool remove)
 {
-       nvme_rdma_stop_queue(&ctrl->queues[0]);
        if (remove) {
                blk_cleanup_queue(ctrl->ctrl.admin_q);
                nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
        }
+       if (ctrl->async_event_sqe.data) {
+               nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+                               sizeof(struct nvme_command), DMA_TO_DEVICE);
+               ctrl->async_event_sqe.data = NULL;
+       }
        nvme_rdma_free_queue(&ctrl->queues[0]);
 }
 
@@ -755,11 +753,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 
        ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
 
+       error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+                       sizeof(struct nvme_command), DMA_TO_DEVICE);
+       if (error)
+               goto out_free_queue;
+
        if (new) {
                ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
                if (IS_ERR(ctrl->ctrl.admin_tagset)) {
                        error = PTR_ERR(ctrl->ctrl.admin_tagset);
-                       goto out_free_queue;
+                       goto out_free_async_qe;
                }
 
                ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
@@ -795,12 +798,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
        if (error)
                goto out_stop_queue;
 
-       error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
-                       &ctrl->async_event_sqe, sizeof(struct nvme_command),
-                       DMA_TO_DEVICE);
-       if (error)
-               goto out_stop_queue;
-
        return 0;
 
 out_stop_queue:
@@ -811,6 +808,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 out_free_tagset:
        if (new)
                nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
+out_free_async_qe:
+       nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+               sizeof(struct nvme_command), DMA_TO_DEVICE);
 out_free_queue:
        nvme_rdma_free_queue(&ctrl->queues[0]);
        return error;
@@ -819,7 +819,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
                bool remove)
 {
-       nvme_rdma_stop_io_queues(ctrl);
        if (remove) {
                blk_cleanup_queue(ctrl->ctrl.connect_q);
                nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
@@ -888,9 +887,9 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
        list_del(&ctrl->list);
        mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-       kfree(ctrl->queues);
        nvmf_free_options(nctrl->opts);
 free_ctrl:
+       kfree(ctrl->queues);
        kfree(ctrl);
 }
 
@@ -949,6 +948,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        return;
 
 destroy_admin:
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        nvme_rdma_destroy_admin_queue(ctrl, false);
 requeue:
        dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
@@ -965,12 +965,14 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
+               nvme_rdma_stop_io_queues(ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                        nvme_cancel_request, &ctrl->ctrl);
                nvme_rdma_destroy_io_queues(ctrl, false);
        }
 
        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
        nvme_rdma_destroy_admin_queue(ctrl, false);
@@ -1736,6 +1738,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
+               nvme_rdma_stop_io_queues(ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                        nvme_cancel_request, &ctrl->ctrl);
                nvme_rdma_destroy_io_queues(ctrl, shutdown);
@@ -1747,6 +1750,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
                nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
 
        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
@@ -1932,11 +1936,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
                goto out_free_ctrl;
        }
 
-       ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
-                               0 /* no quirks, we're perfect! */);
-       if (ret)
-               goto out_free_ctrl;
-
        INIT_DELAYED_WORK(&ctrl->reconnect_work,
                        nvme_rdma_reconnect_ctrl_work);
        INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
@@ -1950,14 +1949,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
        ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
                                GFP_KERNEL);
        if (!ctrl->queues)
-               goto out_uninit_ctrl;
+               goto out_free_ctrl;
+
+       ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
+                               0 /* no quirks, we're perfect! */);
+       if (ret)
+               goto out_kfree_queues;
 
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
        WARN_ON_ONCE(!changed);
 
        ret = nvme_rdma_configure_admin_queue(ctrl, true);
        if (ret)
-               goto out_kfree_queues;
+               goto out_uninit_ctrl;
 
        /* sanity check icdoff */
        if (ctrl->ctrl.icdoff) {
@@ -1974,20 +1978,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
                goto out_remove_admin_queue;
        }
 
-       if (opts->queue_size > ctrl->ctrl.maxcmd) {
-               /* warn if maxcmd is lower than queue_size */
-               dev_warn(ctrl->ctrl.device,
-                       "queue_size %zu > ctrl maxcmd %u, clamping down\n",
-                       opts->queue_size, ctrl->ctrl.maxcmd);
-               opts->queue_size = ctrl->ctrl.maxcmd;
-       }
-
+       /* only warn if argument is too large here, will clamp later */
        if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
-               /* warn if sqsize is lower than queue_size */
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl sqsize %u, clamping down\n",
                        opts->queue_size, ctrl->ctrl.sqsize + 1);
-               opts->queue_size = ctrl->ctrl.sqsize + 1;
+       }
+
+       /* warn if maxcmd is lower than sqsize+1 */
+       if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
+               dev_warn(ctrl->ctrl.device,
+                       "sqsize %u > ctrl maxcmd %u, clamping down\n",
+                       ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
+               ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
        }
 
        if (opts->nr_io_queues) {
@@ -2013,15 +2016,16 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
        return &ctrl->ctrl;
 
 out_remove_admin_queue:
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        nvme_rdma_destroy_admin_queue(ctrl, true);
-out_kfree_queues:
-       kfree(ctrl->queues);
 out_uninit_ctrl:
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
        if (ret > 0)
                ret = -EIO;
        return ERR_PTR(ret);
+out_kfree_queues:
+       kfree(ctrl->queues);
 out_free_ctrl:
        kfree(ctrl);
        return ERR_PTR(ret);
index a03da764ecae8cb3ec9bf0bb9c68971669b895fc..74d4b785d2daac7d203108f06286221b5337f993 100644
@@ -686,6 +686,14 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
        }
 
        ctrl->csts = NVME_CSTS_RDY;
+
+       /*
+        * Controllers that are not yet enabled should not really enforce the
+        * keep alive timeout, but we still want to track a timeout and cleanup
+        * in case a host died before it enabled the controller.  Hence, simply
+        * reset the keep alive timer when the controller is enabled.
+        */
+       mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
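
The new comment spells out the compromise: a keep-alive timer is armed even before the controller is enabled, so a host that dies mid-setup still gets cleaned up, and enabling simply re-arms it. mod_delayed_work() is the right primitive because it moves a pending timer rather than queueing a second instance; schematically (a sketch, not the nvmet code, and ka_work is assumed to have been set up with INIT_DELAYED_WORK):

#include <linux/workqueue.h>

static struct delayed_work ka_work;

static void ka_reset(unsigned long kato_secs)
{
	/* Pending: the timeout is pushed out. Idle: the work is queued.
	 * Either way exactly one instance remains outstanding. */
	mod_delayed_work(system_wq, &ka_work, kato_secs * HZ);
}
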
index d963baf8e53a22b53125a60b8c5c4dba1b1d5a33..e92391d6d1bd6a52de298fc53dedf5902016b005 100644
@@ -367,14 +367,23 @@ struct phy_device *of_phy_get_and_connect(struct net_device *dev,
        phy_interface_t iface;
        struct device_node *phy_np;
        struct phy_device *phy;
+       int ret;
 
        iface = of_get_phy_mode(np);
        if (iface < 0)
                return NULL;
-
-       phy_np = of_parse_phandle(np, "phy-handle", 0);
-       if (!phy_np)
-               return NULL;
+       if (of_phy_is_fixed_link(np)) {
+               ret = of_phy_register_fixed_link(np);
+               if (ret < 0) {
+                       netdev_err(dev, "broken fixed-link specification\n");
+                       return NULL;
+               }
+               phy_np = of_node_get(np);
+       } else {
+               phy_np = of_parse_phandle(np, "phy-handle", 0);
+               if (!phy_np)
+                       return NULL;
+       }
 
        phy = of_phy_connect(dev, phy_np, hndlr, 0, iface);
 
index ab2f3fead6b1ceee55b0ced767dafdacb1202b35..31ff03dbeb83771be1ba57fde89ea6c32f63c2aa 100644
@@ -598,7 +598,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
        }
 
        /* Scaling up? Scale voltage before frequency */
-       if (freq > old_freq) {
+       if (freq >= old_freq) {
                ret = _set_opp_voltage(dev, reg, new_supply);
                if (ret)
                        goto restore_voltage;
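
Relaxing the comparison from > to >= covers requests that keep the frequency but need a higher voltage: with the strict comparison, the freq == old_freq case fell through both branches and the regulator was never programmed at all. The intended ordering, sketched with hypothetical stubs for the regulator and clock calls:

static int set_voltage(void)		  { return 0; }	/* hypothetical */
static int set_frequency(unsigned long f) { (void)f; return 0; }

static int set_opp(unsigned long freq, unsigned long old_freq)
{
	int ret;

	if (freq >= old_freq) {		/* up, or voltage-only change */
		ret = set_voltage();
		if (ret)
			return ret;
	}
	ret = set_frequency(freq);
	if (ret)
		return ret;
	if (freq < old_freq)		/* down: drop the voltage last */
		ret = set_voltage();
	return ret;
}
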
index 535201984b8b0c5c0c58d585528b7593bbcf61be..1b2cfe51e8d719ce6cd6976fbaad485cc8143fd8 100644
@@ -28,10 +28,10 @@ obj-$(CONFIG_PCI_PF_STUB)   += pci-pf-stub.o
 obj-$(CONFIG_PCI_ECAM)         += ecam.o
 obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
 
-obj-y                          += controller/
-obj-y                          += switch/
-
 # Endpoint library must be initialized before its users
 obj-$(CONFIG_PCI_ENDPOINT)     += endpoint/
 
+obj-y                          += controller/
+obj-y                          += switch/
+
 ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
index 18fa09b3ac8f2c377ccd8e9ba890f01d66b3c367..cc9fa02d32a08e6051e2adbcee288b6b4f629f14 100644
@@ -96,7 +96,6 @@ config PCI_HOST_GENERIC
        depends on OF
        select PCI_HOST_COMMON
        select IRQ_DOMAIN
-       select PCI_DOMAINS
        help
          Say Y here if you want to support a simple generic PCI host
          controller, such as the one emulated by kvmtool.
@@ -138,7 +137,6 @@ config PCI_VERSATILE
 
 config PCIE_IPROC
        tristate
-       select PCI_DOMAINS
        help
          This enables the iProc PCIe core controller support for Broadcom's
          iProc family of SoCs. An appropriate bus interface driver needs
@@ -176,7 +174,6 @@ config PCIE_IPROC_MSI
 config PCIE_ALTERA
        bool "Altera PCIe controller"
        depends on ARM || NIOS2 || COMPILE_TEST
-       select PCI_DOMAINS
        help
          Say Y here if you want to enable PCIe controller support on Altera
          FPGA.
index 3979f89b250ad86ec622d4aa90d1450422324db8..5bd6c1573295696acf8387bd7cd953bf8f369546 100644
@@ -7,7 +7,6 @@
  * All rights reserved.
  *
  * Send feedback to <kristen.c.accardi@intel.com>
- *
  */
 
 #include <linux/module.h>
@@ -87,8 +86,17 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
                return 0;
 
        /* If _OSC exists, we should not evaluate OSHP */
+
+       /*
+        * If there's no ACPI host bridge (i.e., ACPI support is compiled
+        * into the kernel but the hardware platform doesn't support ACPI),
+        * there's nothing to do here.
+        */
        host = pci_find_host_bridge(pdev->bus);
        root = acpi_pci_find_root(ACPI_HANDLE(&host->dev));
+       if (!root)
+               return 0;
+
        if (root->osc_support_set)
                goto no_control;
 
index 6bdb1dad805f8198a6879aeab21a5ff9051b1510..0e31f1392a53ca042519bbbe4bc37dab3bfe87c0 100644
@@ -1463,7 +1463,7 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
        case PMU_TYPE_IOB:
                return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
        case PMU_TYPE_IOB_SLOW:
-               return devm_kasprintf(dev, GFP_KERNEL, "iob-slow%d", id);
+               return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id);
        case PMU_TYPE_MCB:
                return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
        case PMU_TYPE_MC:
index 76243caa08c630c064ebd674f566089bea1fc4ba..b5c880b50bb371f5fb5eeddcd0799cd9d7057289 100644
@@ -333,7 +333,7 @@ static int owl_pin_config_set(struct pinctrl_dev *pctrldev,
        unsigned long flags;
        unsigned int param;
        u32 reg, bit, width, arg;
-       int ret, i;
+       int ret = 0, i;
 
        info = &pctrl->soc->padinfo[pin];
 
index b601039d6c69a28d771eff622f0001d70bf84204..c4aa411f5935b7b0275c004a3924ffda3613630a 100644
@@ -101,10 +101,11 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
 }
 
 static int dt_to_map_one_config(struct pinctrl *p,
-                               struct pinctrl_dev *pctldev,
+                               struct pinctrl_dev *hog_pctldev,
                                const char *statename,
                                struct device_node *np_config)
 {
+       struct pinctrl_dev *pctldev = NULL;
        struct device_node *np_pctldev;
        const struct pinctrl_ops *ops;
        int ret;
@@ -123,8 +124,10 @@ static int dt_to_map_one_config(struct pinctrl *p,
                        return -EPROBE_DEFER;
                }
                /* If we're creating a hog we can use the passed pctldev */
-               if (pctldev && (np_pctldev == p->dev->of_node))
+               if (hog_pctldev && (np_pctldev == p->dev->of_node)) {
+                       pctldev = hog_pctldev;
                        break;
+               }
                pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
                if (pctldev)
                        break;
index ad6da1184c9f0b1117275df587f502d78abbe904..e3f1ab2290fc1baa0218bcda449cd11302ff9bb7 100644
@@ -1459,6 +1459,9 @@ static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
        struct mtk_pinctrl *hw = gpiochip_get_data(chip);
        unsigned long eint_n;
 
+       if (!hw->eint)
+               return -ENOTSUPP;
+
        eint_n = offset;
 
        return mtk_eint_find_irq(hw->eint, eint_n);
@@ -1471,7 +1474,8 @@ static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
        unsigned long eint_n;
        u32 debounce;
 
-       if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+       if (!hw->eint ||
+           pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
                return -ENOTSUPP;
 
        debounce = pinconf_to_config_argument(config);
index b3799695d8db8264ce917232cb3b1439793fe845..16ff56f93501794edb33231b770afa3dc5d55b73 100644
@@ -1000,11 +1000,6 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
                return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Unable to get eint resource\n");
-               return -ENODEV;
-       }
-
        pctl->eint->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(pctl->eint->base))
                return PTR_ERR(pctl->eint->base);
index b3153c095199d3bed84d7b846432fa3e783c08f0..e5647dac0818d46353629a543733fe0af804210b 100644
@@ -1590,8 +1590,11 @@ static int pcs_save_context(struct pcs_device *pcs)
 
        mux_bytes = pcs->width / BITS_PER_BYTE;
 
-       if (!pcs->saved_vals)
+       if (!pcs->saved_vals) {
                pcs->saved_vals = devm_kzalloc(pcs->dev, pcs->size, GFP_ATOMIC);
+               if (!pcs->saved_vals)
+                       return -ENOMEM;
+       }
 
        switch (pcs->width) {
        case 64:
@@ -1651,8 +1654,13 @@ static int pinctrl_single_suspend(struct platform_device *pdev,
        if (!pcs)
                return -EINVAL;
 
-       if (pcs->flags & PCS_CONTEXT_LOSS_OFF)
-               pcs_save_context(pcs);
+       if (pcs->flags & PCS_CONTEXT_LOSS_OFF) {
+               int ret;
+
+               ret = pcs_save_context(pcs);
+               if (ret < 0)
+                       return ret;
+       }
 
        return pinctrl_force_sleep(pcs->pctl);
 }
index 474c988d2e95eb72dbe91b16e24cbd75bf6a9dfe..d137c480db46b58df2660a740377e4c4e3437709 100644
@@ -43,7 +43,7 @@ config PTP_1588_CLOCK_DTE
 
 config PTP_1588_CLOCK_QORIQ
        tristate "Freescale QorIQ 1588 timer as PTP clock"
-       depends on GIANFAR
+       depends on GIANFAR || FSL_DPAA_ETH
        depends on PTP_1588_CLOCK
        default y
        help
index 767c485af59b2ee0583242b7dbf31581f76cb91a..547dbdac9d541ebaf57883eaa07e50786f2e46e4 100644
@@ -221,7 +221,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                }
                pct = &sysoff->ts[0];
                for (i = 0; i < sysoff->n_samples; i++) {
-                       getnstimeofday64(&ts);
+                       ktime_get_real_ts64(&ts);
                        pct->sec = ts.tv_sec;
                        pct->nsec = ts.tv_nsec;
                        pct++;
@@ -230,7 +230,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                        pct->nsec = ts.tv_nsec;
                        pct++;
                }
-               getnstimeofday64(&ts);
+               ktime_get_real_ts64(&ts);
                pct->sec = ts.tv_sec;
                pct->nsec = ts.tv_nsec;
                if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
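
getnstimeofday64() and ktime_get_real_ts64() both fill a struct timespec64 with wall-clock time; this hunk is part of the tree-wide move to the ktime_get_* accessors. The userspace analogue of the system timestamps PTP_SYS_OFFSET samples here is clock_gettime(CLOCK_REALTIME, ...):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	printf("sec=%lld nsec=%ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
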
index 1468a1642b4978f5048ccace2af1846baf9c51e9..a14c317b5a3873cc0d7e77e54df78beb2762a686 100644
 /* Caller must hold qoriq_ptp->lock. */
 static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp)
 {
+       struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
        u64 ns;
        u32 lo, hi;
 
-       lo = qoriq_read(&qoriq_ptp->regs->tmr_cnt_l);
-       hi = qoriq_read(&qoriq_ptp->regs->tmr_cnt_h);
+       lo = qoriq_read(&regs->ctrl_regs->tmr_cnt_l);
+       hi = qoriq_read(&regs->ctrl_regs->tmr_cnt_h);
        ns = ((u64) hi) << 32;
        ns |= lo;
        return ns;
@@ -52,16 +53,18 @@ static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp)
 /* Caller must hold qoriq_ptp->lock. */
 static void tmr_cnt_write(struct qoriq_ptp *qoriq_ptp, u64 ns)
 {
+       struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
        u32 hi = ns >> 32;
        u32 lo = ns & 0xffffffff;
 
-       qoriq_write(&qoriq_ptp->regs->tmr_cnt_l, lo);
-       qoriq_write(&qoriq_ptp->regs->tmr_cnt_h, hi);
+       qoriq_write(&regs->ctrl_regs->tmr_cnt_l, lo);
+       qoriq_write(&regs->ctrl_regs->tmr_cnt_h, hi);
 }
 
 /* Caller must hold qoriq_ptp->lock. */
 static void set_alarm(struct qoriq_ptp *qoriq_ptp)
 {
+       struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
        u64 ns;
        u32 lo, hi;
 
@@ -70,16 +73,18 @@ static void set_alarm(struct qoriq_ptp *qoriq_ptp)
        ns -= qoriq_ptp->tclk_period;
        hi = ns >> 32;
        lo = ns & 0xffffffff;
-       qoriq_write(&qoriq_ptp->regs->tmr_alarm1_l, lo);
-       qoriq_write(&qoriq_ptp->regs->tmr_alarm1_h, hi);
+       qoriq_write(&regs->alarm_regs->tmr_alarm1_l, lo);
+       qoriq_write(&regs->alarm_regs->tmr_alarm1_h, hi);
 }
 
 /* Caller must hold qoriq_ptp->lock. */
 static void set_fipers(struct qoriq_ptp *qoriq_ptp)
 {
+       struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
+
        set_alarm(qoriq_ptp);
-       qoriq_write(&qoriq_ptp->regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
-       qoriq_write(&qoriq_ptp->regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
+       qoriq_write(&regs->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
+       qoriq_write(&regs->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
 }
 
 /*
@@ -89,16 +94,17 @@ static void set_fipers(struct qoriq_ptp *qoriq_ptp)
 static irqreturn_t isr(int irq, void *priv)
 {
        struct qoriq_ptp *qoriq_ptp = priv;
+       struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
        struct ptp_clock_event event;
        u64 ns;
        u32 ack = 0, lo, hi, mask, val;
 
-       val = qoriq_read(&qoriq_ptp->regs->tmr_tevent);
+       val = qoriq_read(&regs->ctrl_regs->tmr_tevent);
 
        if (val & ETS1) {
                ack |= ETS1;
-               hi = qoriq_read(&qoriq_ptp->regs->tmr_etts1_h);
-               lo = qoriq_read(&qoriq_ptp->regs->tmr_etts1_l);
+               hi = qoriq_read(&regs->etts_regs->tmr_etts1_h);
+               lo = qoriq_read(&regs->etts_regs->tmr_etts1_l);
                event.type = PTP_CLOCK_EXTTS;
                event.index = 0;
                event.timestamp = ((u64) hi) << 32;
@@ -108,8 +114,8 @@ static irqreturn_t isr(int irq, void *priv)
 
        if (val & ETS2) {
                ack |= ETS2;
-               hi = qoriq_read(&qoriq_ptp->regs->tmr_etts2_h);
-               lo = qoriq_read(&qoriq_ptp->regs->tmr_etts2_l);
+               hi = qoriq_read(&regs->etts_regs->tmr_etts2_h);
+               lo = qoriq_read(&regs->etts_regs->tmr_etts2_l);
                event.type = PTP_CLOCK_EXTTS;
                event.index = 1;
                event.timestamp = ((u64) hi) << 32;
@@ -130,16 +136,16 @@ static irqreturn_t isr(int irq, void *priv)
                        hi = ns >> 32;
                        lo = ns & 0xffffffff;
                        spin_lock(&qoriq_ptp->lock);
-                       qoriq_write(&qoriq_ptp->regs->tmr_alarm2_l, lo);
-                       qoriq_write(&qoriq_ptp->regs->tmr_alarm2_h, hi);
+                       qoriq_write(&regs->alarm_regs->tmr_alarm2_l, lo);
+                       qoriq_write(&regs->alarm_regs->tmr_alarm2_h, hi);
                        spin_unlock(&qoriq_ptp->lock);
                        qoriq_ptp->alarm_value = ns;
                } else {
-                       qoriq_write(&qoriq_ptp->regs->tmr_tevent, ALM2);
+                       qoriq_write(&regs->ctrl_regs->tmr_tevent, ALM2);
                        spin_lock(&qoriq_ptp->lock);
-                       mask = qoriq_read(&qoriq_ptp->regs->tmr_temask);
+                       mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
                        mask &= ~ALM2EN;
-                       qoriq_write(&qoriq_ptp->regs->tmr_temask, mask);
+                       qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
                        spin_unlock(&qoriq_ptp->lock);
                        qoriq_ptp->alarm_value = 0;
                        qoriq_ptp->alarm_interval = 0;
@@ -153,7 +159,7 @@ static irqreturn_t isr(int irq, void *priv)
        }
 
        if (ack) {
-               qoriq_write(&qoriq_ptp->regs->tmr_tevent, ack);
+               qoriq_write(&regs->ctrl_regs->tmr_tevent, ack);
                return IRQ_HANDLED;
        } else
                return IRQ_NONE;
@@ -169,6 +175,7 @@ static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
        u32 tmr_add;
        int neg_adj = 0;
        struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
+       struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
 
        if (scaled_ppm < 0) {
                neg_adj = 1;
@@ -186,7 +193,7 @@ static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 
        tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
 
-       qoriq_write(&qoriq_ptp->regs->tmr_add, tmr_add);
+       qoriq_write(&regs->ctrl_regs->tmr_add, tmr_add);
 
        return 0;
 }
@@ -250,6 +257,7 @@ static int ptp_qoriq_enable(struct ptp_clock_info *ptp,
                              struct ptp_clock_request *rq, int on)
 {
        struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps);
+       struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
        unsigned long flags;
        u32 bit, mask;
 
@@ -266,23 +274,23 @@ static int ptp_qoriq_enable(struct ptp_clock_info *ptp,
                        return -EINVAL;
                }
                spin_lock_irqsave(&qoriq_ptp->lock, flags);
-               mask = qoriq_read(&qoriq_ptp->regs->tmr_temask);
+               mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
                if (on)
                        mask |= bit;
                else
                        mask &= ~bit;
-               qoriq_write(&qoriq_ptp->regs->tmr_temask, mask);
+               qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
                spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
                return 0;
 
        case PTP_CLK_REQ_PPS:
                spin_lock_irqsave(&qoriq_ptp->lock, flags);
-               mask = qoriq_read(&qoriq_ptp->regs->tmr_temask);
+               mask = qoriq_read(&regs->ctrl_regs->tmr_temask);
                if (on)
                        mask |= PP1EN;
                else
                        mask &= ~PP1EN;
-               qoriq_write(&qoriq_ptp->regs->tmr_temask, mask);
+               qoriq_write(&regs->ctrl_regs->tmr_temask, mask);
                spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
                return 0;
 
@@ -313,10 +321,12 @@ static int qoriq_ptp_probe(struct platform_device *dev)
 {
        struct device_node *node = dev->dev.of_node;
        struct qoriq_ptp *qoriq_ptp;
+       struct qoriq_ptp_registers *regs;
        struct timespec64 now;
        int err = -ENOMEM;
        u32 tmr_ctrl;
        unsigned long flags;
+       void __iomem *base;
 
        qoriq_ptp = kzalloc(sizeof(*qoriq_ptp), GFP_KERNEL);
        if (!qoriq_ptp)
@@ -351,7 +361,7 @@ static int qoriq_ptp_probe(struct platform_device *dev)
                pr_err("irq not in device tree\n");
                goto no_node;
        }
-       if (request_irq(qoriq_ptp->irq, isr, 0, DRIVER, qoriq_ptp)) {
+       if (request_irq(qoriq_ptp->irq, isr, IRQF_SHARED, DRIVER, qoriq_ptp)) {
                pr_err("request_irq failed\n");
                goto no_node;
        }
@@ -368,13 +378,28 @@ static int qoriq_ptp_probe(struct platform_device *dev)
 
        spin_lock_init(&qoriq_ptp->lock);
 
-       qoriq_ptp->regs = ioremap(qoriq_ptp->rsrc->start,
-                               resource_size(qoriq_ptp->rsrc));
-       if (!qoriq_ptp->regs) {
+       base = ioremap(qoriq_ptp->rsrc->start,
+                      resource_size(qoriq_ptp->rsrc));
+       if (!base) {
                pr_err("ioremap ptp registers failed\n");
                goto no_ioremap;
        }
-       getnstimeofday64(&now);
+
+       qoriq_ptp->base = base;
+
+       if (of_device_is_compatible(node, "fsl,fman-ptp-timer")) {
+               qoriq_ptp->regs.ctrl_regs = base + FMAN_CTRL_REGS_OFFSET;
+               qoriq_ptp->regs.alarm_regs = base + FMAN_ALARM_REGS_OFFSET;
+               qoriq_ptp->regs.fiper_regs = base + FMAN_FIPER_REGS_OFFSET;
+               qoriq_ptp->regs.etts_regs = base + FMAN_ETTS_REGS_OFFSET;
+       } else {
+               qoriq_ptp->regs.ctrl_regs = base + CTRL_REGS_OFFSET;
+               qoriq_ptp->regs.alarm_regs = base + ALARM_REGS_OFFSET;
+               qoriq_ptp->regs.fiper_regs = base + FIPER_REGS_OFFSET;
+               qoriq_ptp->regs.etts_regs = base + ETTS_REGS_OFFSET;
+       }
+
+       ktime_get_real_ts64(&now);
        ptp_qoriq_settime(&qoriq_ptp->caps, &now);
 
        tmr_ctrl =
@@ -383,13 +408,14 @@ static int qoriq_ptp_probe(struct platform_device *dev)
 
        spin_lock_irqsave(&qoriq_ptp->lock, flags);
 
-       qoriq_write(&qoriq_ptp->regs->tmr_ctrl,   tmr_ctrl);
-       qoriq_write(&qoriq_ptp->regs->tmr_add,    qoriq_ptp->tmr_add);
-       qoriq_write(&qoriq_ptp->regs->tmr_prsc,   qoriq_ptp->tmr_prsc);
-       qoriq_write(&qoriq_ptp->regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
-       qoriq_write(&qoriq_ptp->regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
+       regs = &qoriq_ptp->regs;
+       qoriq_write(&regs->ctrl_regs->tmr_ctrl,   tmr_ctrl);
+       qoriq_write(&regs->ctrl_regs->tmr_add,    qoriq_ptp->tmr_add);
+       qoriq_write(&regs->ctrl_regs->tmr_prsc,   qoriq_ptp->tmr_prsc);
+       qoriq_write(&regs->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1);
+       qoriq_write(&regs->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2);
        set_alarm(qoriq_ptp);
-       qoriq_write(&qoriq_ptp->regs->tmr_ctrl,   tmr_ctrl|FIPERST|RTPE|TE|FRD);
+       qoriq_write(&regs->ctrl_regs->tmr_ctrl,   tmr_ctrl|FIPERST|RTPE|TE|FRD);
 
        spin_unlock_irqrestore(&qoriq_ptp->lock, flags);
 
@@ -405,7 +431,7 @@ static int qoriq_ptp_probe(struct platform_device *dev)
        return 0;
 
 no_clock:
-       iounmap(qoriq_ptp->regs);
+       iounmap(qoriq_ptp->base);
 no_ioremap:
        release_resource(qoriq_ptp->rsrc);
 no_resource:
@@ -419,12 +445,13 @@ static int qoriq_ptp_probe(struct platform_device *dev)
 static int qoriq_ptp_remove(struct platform_device *dev)
 {
        struct qoriq_ptp *qoriq_ptp = platform_get_drvdata(dev);
+       struct qoriq_ptp_registers *regs = &qoriq_ptp->regs;
 
-       qoriq_write(&qoriq_ptp->regs->tmr_temask, 0);
-       qoriq_write(&qoriq_ptp->regs->tmr_ctrl,   0);
+       qoriq_write(&regs->ctrl_regs->tmr_temask, 0);
+       qoriq_write(&regs->ctrl_regs->tmr_ctrl,   0);
 
        ptp_clock_unregister(qoriq_ptp->clock);
-       iounmap(qoriq_ptp->regs);
+       iounmap(qoriq_ptp->base);
        release_resource(qoriq_ptp->rsrc);
        free_irq(qoriq_ptp->irq, qoriq_ptp);
        kfree(qoriq_ptp);
@@ -434,6 +461,7 @@ static int qoriq_ptp_remove(struct platform_device *dev)
 
 static const struct of_device_id match_table[] = {
        { .compatible = "fsl,etsec-ptp" },
+       { .compatible = "fsl,fman-ptp-timer" },
        {},
 };
 MODULE_DEVICE_TABLE(of, match_table);
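
Most of this driver's diff replaces one flat register struct with four region pointers (ctrl_regs, alarm_regs, fiper_regs, etts_regs) computed from the mapped base, letting the eTSEC and FMan timer blocks, which expose the same registers at different offsets, share one driver selected by compatible string. The shape of that scheme, with made-up offsets:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative offsets only; the real ones live in the driver header. */
struct layout { uint32_t ctrl, alarm, fiper, etts; };
static const struct layout etsec = { 0x00, 0x40, 0x80, 0xa0 };
static const struct layout fman  = { 0x80, 0x40, 0x00, 0xa0 };

struct regs { void *ctrl, *alarm, *fiper, *etts; };

static void map_regs(struct regs *r, uint8_t *base, const char *compatible)
{
	const struct layout *o =
		strcmp(compatible, "fsl,fman-ptp-timer") ? &etsec : &fman;

	r->ctrl  = base + o->ctrl;
	r->alarm = base + o->alarm;
	r->fiper = base + o->fiper;
	r->etts  = base + o->etts;
}

int main(void)
{
	static uint8_t mmio[256];	/* stands in for the ioremap()ed area */
	struct regs r;

	map_regs(&r, mmio, "fsl,fman-ptp-timer");
	printf("ctrl at +0x%tx\n", (uint8_t *)r.ctrl - mmio);
	return 0;
}
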
index 73cce3ecb97fefbccc66266a4fd29f08e453079e..d3a38c421503abbaab03d81fdaac739591251920 100644
@@ -1222,80 +1222,37 @@ static void dasd_hosts_init(struct dentry *base_dentry,
                device->hosts_dentry = pde;
 }
 
-/*
- * Allocate memory for a channel program with 'cplength' channel
- * command words and 'datasize' additional space. There are two
- * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
- * memory and 2) dasd_smalloc_request uses the static ccw memory
- * that gets allocated for each device.
- */
-struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
-                                         int datasize,
-                                         struct dasd_device *device)
-{
-       struct dasd_ccw_req *cqr;
-
-       /* Sanity checks */
-       BUG_ON(datasize > PAGE_SIZE ||
-            (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
-
-       cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
-       if (cqr == NULL)
-               return ERR_PTR(-ENOMEM);
-       cqr->cpaddr = NULL;
-       if (cplength > 0) {
-               cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
-                                     GFP_ATOMIC | GFP_DMA);
-               if (cqr->cpaddr == NULL) {
-                       kfree(cqr);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
-       cqr->data = NULL;
-       if (datasize > 0) {
-               cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
-               if (cqr->data == NULL) {
-                       kfree(cqr->cpaddr);
-                       kfree(cqr);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
-       cqr->magic =  magic;
-       set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
-       dasd_get_device(device);
-       return cqr;
-}
-EXPORT_SYMBOL(dasd_kmalloc_request);
-
-struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
-                                         int datasize,
-                                         struct dasd_device *device)
+struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
+                                         struct dasd_device *device,
+                                         struct dasd_ccw_req *cqr)
 {
        unsigned long flags;
-       struct dasd_ccw_req *cqr;
-       char *data;
-       int size;
+       char *data, *chunk;
+       int size = 0;
 
-       size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
        if (cplength > 0)
                size += cplength * sizeof(struct ccw1);
        if (datasize > 0)
                size += datasize;
+       if (!cqr)
+               size += (sizeof(*cqr) + 7L) & -8L;
+
        spin_lock_irqsave(&device->mem_lock, flags);
-       cqr = (struct dasd_ccw_req *)
-               dasd_alloc_chunk(&device->ccw_chunks, size);
+       data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
        spin_unlock_irqrestore(&device->mem_lock, flags);
-       if (cqr == NULL)
+       if (!chunk)
                return ERR_PTR(-ENOMEM);
-       memset(cqr, 0, sizeof(struct dasd_ccw_req));
-       data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
-       cqr->cpaddr = NULL;
+       if (!cqr) {
+               cqr = (void *) data;
+               data += (sizeof(*cqr) + 7L) & -8L;
+       }
+       memset(cqr, 0, sizeof(*cqr));
+       cqr->mem_chunk = chunk;
        if (cplength > 0) {
-               cqr->cpaddr = (struct ccw1 *) data;
-               data += cplength*sizeof(struct ccw1);
-               memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
+               cqr->cpaddr = data;
+               data += cplength * sizeof(struct ccw1);
+               memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
        }
-       cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = data;
                memset(cqr->data, 0, datasize);
@@ -1307,33 +1264,12 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
 }
 EXPORT_SYMBOL(dasd_smalloc_request);
 
-/*
- * Free memory of a channel program. This function needs to free all the
- * idal lists that might have been created by dasd_set_cda and the
- * struct dasd_ccw_req itself.
- */
-void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
-{
-       struct ccw1 *ccw;
-
-       /* Clear any idals used for the request. */
-       ccw = cqr->cpaddr;
-       do {
-               clear_normalized_cda(ccw);
-       } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
-       kfree(cqr->cpaddr);
-       kfree(cqr->data);
-       kfree(cqr);
-       dasd_put_device(device);
-}
-EXPORT_SYMBOL(dasd_kfree_request);
-
 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&device->mem_lock, flags);
-       dasd_free_chunk(&device->ccw_chunks, cqr);
+       dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        dasd_put_device(device);
 }
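
With dasd_kmalloc_request() gone, dasd_smalloc_request() is the only allocator: it carves one chunk from the per-device CCW pool into an optional request header, the channel program and the data area, remembering the chunk in cqr->mem_chunk so dasd_sfree_request() can return it in one piece. The expression (sizeof(*cqr) + 7L) & -8L rounds the header size up to the next multiple of 8 (for a 13-byte header: (13 + 7) & ~7 = 16). A hedged plain-C sketch of the carving, with malloc() standing in for the chunk pool:

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#define ALIGN8(x) (((x) + 7UL) & ~7UL)   /* same rounding as (x + 7L) & -8L */

/* Carve one allocation into an optional header, the channel program and
 * the data area; the caller frees the returned chunk as a whole, like
 * cqr->mem_chunk above. */
static void *carve(size_t hdr, size_t cplen, size_t datalen,
                   void **cp, void **data)
{
        size_t total = ALIGN8(hdr) + cplen + datalen;
        char *chunk = malloc(total);
        char *p = chunk;

        if (!chunk)
                return NULL;
        memset(chunk, 0, total);
        p += ALIGN8(hdr);                 /* header occupies the aligned prefix */
        *cp = cplen ? (void *)p : NULL;   /* channel command words follow it */
        p += cplen;
        *data = datalen ? (void *)p : NULL;  /* payload sits at the tail */
        return chunk;
}
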
@@ -1885,6 +1821,33 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device,
        }
 }
 
+static void __dasd_process_cqr(struct dasd_device *device,
+                              struct dasd_ccw_req *cqr)
+{
+       char errorstring[ERRORLENGTH];
+
+       switch (cqr->status) {
+       case DASD_CQR_SUCCESS:
+               cqr->status = DASD_CQR_DONE;
+               break;
+       case DASD_CQR_ERROR:
+               cqr->status = DASD_CQR_NEED_ERP;
+               break;
+       case DASD_CQR_CLEARED:
+               cqr->status = DASD_CQR_TERMINATED;
+               break;
+       default:
+               /* internal error 12 - wrong cqr status*/
+               snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
+               dev_err(&device->cdev->dev,
+                       "An error occurred in the DASD device driver, "
+                       "reason=%s\n", errorstring);
+               BUG();
+       }
+       if (cqr->callback)
+               cqr->callback(cqr, cqr->callback_data);
+}
+
 /*
  * the cqrs from the final queue are returned to the upper layer
  * by setting a dasd_block state and calling the callback function
@@ -1895,40 +1858,18 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
        struct dasd_block *block;
-       void (*callback)(struct dasd_ccw_req *, void *data);
-       void *callback_data;
-       char errorstring[ERRORLENGTH];
 
        list_for_each_safe(l, n, final_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, devlist);
                list_del_init(&cqr->devlist);
                block = cqr->block;
-               callback = cqr->callback;
-               callback_data = cqr->callback_data;
-               if (block)
+               if (!block) {
+                       __dasd_process_cqr(device, cqr);
+               } else {
                        spin_lock_bh(&block->queue_lock);
-               switch (cqr->status) {
-               case DASD_CQR_SUCCESS:
-                       cqr->status = DASD_CQR_DONE;
-                       break;
-               case DASD_CQR_ERROR:
-                       cqr->status = DASD_CQR_NEED_ERP;
-                       break;
-               case DASD_CQR_CLEARED:
-                       cqr->status = DASD_CQR_TERMINATED;
-                       break;
-               default:
-                       /* internal error 12 - wrong cqr status*/
-                       snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
-                       dev_err(&device->cdev->dev,
-                               "An error occurred in the DASD device driver, "
-                               "reason=%s\n", errorstring);
-                       BUG();
-               }
-               if (cqr->callback != NULL)
-                       (callback)(cqr, callback_data);
-               if (block)
+                       __dasd_process_cqr(device, cqr);
                        spin_unlock_bh(&block->queue_lock);
+               }
        }
 }
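
Factoring the status switch out into __dasd_process_cqr() leaves the final-queue walker with a single decision: whether to run the transition under the per-block queue_lock. The transition table itself, compressed into a hedged sketch (enum names are illustrative):

enum cqr_state {
        CQR_SUCCESS, CQR_ERROR, CQR_CLEARED,    /* incoming states */
        CQR_DONE, CQR_NEED_ERP, CQR_TERMINATED  /* outgoing states */
};

static enum cqr_state cqr_transition(enum cqr_state s)
{
        switch (s) {
        case CQR_SUCCESS: return CQR_DONE;
        case CQR_ERROR:   return CQR_NEED_ERP;
        case CQR_CLEARED: return CQR_TERMINATED;
        default:          return s;     /* the real helper BUG()s here */
        }
}
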
 
@@ -3041,7 +2982,6 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
        cqr->callback_data = req;
        cqr->status = DASD_CQR_FILLED;
        cqr->dq = dq;
-       *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
 
        blk_mq_start_request(req);
        spin_lock(&block->queue_lock);
@@ -3072,7 +3012,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
        unsigned long flags;
        int rc = 0;
 
-       cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
+       cqr = blk_mq_rq_to_pdu(req);
        if (!cqr)
                return BLK_EH_DONE;
 
@@ -3174,7 +3114,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
        int rc;
 
        block->tag_set.ops = &dasd_mq_ops;
-       block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
+       block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
        block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
        block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
        block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
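
The do_dasd_request(), dasd_times_out() and dasd_alloc_queue() hunks belong together: cmd_size grows from the size of a pointer to the full structure, so the per-request PDU that blk-mq allocates is the dasd_ccw_req itself; the store through blk_mq_rq_to_pdu() disappears and the timeout handler reads the PDU directly. A toy model of that layout, assuming the usual header-then-payload allocation blk-mq performs:

#include <stdlib.h>

struct request_sketch { int tag; };     /* PDU follows in the same block */
struct cqr_sketch { int status; };

/* blk-mq allocates request + cmd_size bytes together; the PDU is the area
 * right behind the request, so with cmd_size == sizeof(struct cqr_sketch)
 * the request carries the whole cqr and no pointer to it needs to be
 * stored or dereferenced. */
static void *rq_to_pdu(struct request_sketch *rq)
{
        return rq + 1;
}

static struct request_sketch *alloc_rq(size_t cmd_size)
{
        return malloc(sizeof(struct request_sketch) + cmd_size);
}

/* usage: struct cqr_sketch *cqr = rq_to_pdu(rq); */
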
@@ -4038,7 +3978,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
        struct ccw1 *ccw;
        unsigned long *idaw;
 
-       cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
+       cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
+                                  NULL);
 
        if (IS_ERR(cqr)) {
                /* internal error 13 - Allocating the RDC request failed*/
index 5e963fe0e38d4c2125c43ae801ca7e9b28d98d07..e36a114354fc368e2141aae5c080c7f61b674da8 100644 (file)
@@ -407,9 +407,9 @@ static int read_unit_address_configuration(struct dasd_device *device,
        int rc;
        unsigned long flags;
 
-       cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);
        cqr->startdev = device;
@@ -457,7 +457,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
                lcu->flags |= NEED_UAC_UPDATE;
                spin_unlock_irqrestore(&lcu->lock, flags);
        }
-       dasd_kfree_request(cqr, cqr->memdev);
+       dasd_sfree_request(cqr, cqr->memdev);
        return rc;
 }
 
index 131f1989f6f3dff0345250c71943f5ac338af19c..e1fe02477ea8fca951232dabe7f89754c8f287ff 100644 (file)
@@ -536,7 +536,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        /* Build the request */
        datasize = sizeof(struct dasd_diag_req) +
                count*sizeof(struct dasd_diag_bio);
-       cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev);
+       cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
index be208e7adcb46087e7fb2436fadf8a737d7c472e..bbf95b78ef5d9e4c5903e466e7de3f71b615c9ce 100644 (file)
@@ -886,7 +886,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
        }
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
                                   0, /* use rcd_buf as data ara */
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                              "Could not allocate RCD request");
@@ -1442,7 +1442,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data) +
                                    sizeof(struct dasd_rssd_features)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
                                "allocate initialization request");
@@ -1504,7 +1504,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
                                  sizeof(struct dasd_psf_ssc_data),
-                                 device);
+                                  device, NULL);
 
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
@@ -1815,7 +1815,8 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
 
        cplength = 8;
        datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
+                                  NULL);
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
@@ -2092,7 +2093,8 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
         */
        itcw_size = itcw_calc_size(0, count, 0);
 
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+                                  NULL);
        if (IS_ERR(cqr))
                return cqr;
 
@@ -2186,7 +2188,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
        cplength += count;
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-                                 startdev);
+                                  startdev, NULL);
        if (IS_ERR(cqr))
                return cqr;
 
@@ -2332,7 +2334,7 @@ dasd_eckd_build_format(struct dasd_device *base,
        }
        /* Allocate the format ccw request. */
        fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
-                                  datasize, startdev);
+                                  datasize, startdev, NULL);
        if (IS_ERR(fcp))
                return fcp;
 
@@ -3103,7 +3105,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
        }
        /* Allocate the ccw request. */
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-                                  startdev);
+                                  startdev, blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
@@ -3262,7 +3264,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
 
        /* Allocate the ccw request. */
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-                                  startdev);
+                                  startdev, blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
@@ -3595,7 +3597,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 
        /* Allocate the ccw request. */
        itcw_size = itcw_calc_size(0, ctidaw, 0);
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
@@ -3862,7 +3865,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
 
        /* Allocate the ccw request. */
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
-                                  datasize, startdev);
+                                  datasize, startdev, blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
@@ -4102,7 +4105,7 @@ dasd_eckd_release(struct dasd_device *device)
                return -EACCES;
 
        useglobal = 0;
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4157,7 +4160,7 @@ dasd_eckd_reserve(struct dasd_device *device)
                return -EACCES;
 
        useglobal = 0;
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4211,7 +4214,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
                return -EACCES;
 
        useglobal = 0;
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4271,7 +4274,8 @@ static int dasd_eckd_snid(struct dasd_device *device,
 
        useglobal = 0;
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
-                                  sizeof(struct dasd_snid_data), device);
+                                  sizeof(struct dasd_snid_data), device,
+                                  NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4331,7 +4335,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data) +
                                    sizeof(struct dasd_rssd_perf_stats_t)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                            "Could not allocate initialization request");
@@ -4477,7 +4481,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
        psf1 = psf_data[1];
 
        /* setup CCWs for PSF + RSSD */
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                        "Could not allocate initialization request");
@@ -5037,7 +5041,7 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data) +
                                    sizeof(struct dasd_rssd_messages)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
                                "Could not allocate read message buffer request");
@@ -5126,7 +5130,7 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   sizeof(struct dasd_psf_prssd_data) + 1,
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
                                "Could not allocate read message buffer request");
@@ -5284,8 +5288,8 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
        int rc;
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
-                                 sizeof(struct dasd_psf_cuir_response),
-                                 device);
+                                  sizeof(struct dasd_psf_cuir_response),
+                                  device, NULL);
 
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
index 0af8c5295b650b1132e5946b123b558a08e91ccc..6ef8714dc6935047ec0f48d8cc3f6e34dd77c19c 100644 (file)
@@ -447,7 +447,7 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
                 * is a new ccw in device->eer_cqr. Free the "old"
                 * snss request now.
                 */
-               dasd_kfree_request(cqr, device);
+               dasd_sfree_request(cqr, device);
 }
 
 /*
@@ -472,8 +472,8 @@ int dasd_eer_enable(struct dasd_device *device)
        if (rc)
                goto out;
 
-       cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
-                                  SNSS_DATA_SIZE, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
+                                  SNSS_DATA_SIZE, device, NULL);
        if (IS_ERR(cqr)) {
                rc = -ENOMEM;
                cqr = NULL;
@@ -505,7 +505,7 @@ int dasd_eer_enable(struct dasd_device *device)
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 
        if (cqr)
-               dasd_kfree_request(cqr, device);
+               dasd_sfree_request(cqr, device);
 
        return rc;
 }
@@ -528,7 +528,7 @@ void dasd_eer_disable(struct dasd_device *device)
        in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        if (cqr && !in_use)
-               dasd_kfree_request(cqr, device);
+               dasd_sfree_request(cqr, device);
 }
 
 /*
index a6b132f7e869eb4eb804b3fa8407cd064c92b699..56007a3e7f110358e27ad74563f24e428cbae473 100644 (file)
@@ -356,7 +356,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_discard(
        datasize = sizeof(struct DE_fba_data) +
                nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1));
 
-       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
+       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
@@ -490,7 +491,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
                datasize += (count - 1)*sizeof(struct LO_fba_data);
        }
        /* Allocate the ccw request. */
-       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
+       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
index 96709b1a7bf8d8af0f4e0db7748cd5ac8e5a8650..976b6bd4fb05ccb6afc5de34cdb12c080c7f2428 100644 (file)
@@ -158,40 +158,33 @@ do { \
 
 struct dasd_ccw_req {
        unsigned int magic;             /* Eye catcher */
+       int intrc;                      /* internal error, e.g. from start_IO */
        struct list_head devlist;       /* for dasd_device request queue */
        struct list_head blocklist;     /* for dasd_block request queue */
-
-       /* Where to execute what... */
        struct dasd_block *block;       /* the originating block device */
        struct dasd_device *memdev;     /* the device used to allocate this */
        struct dasd_device *startdev;   /* device the request is started on */
        struct dasd_device *basedev;    /* base device if no block->base */
        void *cpaddr;                   /* address of ccw or tcw */
+       short retries;                  /* A retry counter */
        unsigned char cpmode;           /* 0 = cmd mode, 1 = itcw */
        char status;                    /* status of this request */
-       short retries;                  /* A retry counter */
+       char lpm;                       /* logical path mask */
        unsigned long flags;            /* flags of this request */
        struct dasd_queue *dq;
-
-       /* ... and how */
        unsigned long starttime;        /* jiffies time of request start */
        unsigned long expires;          /* expiration period in jiffies */
-       char lpm;                       /* logical path mask */
        void *data;                     /* pointer to data area */
-
-       /* these are important for recovering erroneous requests          */
-       int intrc;                      /* internal error, e.g. from start_IO */
        struct irb irb;                 /* device status in case of an error */
        struct dasd_ccw_req *refers;    /* ERP-chain queueing. */
        void *function;                 /* originating ERP action */
+       void *mem_chunk;
 
-       /* these are for statistics only */
        unsigned long buildclk;         /* TOD-clock of request generation */
        unsigned long startclk;         /* TOD-clock of request start */
        unsigned long stopclk;          /* TOD-clock of request interrupt */
        unsigned long endclk;           /* TOD-clock of request termination */
 
-        /* Callback that is called after reaching final status. */
        void (*callback)(struct dasd_ccw_req *, void *data);
        void *callback_data;
 };
@@ -714,19 +707,10 @@ extern const struct block_device_operations dasd_device_operations;
 extern struct kmem_cache *dasd_page_cache;
 
 struct dasd_ccw_req *
-dasd_kmalloc_request(int , int, int, struct dasd_device *);
-struct dasd_ccw_req *
-dasd_smalloc_request(int , int, int, struct dasd_device *);
-void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
+dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *);
 void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
 void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
 
-static inline int
-dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
-{
-       return set_normalized_cda(ccw, cda);
-}
-
 struct dasd_device *dasd_alloc_device(void);
 void dasd_free_device(struct dasd_device *);
 
index a070ef0efe65d0079cc10245b1ed8b79b8e8fba9..f230516abb96d31b4eabb2689a7230905857c48f 100644 (file)
@@ -5,6 +5,7 @@
 
 # The following is required for define_trace.h to find ./trace.h
 CFLAGS_trace.o := -I$(src)
+CFLAGS_vfio_ccw_fsm.o := -I$(src)
 
 obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
        fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o
index dce92b2a895d6ff3bbe38104ed08ea32c7979432..dbe7c7ac9ac8c8c4456f142b14c740d3bdc0c5e6 100644 (file)
 #define CCWCHAIN_LEN_MAX       256
 
 struct pfn_array {
+       /* Starting guest physical I/O address. */
        unsigned long           pa_iova;
+       /* Array that stores PFNs of the pages need to pin. */
        unsigned long           *pa_iova_pfn;
+       /* Array that receives PFNs of the pages pinned. */
        unsigned long           *pa_pfn;
+       /* Number of pages pinned from @pa_iova. */
        int                     pa_nr;
 };
 
@@ -46,70 +50,33 @@ struct ccwchain {
 };
 
 /*
- * pfn_array_pin() - pin user pages in memory
+ * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory
  * @pa: pfn_array on which to perform the operation
  * @mdev: the mediated device to perform pin/unpin operations
+ * @iova: target guest physical address
+ * @len: number of bytes that should be pinned from @iova
  *
- * Attempt to pin user pages in memory.
+ * Attempt to allocate memory for PFNs, and pin user pages in memory.
  *
  * Usage of pfn_array:
- * @pa->pa_iova     starting guest physical I/O address. Assigned by caller.
- * @pa->pa_iova_pfn array that stores PFNs of the pages need to pin. Allocated
- *                  by caller.
- * @pa->pa_pfn      array that receives PFNs of the pages pinned. Allocated by
- *                  caller.
- * @pa->pa_nr       number of pages from @pa->pa_iova to pin. Assigned by
- *                  caller.
- *                  number of pages pinned. Assigned by callee.
+ * We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in
+ * this structure will be filled in by this function.
  *
  * Returns:
  *   Number of pages pinned on success.
- *   If @pa->pa_nr is 0 or negative, returns 0.
+ *   If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially,
+ *   returns -EINVAL.
  *   If no pages were pinned, returns -errno.
  */
-static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
-{
-       int i, ret;
-
-       if (pa->pa_nr <= 0) {
-               pa->pa_nr = 0;
-               return 0;
-       }
-
-       pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
-       for (i = 1; i < pa->pa_nr; i++)
-               pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
-
-       ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
-                            IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
-
-       if (ret > 0 && ret != pa->pa_nr) {
-               vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
-               pa->pa_nr = 0;
-               return 0;
-       }
-
-       return ret;
-}
-
-/* Unpin the pages before releasing the memory. */
-static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
-{
-       vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
-       pa->pa_nr = 0;
-       kfree(pa->pa_iova_pfn);
-}
-
-/* Alloc memory for PFNs, then pin pages with them. */
 static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
                               u64 iova, unsigned int len)
 {
-       int ret = 0;
+       int i, ret = 0;
 
        if (!len)
                return 0;
 
-       if (pa->pa_nr)
+       if (pa->pa_nr || pa->pa_iova_pfn)
                return -EINVAL;
 
        pa->pa_iova = iova;
@@ -126,18 +93,39 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
                return -ENOMEM;
        pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
 
-       ret = pfn_array_pin(pa, mdev);
+       pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
+       for (i = 1; i < pa->pa_nr; i++)
+               pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
 
-       if (ret > 0)
-               return ret;
-       else if (!ret)
+       ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
+                            IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
+
+       if (ret < 0) {
+               goto err_out;
+       } else if (ret > 0 && ret != pa->pa_nr) {
+               vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
                ret = -EINVAL;
+               goto err_out;
+       }
 
+       return ret;
+
+err_out:
+       pa->pa_nr = 0;
        kfree(pa->pa_iova_pfn);
+       pa->pa_iova_pfn = NULL;
 
        return ret;
 }
 
+/* Unpin the pages before releasing the memory. */
+static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
+{
+       vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+       pa->pa_nr = 0;
+       kfree(pa->pa_iova_pfn);
+}
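
Merging pfn_array_pin() into pfn_array_alloc_pin() gives the function a single err_out path that restores the invariant its new kerneldoc promises: after any failure, pa_nr is 0 and pa_iova_pfn is NULL again. A hedged plain-C model of the flow, with a stand-in where vfio_pin_pages() would be called:

#include <stdlib.h>

#define PAGE_SHIFT_SK 12                /* assumed 4K pages, as on s390 */

static long fake_pin(unsigned long *pfns, long nr)
{
        (void)pfns;                     /* stand-in for vfio_pin_pages() */
        return nr;
}

/* Fill the PFN array with consecutive frame numbers derived from the
 * start address, "pin" them, and on failure roll everything back so the
 * caller sees the pristine (nr == 0, array == NULL) state. */
static long pin_sketch(unsigned long iova, long nr, unsigned long **out)
{
        unsigned long *pfns = calloc(nr, sizeof(*pfns));
        long i, pinned;

        if (!pfns)
                return -1;
        pfns[0] = iova >> PAGE_SHIFT_SK;
        for (i = 1; i < nr; i++)
                pfns[i] = pfns[i - 1] + 1;  /* guest pages are contiguous */

        pinned = fake_pin(pfns, nr);
        if (pinned != nr) {             /* partial pin: unpin and reset */
                free(pfns);
                *out = NULL;
                return -1;
        }
        *out = pfns;
        return pinned;
}
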
+
 static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
 {
        pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
@@ -365,6 +353,9 @@ static void cp_unpin_free(struct channel_program *cp)
  * This is the chain length not considering any TICs.
  * You need to do a new round for each TIC target.
  *
+ * The program is also validated for absence of not yet supported
+ * indirect data addressing scenarios.
+ *
  * Returns: the length of the ccw chain or -errno.
  */
 static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
@@ -391,6 +382,14 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
        do {
                cnt++;
 
+               /*
+                * As we don't want to fail direct addressing even if the
+                * orb specified one of the unsupported formats, we defer
+                * checking for IDAWs in unsupported formats to here.
+                */
+               if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
+                       return -EOPNOTSUPP;
+
                if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
                        break;
 
@@ -503,7 +502,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
        struct ccw1 *ccw;
        struct pfn_array_table *pat;
        unsigned long *idaws;
-       int idaw_nr;
+       int ret;
 
        ccw = chain->ch_ccw + idx;
 
@@ -523,18 +522,19 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
         * needed when translating a direct ccw to a idal ccw.
         */
        pat = chain->ch_pat + idx;
-       if (pfn_array_table_init(pat, 1))
-               return -ENOMEM;
-       idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev,
-                                     ccw->cda, ccw->count);
-       if (idaw_nr < 0)
-               return idaw_nr;
+       ret = pfn_array_table_init(pat, 1);
+       if (ret)
+               goto out_init;
+
+       ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
+       if (ret < 0)
+               goto out_init;
 
        /* Translate this direct ccw to a idal ccw. */
-       idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
+       idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
        if (!idaws) {
-               pfn_array_table_unpin_free(pat, cp->mdev);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_unpin;
        }
        ccw->cda = (__u32) virt_to_phys(idaws);
        ccw->flags |= CCW_FLAG_IDA;
@@ -542,6 +542,12 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
        pfn_array_table_idal_create_words(pat, idaws);
 
        return 0;
+
+out_unpin:
+       pfn_array_table_unpin_free(pat, cp->mdev);
+out_init:
+       ccw->cda = 0;
+       return ret;
 }
 
 static int ccwchain_fetch_idal(struct ccwchain *chain,
@@ -571,7 +577,7 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
        pat = chain->ch_pat + idx;
        ret = pfn_array_table_init(pat, idaw_nr);
        if (ret)
-               return ret;
+               goto out_init;
 
        /* Translate idal ccw to use new allocated idaws. */
        idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
@@ -603,6 +609,8 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
        kfree(idaws);
 out_unpin:
        pfn_array_table_unpin_free(pat, cp->mdev);
+out_init:
+       ccw->cda = 0;
        return ret;
 }
 
@@ -656,10 +664,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
        /*
         * XXX:
         * Only support prefetch enable mode now.
-        * Only support 64bit addressing idal.
-        * Only support 4k IDAW.
         */
-       if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k)
+       if (!orb->cmd.pfch)
                return -EOPNOTSUPP;
 
        INIT_LIST_HEAD(&cp->ccwchain_list);
@@ -688,6 +694,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
        ret = ccwchain_loop_tic(chain, cp);
        if (ret)
                cp_unpin_free(cp);
+       /* It is safe to force: if not set but idals used
+        * ccwchain_calc_length returns an error.
+        */
+       cp->orb.cmd.c64 = 1;
 
        return ret;
 }
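
Together, the ccwchain_calc_length() and cp_init() hunks relax the up-front ORB checks without losing correctness: the format bits are no longer rejected at cp_init() time, only a program that actually uses an IDAL under an unsupported format (no 64-bit IDAWs, or 2K IDAWs) fails during the length calculation, and cp_init() may then safely force c64 on. The predicate, as a one-line sketch:

/* Direct-addressing programs pass under any ORB format; only a CCW that
 * really carries an IDAL needs c64 set and i2k clear. */
static int idal_format_ok(int c64, int i2k, int ccw_uses_idal)
{
        return !ccw_uses_idal || (c64 && !i2k);
}
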
index ea6a2d0b2894decac95c3421c544183ee89c3383..770fa9cfc31041dd84a78a00f0f4135bef5a79ed 100644 (file)
@@ -177,6 +177,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
 {
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        unsigned long flags;
+       int rc = -EAGAIN;
 
        spin_lock_irqsave(sch->lock, flags);
        if (!device_is_registered(&sch->dev))
@@ -187,6 +188,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
 
        if (cio_update_schib(sch)) {
                vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+               rc = 0;
                goto out_unlock;
        }
 
@@ -195,11 +197,12 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
                private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
                                 VFIO_CCW_STATE_STANDBY;
        }
+       rc = 0;
 
 out_unlock:
        spin_unlock_irqrestore(sch->lock, flags);
 
-       return 0;
+       return rc;
 }
 
 static struct css_device_id vfio_ccw_sch_ids[] = {
index 3c800642134e4330d62bb8c0053df62618840ff3..797a82731159a5f9f584810f924adc3467b1e702 100644 (file)
@@ -13,6 +13,9 @@
 #include "ioasm.h"
 #include "vfio_ccw_private.h"
 
+#define CREATE_TRACE_POINTS
+#include "vfio_ccw_trace.h"
+
 static int fsm_io_helper(struct vfio_ccw_private *private)
 {
        struct subchannel *sch;
@@ -110,6 +113,10 @@ static void fsm_disabled_irq(struct vfio_ccw_private *private,
         */
        cio_disable_subchannel(sch);
 }
+inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
+{
+       return p->sch->schid;
+}
 
 /*
  * Deal with the ccw command request from the userspace.
@@ -121,6 +128,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
        union scsw *scsw = &private->scsw;
        struct ccw_io_region *io_region = &private->io_region;
        struct mdev_device *mdev = private->mdev;
+       char *errstr = "request";
 
        private->state = VFIO_CCW_STATE_BOXED;
 
@@ -132,15 +140,19 @@ static void fsm_io_request(struct vfio_ccw_private *private,
                /* Don't try to build a cp if transport mode is specified. */
                if (orb->tm.b) {
                        io_region->ret_code = -EOPNOTSUPP;
+                       errstr = "transport mode";
                        goto err_out;
                }
                io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
                                              orb);
-               if (io_region->ret_code)
+               if (io_region->ret_code) {
+                       errstr = "cp init";
                        goto err_out;
+               }
 
                io_region->ret_code = cp_prefetch(&private->cp);
                if (io_region->ret_code) {
+                       errstr = "cp prefetch";
                        cp_free(&private->cp);
                        goto err_out;
                }
@@ -148,6 +160,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
                /* Start channel program and wait for I/O interrupt. */
                io_region->ret_code = fsm_io_helper(private);
                if (io_region->ret_code) {
+                       errstr = "cp fsm_io_helper";
                        cp_free(&private->cp);
                        goto err_out;
                }
@@ -164,6 +177,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
 
 err_out:
        private->state = VFIO_CCW_STATE_IDLE;
+       trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
+                              io_region->ret_code, errstr);
 }
 
 /*
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
new file mode 100644 (file)
index 0000000..b1da53d
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_trace.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Tracepoints for vfio_ccw driver
+ *
+ * Copyright IBM Corp. 2018
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ *            Halil Pasic <pasic@linux.vnet.ibm.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vfio_ccw
+
+#if !defined(_VFIO_CCW_TRACE_) || defined(TRACE_HEADER_MULTI_READ)
+#define _VFIO_CCW_TRACE_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vfio_ccw_io_fctl,
+       TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr),
+       TP_ARGS(fctl, schid, errno, errstr),
+
+       TP_STRUCT__entry(
+               __field(int, fctl)
+               __field_struct(struct subchannel_id, schid)
+               __field(int, errno)
+               __field(char*, errstr)
+       ),
+
+       TP_fast_assign(
+               __entry->fctl = fctl;
+               __entry->schid = schid;
+               __entry->errno = errno;
+               __entry->errstr = errstr;
+       ),
+
+       TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s",
+                 __entry->schid.cssid,
+                 __entry->schid.ssid,
+                 __entry->schid.sch_no,
+                 __entry->fctl,
+                 __entry->errno,
+                 __entry->errstr)
+);
+
+#endif /* _VFIO_CCW_TRACE_ */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE vfio_ccw_trace
+
+#include <trace/define_trace.h>
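
The header follows the kernel's define_trace.h pattern: TRACE_EVENT() declares trace_vfio_ccw_io_fctl(), and exactly one translation unit defines CREATE_TRACE_POINTS before including the header so the bodies are emitted once, which is why the Makefile hunk above adds -I$(src) for vfio_ccw_fsm.o. In outline:

/* In vfio_ccw_fsm.c (see the hunk above), and only there: */
#define CREATE_TRACE_POINTS
#include "vfio_ccw_trace.h"

/*
 * Any other user includes the header without the define and fires the
 * event as a plain call, as fsm_io_request() now does:
 *
 *     trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
 *                            io_region->ret_code, errstr);
 */
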
index c7e484f706543ebdc2ce63084cf1d45a2146869e..7c5a25ddf8321e9111a307d39002a237945f2856 100644 (file)
@@ -95,4 +95,14 @@ config CCWGROUP
        tristate
        default (LCS || CTCM || QETH)
 
+config ISM
+       tristate "Support for ISM vPCI Adapter"
+       depends on PCI && SMC
+       default n
+       help
+         Select this option if you want to use the Internal Shared Memory
+         vPCI Adapter.
+
+         To compile as a module choose M. The module name is ism.
+         If unsure, choose N.
 endmenu
index 513b7ae64980ef1da15bfcfa04f2689a6723300b..f2d6bbe57a6fcf97e70c5adb5449c3caf3bbb530 100644 (file)
@@ -15,3 +15,6 @@ qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o
 obj-$(CONFIG_QETH_L2) += qeth_l2.o
 qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o
 obj-$(CONFIG_QETH_L3) += qeth_l3.o
+
+ism-y := ism_drv.o
+obj-$(CONFIG_ISM) += ism.o
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
new file mode 100644 (file)
index 0000000..0aab908
--- /dev/null
+++ b/drivers/s390/net/ism.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_ISM_H
+#define S390_ISM_H
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <net/smc.h>
+
+#define UTIL_STR_LEN   16
+
+/*
+ * Do not use the first word of the DMB bits to ensure 8 byte aligned access.
+ */
+#define ISM_DMB_WORD_OFFSET    1
+#define ISM_DMB_BIT_OFFSET     (ISM_DMB_WORD_OFFSET * 32)
+#define ISM_NR_DMBS            1920
+
+#define ISM_REG_SBA    0x1
+#define ISM_REG_IEQ    0x2
+#define ISM_READ_GID   0x3
+#define ISM_ADD_VLAN_ID        0x4
+#define ISM_DEL_VLAN_ID        0x5
+#define ISM_SET_VLAN   0x6
+#define ISM_RESET_VLAN 0x7
+#define ISM_QUERY_INFO 0x8
+#define ISM_QUERY_RGID 0x9
+#define ISM_REG_DMB    0xA
+#define ISM_UNREG_DMB  0xB
+#define ISM_SIGNAL_IEQ 0xE
+#define ISM_UNREG_SBA  0x11
+#define ISM_UNREG_IEQ  0x12
+
+#define ISM_ERROR      0xFFFF
+
+struct ism_req_hdr {
+       u32 cmd;
+       u16 : 16;
+       u16 len;
+};
+
+struct ism_resp_hdr {
+       u32 cmd;
+       u16 ret;
+       u16 len;
+};
+
+union ism_reg_sba {
+       struct {
+               struct ism_req_hdr hdr;
+               u64 sba;
+       } request;
+       struct {
+               struct ism_resp_hdr hdr;
+       } response;
+} __aligned(16);
+
+union ism_reg_ieq {
+       struct {
+               struct ism_req_hdr hdr;
+               u64 ieq;
+               u64 len;
+       } request;
+       struct {
+               struct ism_resp_hdr hdr;
+       } response;
+} __aligned(16);
+
+union ism_read_gid {
+       struct {
+               struct ism_req_hdr hdr;
+       } request;
+       struct {
+               struct ism_resp_hdr hdr;
+               u64 gid;
+       } response;
+} __aligned(16);
+
+union ism_qi {
+       struct {
+               struct ism_req_hdr hdr;
+       } request;
+       struct {
+               struct ism_resp_hdr hdr;
+               u32 version;
+               u32 max_len;
+               u64 ism_state;
+               u64 my_gid;
+               u64 sba;
+               u64 ieq;
+               u32 ieq_len;
+               u32 : 32;
+               u32 dmbs_owned;
+               u32 dmbs_used;
+               u32 vlan_required;
+               u32 vlan_nr_ids;
+               u16 vlan_id[64];
+       } response;
+} __aligned(64);
+
+union ism_query_rgid {
+       struct {
+               struct ism_req_hdr hdr;
+               u64 rgid;
+               u32 vlan_valid;
+               u32 vlan_id;
+       } request;
+       struct {
+               struct ism_resp_hdr hdr;
+       } response;
+} __aligned(16);
+
+union ism_reg_dmb {
+       struct {
+               struct ism_req_hdr hdr;
+               u64 dmb;
+               u32 dmb_len;
+               u32 sba_idx;
+               u32 vlan_valid;
+               u32 vlan_id;
+               u64 rgid;
+       } request;
+       struct {
+               struct ism_resp_hdr hdr;
+               u64 dmb_tok;
+       } response;
+} __aligned(32);
+
+union ism_sig_ieq {
+       struct {
+               struct ism_req_hdr hdr;
+               u64 rgid;
+               u32 trigger_irq;
+               u32 event_code;
+               u64 info;
+       } request;
+       struct {
+               struct ism_resp_hdr hdr;
+       } response;
+} __aligned(32);
+
+union ism_unreg_dmb {
+       struct {
+               struct ism_req_hdr hdr;
+               u64 dmb_tok;
+       } request;
+       struct {
+               struct ism_resp_hdr hdr;
+       } response;
+} __aligned(16);
+
+union ism_cmd_simple {
+       struct {
+               struct ism_req_hdr hdr;
+       } request;
+       struct {
+               struct ism_resp_hdr hdr;
+       } response;
+} __aligned(8);
+
+union ism_set_vlan_id {
+       struct {
+               struct ism_req_hdr hdr;
+               u64 vlan_id;
+       } request;
+       struct {
+               struct ism_resp_hdr hdr;
+       } response;
+} __aligned(16);
+
+struct ism_eq_header {
+       u64 idx;
+       u64 ieq_len;
+       u64 entry_len;
+       u64 : 64;
+};
+
+struct ism_eq {
+       struct ism_eq_header header;
+       struct smcd_event entry[15];
+};
+
+struct ism_sba {
+       u32 s : 1;      /* summary bit */
+       u32 e : 1;      /* event bit */
+       u32 : 30;
+       u32 dmb_bits[ISM_NR_DMBS / 32];
+       u32 reserved[3];
+       u16 dmbe_mask[ISM_NR_DMBS];
+};
+
+struct ism_dev {
+       spinlock_t lock;
+       struct pci_dev *pdev;
+       struct smcd_dev *smcd;
+
+       void __iomem *ctl;
+
+       struct ism_sba *sba;
+       dma_addr_t sba_dma_addr;
+       DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
+
+       struct ism_eq *ieq;
+       dma_addr_t ieq_dma_addr;
+
+       int ieq_idx;
+};
+
+#define ISM_CREATE_REQ(dmb, idx, sf, offset)           \
+       ((dmb) | (idx) << 24 | (sf) << 23 | (offset))
+
+static inline int __ism_move(struct ism_dev *ism, u64 dmb_req, void *data,
+                            unsigned int size)
+{
+       struct zpci_dev *zdev = to_zpci(ism->pdev);
+       u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, size);
+
+       return zpci_write_block(req, data, dmb_req);
+}
+
+#endif /* S390_ISM_H */
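
All the command unions above share one overlay pattern: request and response are two views of the same aligned buffer, so the caller fills the request view, ism_cmd() (defined in ism_drv.c below) round-trips the buffer through the control area, and the caller reads the response view. A hedged plain-C sketch of that overlay; the field names differ from the real headers, which use an anonymous bitfield for the reserved word:

#include <stdint.h>
#include <string.h>

struct req_hdr_sk  { uint32_t cmd; uint16_t rsvd; uint16_t len; };
struct resp_hdr_sk { uint32_t cmd; uint16_t ret;  uint16_t len; };

/* Two views of one buffer, as in union ism_cmd_simple above. */
union cmd_simple_sk {
        struct { struct req_hdr_sk hdr; }  request;
        struct { struct resp_hdr_sk hdr; } response;
};

static uint16_t roundtrip(union cmd_simple_sk *c, uint32_t code)
{
        memset(c, 0, sizeof(*c));
        c->request.hdr.cmd = code;
        c->request.hdr.len = sizeof(c->request);
        /* ... the device round-trip would happen here; ism_cmd() below
         * even presets ret to ISM_ERROR before reading the response ... */
        return c->response.hdr.ret;     /* 0 means success */
}
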
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
new file mode 100644 (file)
index 0000000..c063189
--- /dev/null
+++ b/drivers/s390/net/ism_drv.c
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ISM driver for s390.
+ *
+ * Copyright IBM Corp. 2018
+ */
+#define KMSG_COMPONENT "ism"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/err.h>
+#include <net/smc.h>
+
+#include <asm/debug.h>
+
+#include "ism.h"
+
+MODULE_DESCRIPTION("ISM driver for s390");
+MODULE_LICENSE("GPL");
+
+#define PCI_DEVICE_ID_IBM_ISM 0x04ED
+#define DRV_NAME "ism"
+
+static const struct pci_device_id ism_device_table[] = {
+       { PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ism_device_table);
+
+static debug_info_t *ism_debug_info;
+
+static int ism_cmd(struct ism_dev *ism, void *cmd)
+{
+       struct ism_req_hdr *req = cmd;
+       struct ism_resp_hdr *resp = cmd;
+
+       memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req));
+       memcpy_toio(ism->ctl, req, sizeof(*req));
+
+       WRITE_ONCE(resp->ret, ISM_ERROR);
+
+       memcpy_fromio(resp, ism->ctl, sizeof(*resp));
+       if (resp->ret) {
+               debug_text_event(ism_debug_info, 0, "cmd failure");
+               debug_event(ism_debug_info, 0, resp, sizeof(*resp));
+               goto out;
+       }
+       memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp),
+                     resp->len - sizeof(*resp));
+out:
+       return resp->ret;
+}
+
+static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
+{
+       union ism_cmd_simple cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.request.hdr.cmd = cmd_code;
+       cmd.request.hdr.len = sizeof(cmd.request);
+
+       return ism_cmd(ism, &cmd);
+}
+
+static int query_info(struct ism_dev *ism)
+{
+       union ism_qi cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.request.hdr.cmd = ISM_QUERY_INFO;
+       cmd.request.hdr.len = sizeof(cmd.request);
+
+       if (ism_cmd(ism, &cmd))
+               goto out;
+
+       debug_text_event(ism_debug_info, 3, "query info");
+       debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
+out:
+       return 0;
+}
+
+static int register_sba(struct ism_dev *ism)
+{
+       union ism_reg_sba cmd;
+       dma_addr_t dma_handle;
+       struct ism_sba *sba;
+
+       sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
+                                 &dma_handle, GFP_KERNEL);
+       if (!sba)
+               return -ENOMEM;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.request.hdr.cmd = ISM_REG_SBA;
+       cmd.request.hdr.len = sizeof(cmd.request);
+       cmd.request.sba = dma_handle;
+
+       if (ism_cmd(ism, &cmd)) {
+               dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
+               return -EIO;
+       }
+
+       ism->sba = sba;
+       ism->sba_dma_addr = dma_handle;
+
+       return 0;
+}
+
+static int register_ieq(struct ism_dev *ism)
+{
+       union ism_reg_ieq cmd;
+       dma_addr_t dma_handle;
+       struct ism_eq *ieq;
+
+       ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
+                                 &dma_handle, GFP_KERNEL);
+       if (!ieq)
+               return -ENOMEM;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.request.hdr.cmd = ISM_REG_IEQ;
+       cmd.request.hdr.len = sizeof(cmd.request);
+       cmd.request.ieq = dma_handle;
+       cmd.request.len = sizeof(*ieq);
+
+       if (ism_cmd(ism, &cmd)) {
+               dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
+               return -EIO;
+       }
+
+       ism->ieq = ieq;
+       ism->ieq_idx = -1;
+       ism->ieq_dma_addr = dma_handle;
+
+       return 0;
+}
+
+static int unregister_sba(struct ism_dev *ism)
+{
+       if (!ism->sba)
+               return 0;
+
+       if (ism_cmd_simple(ism, ISM_UNREG_SBA))
+               return -EIO;
+
+       dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
+                         ism->sba, ism->sba_dma_addr);
+
+       ism->sba = NULL;
+       ism->sba_dma_addr = 0;
+
+       return 0;
+}
+
+static int unregister_ieq(struct ism_dev *ism)
+{
+       if (!ism->ieq)
+               return 0;
+
+       if (ism_cmd_simple(ism, ISM_UNREG_IEQ))
+               return -EIO;
+
+       dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
+                         ism->ieq, ism->ieq_dma_addr);
+
+       ism->ieq = NULL;
+       ism->ieq_dma_addr = 0;
+
+       return 0;
+}
+
+static int ism_read_local_gid(struct ism_dev *ism)
+{
+       union ism_read_gid cmd;
+       int ret;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.request.hdr.cmd = ISM_READ_GID;
+       cmd.request.hdr.len = sizeof(cmd.request);
+
+       ret = ism_cmd(ism, &cmd);
+       if (ret)
+               goto out;
+
+       ism->smcd->local_gid = cmd.response.gid;
+out:
+       return ret;
+}
+
+static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
+                         u32 vid)
+{
+       struct ism_dev *ism = smcd->priv;
+       union ism_query_rgid cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.request.hdr.cmd = ISM_QUERY_RGID;
+       cmd.request.hdr.len = sizeof(cmd.request);
+
+       cmd.request.rgid = rgid;
+       cmd.request.vlan_valid = vid_valid;
+       cmd.request.vlan_id = vid;
+
+       return ism_cmd(ism, &cmd);
+}
+
+static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
+{
+       clear_bit(dmb->sba_idx, ism->sba_bitmap);
+       dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
+                         dmb->cpu_addr, dmb->dma_addr);
+}
+
+static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
+{
+       unsigned long bit;
+
+       if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
+               return -EINVAL;
+
+       if (!dmb->sba_idx) {
+               bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
+                                        ISM_DMB_BIT_OFFSET);
+               if (bit == ISM_NR_DMBS)
+                       return -ENOMEM;
+
+               dmb->sba_idx = bit;
+       }
+       if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
+           test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
+               return -EINVAL;
+
+       dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len,
+                                           &dmb->dma_addr, GFP_KERNEL |
+                                           __GFP_NOWARN | __GFP_NOMEMALLOC |
+                                           __GFP_COMP | __GFP_NORETRY);
+       if (!dmb->cpu_addr)
+               clear_bit(dmb->sba_idx, ism->sba_bitmap);
+
+       return dmb->cpu_addr ? 0 : -ENOMEM;
+}
+
+static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+{
+       struct ism_dev *ism = smcd->priv;
+       union ism_reg_dmb cmd;
+       int ret;
+
+       ret = ism_alloc_dmb(ism, dmb);
+       if (ret)
+               goto out;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.request.hdr.cmd = ISM_REG_DMB;
+       cmd.request.hdr.len = sizeof(cmd.request);
+
+       cmd.request.dmb = dmb->dma_addr;
+       cmd.request.dmb_len = dmb->dmb_len;
+       cmd.request.sba_idx = dmb->sba_idx;
+       cmd.request.vlan_valid = dmb->vlan_valid;
+       cmd.request.vlan_id = dmb->vlan_id;
+       cmd.request.rgid = dmb->rgid;
+
+       ret = ism_cmd(ism, &cmd);
+       if (ret) {
+               ism_free_dmb(ism, dmb);
+               goto out;
+       }
+       dmb->dmb_tok = cmd.response.dmb_tok;
+out:
+       return ret;
+}
+
+static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+{
+       struct ism_dev *ism = smcd->priv;
+       union ism_unreg_dmb cmd;
+       int ret;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.request.hdr.cmd = ISM_UNREG_DMB;
+       cmd.request.hdr.len = sizeof(cmd.request);
+
+       cmd.request.dmb_tok = dmb->dmb_tok;
+
+       ret = ism_cmd(ism, &cmd);
+       if (ret)
+               goto out;
+
+       ism_free_dmb(ism, dmb);
+out:
+       return ret;
+}
+
+static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
+{
+       struct ism_dev *ism = smcd->priv;
+       union ism_set_vlan_id cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
+       cmd.request.hdr.len = sizeof(cmd.request);
+
+       cmd.request.vlan_id = vlan_id;
+
+       return ism_cmd(ism, &cmd);
+}
+
+static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
+{
+       struct ism_dev *ism = smcd->priv;
+       union ism_set_vlan_id cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
+       cmd.request.hdr.len = sizeof(cmd.request);
+
+       cmd.request.vlan_id = vlan_id;
+
+       return ism_cmd(ism, &cmd);
+}
+
+static int ism_set_vlan_required(struct smcd_dev *smcd)
+{
+       return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
+}
+
+static int ism_reset_vlan_required(struct smcd_dev *smcd)
+{
+       return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
+}
+
+static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
+                         u32 event_code, u64 info)
+{
+       struct ism_dev *ism = smcd->priv;
+       union ism_sig_ieq cmd;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
+       cmd.request.hdr.len = sizeof(cmd.request);
+
+       cmd.request.rgid = rgid;
+       cmd.request.trigger_irq = trigger_irq;
+       cmd.request.event_code = event_code;
+       cmd.request.info = info;
+
+       return ism_cmd(ism, &cmd);
+}
+
+static unsigned int max_bytes(unsigned int start, unsigned int len,
+                             unsigned int boundary)
+{
+       return min(boundary - (start & (boundary - 1)), len);
+}
+
+static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
+                   bool sf, unsigned int offset, void *data, unsigned int size)
+{
+       struct ism_dev *ism = smcd->priv;
+       unsigned int bytes;
+       u64 dmb_req;
+       int ret;
+
+       while (size) {
+               bytes = max_bytes(offset, size, PAGE_SIZE);
+               dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
+                                        offset);
+
+               ret = __ism_move(ism, dmb_req, data, bytes);
+               if (ret)
+                       return ret;
+
+               size -= bytes;
+               data += bytes;
+               offset += bytes;
+       }
+
+       return 0;
+}
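
max_bytes() returns how many bytes fit before the next boundary crossing (boundary is a power of two), capped at the remaining length; ism_move() uses it to split a transfer at page edges so each __ism_move() stays within one 4K page. A small worked example:

#include <stdio.h>

/* Same computation as max_bytes() above. */
static unsigned int max_bytes_sk(unsigned int start, unsigned int len,
                                 unsigned int boundary)
{
        unsigned int to_boundary = boundary - (start & (boundary - 1));
        return to_boundary < len ? to_boundary : len;
}

int main(void)
{
        /* A move of 0x400 bytes starting at offset 0xF00 of a 4K page:
         * the first chunk is 0x100 bytes (up to the page edge), then the
         * loop continues at offset 0x1000 with 0x300 bytes left. */
        printf("%#x\n", max_bytes_sk(0xF00, 0x400, 0x1000)); /* 0x100 */
        return 0;
}
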
+
+static void ism_handle_event(struct ism_dev *ism)
+{
+       struct smcd_event *entry;
+
+       while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
+               if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
+                       ism->ieq_idx = 0;
+
+               entry = &ism->ieq->entry[ism->ieq_idx];
+               debug_event(ism_debug_info, 2, entry, sizeof(*entry));
+               smcd_handle_event(ism->smcd, entry);
+       }
+}
+
+static irqreturn_t ism_handle_irq(int irq, void *data)
+{
+       struct ism_dev *ism = data;
+       unsigned long bit, end;
+       unsigned long *bv;
+
+       bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
+       end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;
+
+       spin_lock(&ism->lock);
+       ism->sba->s = 0;
+       barrier();
+       for (bit = 0;;) {
+               bit = find_next_bit_inv(bv, end, bit);
+               if (bit >= end)
+                       break;
+
+               clear_bit_inv(bit, bv);
+               barrier();
+               smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
+               ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
+       }
+
+       if (ism->sba->e) {
+               ism->sba->e = 0;
+               barrier();
+               ism_handle_event(ism);
+       }
+       spin_unlock(&ism->lock);
+       return IRQ_HANDLED;
+}
+
+static const struct smcd_ops ism_ops = {
+       .query_remote_gid = ism_query_rgid,
+       .register_dmb = ism_register_dmb,
+       .unregister_dmb = ism_unregister_dmb,
+       .add_vlan_id = ism_add_vlan_id,
+       .del_vlan_id = ism_del_vlan_id,
+       .set_vlan_required = ism_set_vlan_required,
+       .reset_vlan_required = ism_reset_vlan_required,
+       .signal_event = ism_signal_ieq,
+       .move_data = ism_move,
+};
+
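+/* Bring the adapter online: allocate the MSI vector, register the SBA and
+ * the event queue with the adapter, read the local GID and register the
+ * device with the SMC-D core.
+ */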
+static int ism_dev_init(struct ism_dev *ism)
+{
+       struct pci_dev *pdev = ism->pdev;
+       int ret;
+
+       ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+       if (ret <= 0)
+               goto out;
+
+       ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
+                         pci_name(pdev), ism);
+       if (ret)
+               goto free_vectors;
+
+       ret = register_sba(ism);
+       if (ret)
+               goto free_irq;
+
+       ret = register_ieq(ism);
+       if (ret)
+               goto unreg_sba;
+
+       ret = ism_read_local_gid(ism);
+       if (ret)
+               goto unreg_ieq;
+
+       ret = smcd_register_dev(ism->smcd);
+       if (ret)
+               goto unreg_ieq;
+
+       query_info(ism);
+       return 0;
+
+unreg_ieq:
+       unregister_ieq(ism);
+unreg_sba:
+       unregister_sba(ism);
+free_irq:
+       free_irq(pci_irq_vector(pdev, 0), ism);
+free_vectors:
+       pci_free_irq_vectors(pdev);
+out:
+       return ret;
+}
+
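+/* PCI probe: enable the device, map the control BAR, set the 64-bit DMA
+ * mask with 1M segments, and allocate and initialize the SMC-D device.
+ */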
+static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct ism_dev *ism;
+       int ret;
+
+       ism = kzalloc(sizeof(*ism), GFP_KERNEL);
+       if (!ism)
+               return -ENOMEM;
+
+       spin_lock_init(&ism->lock);
+       dev_set_drvdata(&pdev->dev, ism);
+       ism->pdev = pdev;
+
+       ret = pci_enable_device_mem(pdev);
+       if (ret)
+               goto err;
+
+       ret = pci_request_mem_regions(pdev, DRV_NAME);
+       if (ret)
+               goto err_disable;
+
+       ism->ctl = pci_iomap(pdev, 2, 0);
+       if (!ism->ctl) {
+               ret = -ENOMEM;
+               goto err_resource;
+       }
+
+       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (ret)
+               goto err_unmap;
+
+       pci_set_dma_seg_boundary(pdev, SZ_1M - 1);
+       pci_set_dma_max_seg_size(pdev, SZ_1M);
+       pci_set_master(pdev);
+
+       ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
+                                  ISM_NR_DMBS);
+       if (!ism->smcd) {
+               ret = -ENOMEM;
+               goto err_unmap;
+       }
+
+       ism->smcd->priv = ism;
+       ret = ism_dev_init(ism);
+       if (ret)
+               goto err_free;
+
+       return 0;
+
+err_free:
+       smcd_free_dev(ism->smcd);
+err_unmap:
+       pci_iounmap(pdev, ism->ctl);
+err_resource:
+       pci_release_mem_regions(pdev);
+err_disable:
+       pci_disable_device(pdev);
+err:
+       kfree(ism);
+       dev_set_drvdata(&pdev->dev, NULL);
+       return ret;
+}
+
+static void ism_dev_exit(struct ism_dev *ism)
+{
+       struct pci_dev *pdev = ism->pdev;
+
+       smcd_unregister_dev(ism->smcd);
+       unregister_ieq(ism);
+       unregister_sba(ism);
+       free_irq(pci_irq_vector(pdev, 0), ism);
+       pci_free_irq_vectors(pdev);
+}
+
+static void ism_remove(struct pci_dev *pdev)
+{
+       struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
+
+       ism_dev_exit(ism);
+
+       smcd_free_dev(ism->smcd);
+       pci_iounmap(pdev, ism->ctl);
+       pci_release_mem_regions(pdev);
+       pci_disable_device(pdev);
+       dev_set_drvdata(&pdev->dev, NULL);
+       kfree(ism);
+}
+
+static int ism_suspend(struct device *dev)
+{
+       struct ism_dev *ism = dev_get_drvdata(dev);
+
+       ism_dev_exit(ism);
+       return 0;
+}
+
+static int ism_resume(struct device *dev)
+{
+       struct ism_dev *ism = dev_get_drvdata(dev);
+
+       return ism_dev_init(ism);
+}
+
+static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume);
+
+static struct pci_driver ism_driver = {
+       .name     = DRV_NAME,
+       .id_table = ism_device_table,
+       .probe    = ism_probe,
+       .remove   = ism_remove,
+       .driver   = {
+               .pm = &ism_pm_ops,
+       },
+};
+
+static int __init ism_init(void)
+{
+       int ret;
+
+       ism_debug_info = debug_register("ism", 2, 1, 16);
+       if (!ism_debug_info)
+               return -ENODEV;
+
+       debug_register_view(ism_debug_info, &debug_hex_ascii_view);
+       ret = pci_register_driver(&ism_driver);
+       if (ret)
+               debug_unregister(ism_debug_info);
+
+       return ret;
+}
+
+static void __exit ism_exit(void)
+{
+       pci_unregister_driver(&ism_driver);
+       debug_unregister(ism_debug_info);
+}
+
+module_init(ism_init);
+module_exit(ism_exit);
index 2a5fec55bf60f6f30fd684e1c8c5c9a594aa8e75..a932aac62d0e8ddb53c2d337e7a0edc117589d3f 100644 (file)
@@ -465,7 +465,6 @@ struct qeth_qdio_out_buffer {
        struct sk_buff_head skb_list;
        int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
 
-       struct qaob *aob;
        struct qeth_qdio_out_q *q;
        struct qeth_qdio_out_buffer *next_pending;
 };
@@ -662,7 +661,6 @@ struct qeth_card_info {
        int portno;
        enum qeth_card_types type;
        enum qeth_link_types link_type;
-       int is_multicast_different;
        int initial_mtu;
        int max_mtu;
        int broadcast_capable;
@@ -829,6 +827,17 @@ struct qeth_trap_id {
 /*some helper functions*/
 #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
 
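+/* Zero out the first @elements buffer elements, and clear the flags of
+ * elements 14 and 15 so the buffer is clean for re-use.
+ */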
+static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
+                                         unsigned int elements)
+{
+       unsigned int i;
+
+       for (i = 0; i < elements; i++)
+               memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
+       buf->element[14].sflags = 0;
+       buf->element[15].sflags = 0;
+}
+
 /**
  * qeth_get_elements_for_range() -     find number of SBALEs to cover range.
  * @start:                             Start of the address range.
@@ -924,6 +933,19 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
                                                 data, QETH_PROT_IPV6);
 }
 
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
+                           int ipv);
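+/* Select the TX queue: IQD devices send all non-unicast traffic on the
+ * last queue; otherwise use priority queueing when enabled, else the
+ * configured default queue.
+ */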
+static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card,
+                                                       struct sk_buff *skb,
+                                                       int ipv, int cast_type)
+{
+       if (IS_IQD(card) && cast_type != RTN_UNICAST)
+               return card->qdio.out_qs[card->qdio.no_out_queues - 1];
+       if (!card->qdio.do_prio_queueing)
+               return card->qdio.out_qs[card->qdio.default_out_queue];
+       return card->qdio.out_qs[qeth_get_priority_queue(card, skb, ipv)];
+}
+
 extern struct qeth_discipline qeth_l2_discipline;
 extern struct qeth_discipline qeth_l3_discipline;
 extern const struct attribute_group *qeth_generic_attr_groups[];
@@ -961,7 +983,6 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
                  void *);
 struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
                        enum qeth_ipa_cmds, enum qeth_prot_versions);
-int qeth_query_setadapterparms(struct qeth_card *);
 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
                struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
                struct qeth_hdr **);
@@ -987,11 +1008,6 @@ int qeth_query_switch_attributes(struct qeth_card *card,
 int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
        int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
        void *reply_param);
-int qeth_bridgeport_query_ports(struct qeth_card *card,
-       enum qeth_sbp_roles *role, enum qeth_sbp_states *state);
-int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
-int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
-int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
 int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
                         int extra_elems, int data_offset);
 int qeth_get_elements_for_frags(struct sk_buff *);
@@ -1015,7 +1031,6 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
 int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int);
 int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
 int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
-int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
 void qeth_trace_features(struct qeth_card *);
 void qeth_close_dev(struct qeth_card *);
 int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
@@ -1029,7 +1044,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
                                                 __u16, __u16,
                                                 enum qeth_prot_versions);
 int qeth_set_features(struct net_device *, netdev_features_t);
-void qeth_recover_features(struct net_device *dev);
+void qeth_enable_hw_features(struct net_device *dev);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
 netdev_features_t qeth_features_check(struct sk_buff *skb,
                                      struct net_device *dev,
index 8e1474f1ffacfb22b773b02aa1bff6ff91c61ce9..d80972b9bfc7a52d581d40796f32e08aa8af367f 100644 (file)
@@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
                struct qeth_qdio_out_buffer *buf,
                enum iucv_tx_notify notification);
 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-               struct qeth_qdio_out_buffer *buf,
-               enum qeth_qdio_buffer_states newbufstate);
 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 struct workqueue_struct *qeth_wq;
@@ -476,7 +473,6 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
        if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
                                        QETH_QDIO_BUF_HANDLED_DELAYED)) {
                /* for recovery situations */
-               q->bufs[bidx]->aob = q->bufstates[bidx].aob;
                qeth_init_qdio_out_buf(q, bidx);
                QETH_CARD_TEXT(q->card, 2, "clprecov");
        }
@@ -489,6 +485,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
        struct qaob *aob;
        struct qeth_qdio_out_buffer *buffer;
        enum iucv_tx_notify notification;
+       unsigned int i;
 
        aob = (struct qaob *) phys_to_virt(phys_aob_addr);
        QETH_CARD_TEXT(card, 5, "haob");
@@ -512,11 +509,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
        }
        qeth_notify_skbs(buffer->q, buffer, notification);
 
-       buffer->aob = NULL;
-       qeth_clear_output_buffer(buffer->q, buffer,
-                                QETH_QDIO_BUF_HANDLED_DELAYED);
+       /* Free dangling allocations. The attached skbs are handled by
+        * qeth_cleanup_handled_pending().
+        */
+       for (i = 0;
+            i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
+            i++) {
+               if (aob->sba[i] && buffer->is_header[i])
+                       kmem_cache_free(qeth_core_header_cache,
+                                       (void *) aob->sba[i]);
+       }
+       atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
 
-       /* from here on: do not touch buffer anymore */
        qdio_release_aob(aob);
 }
 
@@ -1261,8 +1265,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
 }
 
 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-               struct qeth_qdio_out_buffer *buf,
-               enum qeth_qdio_buffer_states newbufstate)
+                                    struct qeth_qdio_out_buffer *buf)
 {
        int i;
 
@@ -1270,23 +1273,19 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
        if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
                atomic_dec(&queue->set_pci_flags_count);
 
-       if (newbufstate == QETH_QDIO_BUF_EMPTY) {
-               qeth_release_skbs(buf);
-       }
+       qeth_release_skbs(buf);
+
        for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
                if (buf->buffer->element[i].addr && buf->is_header[i])
                        kmem_cache_free(qeth_core_header_cache,
                                buf->buffer->element[i].addr);
                buf->is_header[i] = 0;
-               buf->buffer->element[i].length = 0;
-               buf->buffer->element[i].addr = NULL;
-               buf->buffer->element[i].eflags = 0;
-               buf->buffer->element[i].sflags = 0;
        }
-       buf->buffer->element[15].eflags = 0;
-       buf->buffer->element[15].sflags = 0;
+
+       qeth_scrub_qdio_buffer(buf->buffer,
+                              QETH_MAX_BUFFER_ELEMENTS(queue->card));
        buf->next_element_to_fill = 0;
-       atomic_set(&buf->state, newbufstate);
+       atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
 }
 
 static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
@@ -1297,7 +1296,7 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
                if (!q->bufs[j])
                        continue;
                qeth_cleanup_handled_pending(q, j, 1);
-               qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY);
+               qeth_clear_output_buffer(q, q->bufs[j]);
                if (free) {
                        kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
                        q->bufs[j] = NULL;
@@ -1538,8 +1537,6 @@ static void qeth_determine_card_type(struct qeth_card *card)
        card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
        card->info.type = CARD_RDEV(card)->id.driver_info;
        card->qdio.no_out_queues = QETH_MAX_QUEUES;
-       if (card->info.type == QETH_CARD_TYPE_IQD)
-               card->info.is_multicast_different = 0x0103;
        qeth_update_from_chp_desc(card);
 }
 
@@ -2467,32 +2464,20 @@ static int qeth_ulp_setup(struct qeth_card *card)
 
 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
 {
-       int rc;
        struct qeth_qdio_out_buffer *newbuf;
 
-       rc = 0;
        newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
-       if (!newbuf) {
-               rc = -ENOMEM;
-               goto out;
-       }
+       if (!newbuf)
+               return -ENOMEM;
+
        newbuf->buffer = q->qdio_bufs[bidx];
        skb_queue_head_init(&newbuf->skb_list);
        lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
        newbuf->q = q;
-       newbuf->aob = NULL;
        newbuf->next_pending = q->bufs[bidx];
        atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
        q->bufs[bidx] = newbuf;
-       if (q->bufstates) {
-               q->bufstates[bidx].user = newbuf;
-               QETH_CARD_TEXT_(q->card, 2, "nbs%d", bidx);
-               QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf);
-               QETH_CARD_TEXT_(q->card, 2, "%lx",
-                               (long) newbuf->next_pending);
-       }
-out:
-       return rc;
+       return 0;
 }
 
 static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
@@ -2902,8 +2887,7 @@ int qeth_init_qdio_queues(struct qeth_card *card)
                                   QDIO_MAX_BUFFERS_PER_Q);
                for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
                        qeth_clear_output_buffer(card->qdio.out_qs[i],
-                                       card->qdio.out_qs[i]->bufs[j],
-                                       QETH_QDIO_BUF_EMPTY);
+                                                card->qdio.out_qs[i]->bufs[j]);
                }
                card->qdio.out_qs[i]->card = card;
                card->qdio.out_qs[i]->next_buf_to_fill = 0;
@@ -3070,7 +3054,7 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
        return iob;
 }
 
-int qeth_query_setadapterparms(struct qeth_card *card)
+static int qeth_query_setadapterparms(struct qeth_card *card)
 {
        int rc;
        struct qeth_cmd_buffer *iob;
@@ -3083,7 +3067,6 @@ int qeth_query_setadapterparms(struct qeth_card *card)
        rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
        return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
 
 static int qeth_query_ipassists_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
@@ -3123,7 +3106,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
        return 0;
 }
 
-int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
+static int qeth_query_ipassists(struct qeth_card *card,
+                               enum qeth_prot_versions prot)
 {
        int rc;
        struct qeth_cmd_buffer *iob;
@@ -3135,7 +3119,6 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
        rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
        return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_query_ipassists);
 
 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
                                struct qeth_reply *reply, unsigned long data)
@@ -3174,7 +3157,6 @@ int qeth_query_switch_attributes(struct qeth_card *card,
        return qeth_send_ipa_cmd(card, iob,
                                qeth_query_switch_attributes_cb, sw_info);
 }
-EXPORT_SYMBOL_GPL(qeth_query_switch_attributes);
 
 static int qeth_query_setdiagass_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
@@ -3628,10 +3610,10 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
 }
 EXPORT_SYMBOL_GPL(qeth_configure_cq);
 
-
-static void qeth_qdio_cq_handler(struct qeth_card *card,
-               unsigned int qdio_err,
-               unsigned int queue, int first_element, int count) {
+static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
+                                unsigned int queue, int first_element,
+                                int count)
+{
        struct qeth_qdio_q *cq = card->qdio.c_q;
        int i;
        int rc;
@@ -3657,25 +3639,17 @@ static void qeth_qdio_cq_handler(struct qeth_card *card,
        for (i = first_element; i < first_element + count; ++i) {
                int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
                struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
-               int e;
+               int e = 0;
 
-               e = 0;
                while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
                       buffer->element[e].addr) {
                        unsigned long phys_aob_addr;
 
                        phys_aob_addr = (unsigned long) buffer->element[e].addr;
                        qeth_qdio_handle_aob(card, phys_aob_addr);
-                       buffer->element[e].addr = NULL;
-                       buffer->element[e].eflags = 0;
-                       buffer->element[e].sflags = 0;
-                       buffer->element[e].length = 0;
-
                        ++e;
                }
-
-               buffer->element[15].eflags = 0;
-               buffer->element[15].sflags = 0;
+               qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
        }
        rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
                    card->qdio.c_q->next_buf_to_init,
@@ -3754,11 +3728,11 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
                                qeth_notify_skbs(queue, buffer,
                                                 TX_NOTIFY_PENDING);
                        }
-                       buffer->aob = queue->bufstates[bidx].aob;
                        QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
-                       QETH_CARD_TEXT(queue->card, 5, "aob");
-                       QETH_CARD_TEXT_(queue->card, 5, "%lx",
-                                       virt_to_phys(buffer->aob));
+
+                       /* prepare the queue slot for re-use: */
+                       qeth_scrub_qdio_buffer(buffer->buffer,
+                                              QETH_MAX_BUFFER_ELEMENTS(card));
                        if (qeth_init_qdio_out_buf(queue, bidx)) {
                                QETH_CARD_TEXT(card, 2, "outofbuf");
                                qeth_schedule_recovery(card);
@@ -3772,8 +3746,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
                                qeth_notify_skbs(queue, buffer, n);
                        }
 
-                       qeth_clear_output_buffer(queue, buffer,
-                                               QETH_QDIO_BUF_EMPTY);
+                       qeth_clear_output_buffer(queue, buffer);
                }
                qeth_cleanup_handled_pending(queue, bidx, 0);
        }
@@ -3800,15 +3773,11 @@ static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
  * Note: Function assumes that we have 4 outbound queues.
  */
 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
-                       int ipv, int cast_type)
+                           int ipv)
 {
        __be16 *tci;
        u8 tos;
 
-       if (cast_type && card->info.is_multicast_different)
-               return card->info.is_multicast_different &
-                       (card->qdio.no_out_queues - 1);
-
        switch (card->qdio.do_prio_queueing) {
        case QETH_PRIO_Q_ING_TOS:
        case QETH_PRIO_Q_ING_PREC:
@@ -4834,7 +4803,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
                goto out;
        }
 
-       ccw_device_get_id(CARD_RDEV(card), &id);
+       ccw_device_get_id(CARD_DDEV(card), &id);
        request->resp_buf_len = sizeof(*response);
        request->resp_version = DIAG26C_VERSION2;
        request->op_code = DIAG26C_GET_MAC;
@@ -5877,31 +5846,13 @@ static int qeth_core_restore(struct ccwgroup_device *gdev)
        return 0;
 }
 
-static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
-       .driver = {
-               .owner = THIS_MODULE,
-               .name = "qeth",
-       },
-       .ccw_driver = &qeth_ccw_driver,
-       .setup = qeth_core_probe_device,
-       .remove = qeth_core_remove_device,
-       .set_online = qeth_core_set_online,
-       .set_offline = qeth_core_set_offline,
-       .shutdown = qeth_core_shutdown,
-       .prepare = NULL,
-       .complete = NULL,
-       .freeze = qeth_core_freeze,
-       .thaw = qeth_core_thaw,
-       .restore = qeth_core_restore,
-};
-
 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
                           size_t count)
 {
        int err;
 
-       err = ccwgroup_create_dev(qeth_core_root_dev,
-                                 &qeth_core_ccwgroup_driver, 3, buf);
+       err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
+                                 buf);
 
        return err ? err : count;
 }
@@ -5919,6 +5870,25 @@ static const struct attribute_group *qeth_drv_attr_groups[] = {
        NULL,
 };
 
+static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
+       .driver = {
+               .groups = qeth_drv_attr_groups,
+               .owner = THIS_MODULE,
+               .name = "qeth",
+       },
+       .ccw_driver = &qeth_ccw_driver,
+       .setup = qeth_core_probe_device,
+       .remove = qeth_core_remove_device,
+       .set_online = qeth_core_set_online,
+       .set_offline = qeth_core_set_offline,
+       .shutdown = qeth_core_shutdown,
+       .prepare = NULL,
+       .complete = NULL,
+       .freeze = qeth_core_freeze,
+       .thaw = qeth_core_thaw,
+       .restore = qeth_core_restore,
+};
+
 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
        struct qeth_card *card = dev->ml_priv;
@@ -6459,28 +6429,27 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
 #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
                          NETIF_F_IPV6_CSUM)
 /**
- * qeth_recover_features() - Restore device features after recovery
- * @dev:       the recovering net_device
- *
- * Caller must hold rtnl lock.
+ * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
+ * @dev:       a net_device
  */
-void qeth_recover_features(struct net_device *dev)
+void qeth_enable_hw_features(struct net_device *dev)
 {
-       netdev_features_t features = dev->features;
        struct qeth_card *card = dev->ml_priv;
+       netdev_features_t features;
 
+       rtnl_lock();
+       features = dev->features;
        /* force-off any feature that needs an IPA sequence.
         * netdev_update_features() will restart them.
         */
        dev->features &= ~QETH_HW_FEATURES;
        netdev_update_features(dev);
-
-       if (features == dev->features)
-               return;
-       dev_warn(&card->gdev->dev,
-                "Device recovery failed to restore all offload features\n");
+       if (features != dev->features)
+               dev_warn(&card->gdev->dev,
+                        "Device recovery failed to restore all offload features\n");
+       rtnl_unlock();
 }
-EXPORT_SYMBOL_GPL(qeth_recover_features);
+EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
 
 int qeth_set_features(struct net_device *dev, netdev_features_t features)
 {
@@ -6611,7 +6580,6 @@ static int __init qeth_core_init(void)
        rc = ccw_driver_register(&qeth_ccw_driver);
        if (rc)
                goto ccw_err;
-       qeth_core_ccwgroup_driver.driver.groups = qeth_drv_attr_groups;
        rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
        if (rc)
                goto ccwgroup_err;
index 878e62f3516915081c7a4f834724a67afd8485f4..54c35224262a802394baaa40b0437bd0e3c214ef 100644 (file)
@@ -64,6 +64,8 @@ enum qeth_card_types {
        QETH_CARD_TYPE_OSX     = 2,
 };
 
+#define IS_IQD(card)   ((card)->info.type == QETH_CARD_TYPE_IQD)
+
 #define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
 /* only the first two bytes are looked at in qeth_get_cardname_short */
 enum qeth_link_types {
index f2130051ca11a9c609110661170fd13424f0ab37..ddc615b431a8c826c29a46cbde29bfa47b08caa4 100644 (file)
@@ -14,6 +14,11 @@ extern const struct attribute_group *qeth_l2_attr_groups[];
 int qeth_l2_create_device_attributes(struct device *);
 void qeth_l2_remove_device_attributes(struct device *);
 void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
+int qeth_bridgeport_query_ports(struct qeth_card *card,
+                               enum qeth_sbp_roles *role,
+                               enum qeth_sbp_states *state);
+int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
+int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
 
 int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state);
 int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state);
index a7cb37da6a21313eda8d03119135f1475d35f47d..8ac243de7a9e4e08f4862ea3b85c247c260bf913 100644 (file)
@@ -26,7 +26,6 @@
 
 static int qeth_l2_set_offline(struct ccwgroup_device *);
 static int qeth_l2_stop(struct net_device *);
-static void qeth_l2_set_rx_mode(struct net_device *);
 static void qeth_bridgeport_query_support(struct qeth_card *card);
 static void qeth_bridge_state_change(struct qeth_card *card,
                                        struct qeth_ipa_cmd *cmd);
@@ -140,7 +139,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
 
 static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 {
-       enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+       enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
                                        IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
        int rc;
 
@@ -157,7 +156,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 
 static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
 {
-       enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+       enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
                                        IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
        int rc;
 
@@ -186,12 +185,12 @@ static void qeth_l2_del_all_macs(struct qeth_card *card)
 static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
 {
        if (card->info.type == QETH_CARD_TYPE_OSN)
-               return RTN_UNSPEC;
+               return RTN_UNICAST;
        if (is_broadcast_ether_addr(skb->data))
                return RTN_BROADCAST;
        if (is_multicast_ether_addr(skb->data))
                return RTN_MULTICAST;
-       return RTN_UNSPEC;
+       return RTN_UNICAST;
 }
 
 static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb,
@@ -344,7 +343,6 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
                rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
                kfree(tmpid);
        }
-       qeth_l2_set_rx_mode(card->dev);
        return rc;
 }
 
@@ -501,27 +499,34 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
                return -ERESTARTSYS;
        }
 
+       /* avoid racing against concurrent state change: */
+       if (!mutex_trylock(&card->conf_mutex))
+               return -EAGAIN;
+
        if (!qeth_card_hw_is_reachable(card)) {
                ether_addr_copy(dev->dev_addr, addr->sa_data);
-               return 0;
+               goto out_unlock;
        }
 
        /* don't register the same address twice */
        if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
            (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
-               return 0;
+               goto out_unlock;
 
        /* add the new address, switch over, drop the old */
        rc = qeth_l2_send_setmac(card, addr->sa_data);
        if (rc)
-               return rc;
+               goto out_unlock;
        ether_addr_copy(old_addr, dev->dev_addr);
        ether_addr_copy(dev->dev_addr, addr->sa_data);
 
        if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
                qeth_l2_remove_mac(card, old_addr);
        card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-       return 0;
+
+out_unlock:
+       mutex_unlock(&card->conf_mutex);
+       return rc;
 }
 
 static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -763,18 +768,13 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
        int tx_bytes = skb->len;
        int rc;
 
-       if (card->qdio.do_prio_queueing || (cast_type &&
-                                       card->info.is_multicast_different))
-               queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb,
-                                       ipv, cast_type)];
-       else
-               queue = card->qdio.out_qs[card->qdio.default_out_queue];
-
        if ((card->state != CARD_STATE_UP) || !card->lan_online) {
                card->stats.tx_carrier_errors++;
                goto tx_drop;
        }
 
+       queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
+
        if (card->options.performance_stats) {
                card->perf_stats.outbound_cnt++;
                card->perf_stats.outbound_start_time = qeth_get_micros();
@@ -1112,20 +1112,18 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                netif_carrier_off(card->dev);
 
        qeth_set_allowed_threads(card, 0xffffffff, 0);
+
+       qeth_enable_hw_features(card->dev);
        if (recover_flag == CARD_STATE_RECOVER) {
                if (recovery_mode &&
                    card->info.type != QETH_CARD_TYPE_OSN) {
                        __qeth_l2_open(card->dev);
+                       qeth_l2_set_rx_mode(card->dev);
                } else {
                        rtnl_lock();
                        dev_open(card->dev);
                        rtnl_unlock();
                }
-               /* this also sets saved unicast addresses */
-               qeth_l2_set_rx_mode(card->dev);
-               rtnl_lock();
-               qeth_recover_features(card->dev);
-               rtnl_unlock();
        }
        /* let user_space know that device is online */
        kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
@@ -1871,7 +1869,6 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
                return rc;
        return qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
 }
-EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
 
 static int qeth_bridgeport_set_cb(struct qeth_card *card,
        struct qeth_reply *reply, unsigned long data)
@@ -2019,7 +2016,6 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
                rc = qdio_pnso_brinfo(schid, 0, &response, NULL, NULL);
        return qeth_anset_makerc(card, rc, response);
 }
-EXPORT_SYMBOL_GPL(qeth_bridgeport_an_set);
 
 static bool qeth_bridgeport_is_in_use(struct qeth_card *card)
 {
index e7fa479adf47e0dd41bfacaed8fd347bc40b5581..062f62b4929490504f0b341a9c49f2e8c25e5e21 100644 (file)
@@ -1978,17 +1978,17 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
                    (cast_type == RTN_MULTICAST) ||
                    (cast_type == RTN_ANYCAST))
                        return cast_type;
-               return RTN_UNSPEC;
+               return RTN_UNICAST;
        }
        rcu_read_unlock();
 
        /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
        if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
                return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
-                               RTN_MULTICAST : RTN_UNSPEC;
+                               RTN_MULTICAST : RTN_UNICAST;
        else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
                return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
-                               RTN_MULTICAST : RTN_UNSPEC;
+                               RTN_MULTICAST : RTN_UNICAST;
 
        /* ... and MAC address */
        if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast))
@@ -1997,22 +1997,21 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
                return RTN_MULTICAST;
 
        /* default to unicast */
-       return RTN_UNSPEC;
+       return RTN_UNICAST;
 }
 
-static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
-               struct qeth_hdr *hdr, struct sk_buff *skb)
+static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb,
+                                    unsigned int data_len)
 {
        char daddr[16];
        struct af_iucv_trans_hdr *iucv_hdr;
 
        memset(hdr, 0, sizeof(struct qeth_hdr));
        hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
-       hdr->hdr.l3.ext_flags = 0;
-       hdr->hdr.l3.length = skb->len - ETH_HLEN;
+       hdr->hdr.l3.length = data_len;
        hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
 
-       iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
+       iucv_hdr = (struct af_iucv_trans_hdr *)(skb_mac_header(skb) + ETH_HLEN);
        memset(daddr, 0, sizeof(daddr));
        daddr[0] = 0xfe;
        daddr[1] = 0x80;
@@ -2051,6 +2050,12 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
        }
 
+       if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) {
+               qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
+               if (card->options.performance_stats)
+                       card->perf_stats.tx_csum++;
+       }
+
        /* OSA only: */
        if (!ipv) {
                hdr->hdr.l3.flags = QETH_HDR_PASSTHRU;
@@ -2156,104 +2161,141 @@ static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
        return elements;
 }
 
-static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
-                                          struct net_device *dev)
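+/* TX path that sends the skb in place: the Ethernet header is stripped
+ * and its headroom re-used for the qeth header, avoiding a data copy.
+ * Used for IQD devices, and for non-GSO IPv4 traffic on other cards.
+ */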
+static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
+                               struct qeth_qdio_out_q *queue, int ipv,
+                               int cast_type)
 {
-       int rc;
-       __be16 *tag;
+       const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
+       unsigned int frame_len, nr_frags;
+       unsigned char eth_hdr[ETH_HLEN];
+       unsigned int hdr_elements = 0;
        struct qeth_hdr *hdr = NULL;
-       int hdr_elements = 0;
-       int elements;
-       struct qeth_card *card = dev->ml_priv;
-       struct sk_buff *new_skb = NULL;
-       int ipv = qeth_get_ip_version(skb);
-       int cast_type = qeth_l3_get_cast_type(skb);
-       struct qeth_qdio_out_q *queue =
-               card->qdio.out_qs[card->qdio.do_prio_queueing
-                       || (cast_type && card->info.is_multicast_different) ?
-                       qeth_get_priority_queue(card, skb, ipv, cast_type) :
-                       card->qdio.default_out_queue];
-       int tx_bytes = skb->len;
+       int elements, push_len, rc;
        unsigned int hd_len = 0;
-       bool use_tso;
-       int data_offset = -1;
-       unsigned int nr_frags;
-
-       if (((card->info.type == QETH_CARD_TYPE_IQD) &&
-            (((card->options.cq != QETH_CQ_ENABLED) && !ipv) ||
-             ((card->options.cq == QETH_CQ_ENABLED) &&
-              (be16_to_cpu(skb->protocol) != ETH_P_AF_IUCV)))) ||
-           card->options.sniffer)
-                       goto tx_drop;
 
-       if ((card->state != CARD_STATE_UP) || !card->lan_online) {
-               card->stats.tx_carrier_errors++;
-               goto tx_drop;
+       /* compress skb to fit into one IO buffer: */
+       if (!qeth_get_elements_no(card, skb, 0, 0)) {
+               rc = skb_linearize(skb);
+
+               if (card->options.performance_stats) {
+                       if (rc)
+                               card->perf_stats.tx_linfail++;
+                       else
+                               card->perf_stats.tx_lin++;
+               }
+               if (rc)
+                       return rc;
        }
 
-       if ((cast_type == RTN_BROADCAST) &&
-           (card->info.broadcast_capable == 0))
-               goto tx_drop;
+       /* re-use the L2 header area for the HW header: */
+       rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
+       if (rc)
+               return rc;
+       skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
+       skb_pull(skb, ETH_HLEN);
+       frame_len = skb->len;
+       nr_frags = skb_shinfo(skb)->nr_frags;
 
-       if (card->options.performance_stats) {
-               card->perf_stats.outbound_cnt++;
-               card->perf_stats.outbound_start_time = qeth_get_micros();
+       push_len = qeth_push_hdr(skb, &hdr, hw_hdr_len);
+       if (push_len < 0)
+               return push_len;
+       if (!push_len) {
+               /* hdr was added discontiguous from skb->data */
+               hd_len = hw_hdr_len;
+               hdr_elements = 1;
        }
 
-       /* Ignore segment size from skb_is_gso(), 1 page is always used. */
-       use_tso = skb_is_gso(skb) &&
-                 (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4);
+       elements = qeth_get_elements_no(card, skb, hdr_elements, 0);
+       if (!elements) {
+               rc = -E2BIG;
+               goto out;
+       }
+       elements += hdr_elements;
 
-       if (card->info.type == QETH_CARD_TYPE_IQD) {
-               new_skb = skb;
-               data_offset = ETH_HLEN;
-               hd_len = sizeof(*hdr);
-               hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
-               if (!hdr)
-                       goto tx_drop;
-               hdr_elements++;
-       } else {
-               /* create a clone with writeable headroom */
-               new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso)
-                                       + VLAN_HLEN);
-               if (!new_skb)
-                       goto tx_drop;
+       if (skb->protocol == htons(ETH_P_AF_IUCV))
+               qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len);
+       else
+               qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len);
 
-               if (ipv == 4) {
-                       skb_pull(new_skb, ETH_HLEN);
+       if (IS_IQD(card)) {
+               rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len);
+       } else {
+               /* TODO: drop skb_orphan() once TX completion is fast enough */
+               skb_orphan(skb);
+               rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len,
+                                        elements);
+       }
+out:
+       if (!rc) {
+               if (card->options.performance_stats && nr_frags) {
+                       card->perf_stats.sg_skbs_sent++;
+                       /* nr_frags + skb->data */
+                       card->perf_stats.sg_frags_sent += nr_frags + 1;
                }
-
-               if (ipv != 4 && skb_vlan_tag_present(new_skb)) {
-                       skb_push(new_skb, VLAN_HLEN);
-                       skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
-                       skb_copy_to_linear_data_offset(new_skb, 4,
-                               new_skb->data + 8, 4);
-                       skb_copy_to_linear_data_offset(new_skb, 8,
-                               new_skb->data + 12, 4);
-                       tag = (__be16 *)(new_skb->data + 12);
-                       *tag = cpu_to_be16(ETH_P_8021Q);
-                       *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb));
+       } else {
+               if (!push_len)
+                       kmem_cache_free(qeth_core_header_cache, hdr);
+               if (rc == -EBUSY) {
+                       /* roll back to ETH header */
+                       skb_pull(skb, push_len);
+                       skb_push(skb, ETH_HLEN);
+                       skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
                }
        }
+       return rc;
+}
 
-       netif_stop_queue(dev);
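+/* Copying TX path for all remaining traffic (TSO, IPv6): builds the qeth
+ * header on a headroom-extended clone of the skb.
+ */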
+static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
+                       struct qeth_qdio_out_q *queue, int ipv, int cast_type)
+{
+       unsigned int hd_len, nr_frags;
+       int elements, len, rc;
+       __be16 *tag;
+       struct qeth_hdr *hdr = NULL;
+       int hdr_elements = 0;
+       struct sk_buff *new_skb = NULL;
+       int tx_bytes = skb->len;
+       bool use_tso;
+
+       /* Ignore segment size from skb_is_gso(), 1 page is always used. */
+       use_tso = skb_is_gso(skb) &&
+                 (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4);
+
+       /* create a clone with writeable headroom */
+       new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) +
+                                           VLAN_HLEN);
+       if (!new_skb)
+               return -ENOMEM;
+
+       if (ipv == 4) {
+               skb_pull(new_skb, ETH_HLEN);
+       } else if (skb_vlan_tag_present(new_skb)) {
+               skb_push(new_skb, VLAN_HLEN);
+               skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
+               skb_copy_to_linear_data_offset(new_skb, 4,
+                                              new_skb->data + 8, 4);
+               skb_copy_to_linear_data_offset(new_skb, 8,
+                                              new_skb->data + 12, 4);
+               tag = (__be16 *)(new_skb->data + 12);
+               *tag = cpu_to_be16(ETH_P_8021Q);
+               *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb));
+       }
 
        /* fix hardware limitation: as long as we do not have sbal
         * chaining we can not send long frag lists
         */
-       if ((card->info.type != QETH_CARD_TYPE_IQD) &&
-           ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
-            (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
-               int lin_rc = skb_linearize(new_skb);
+       if ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
+           (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0))) {
+               rc = skb_linearize(new_skb);
 
                if (card->options.performance_stats) {
-                       if (lin_rc)
+                       if (rc)
                                card->perf_stats.tx_linfail++;
                        else
                                card->perf_stats.tx_lin++;
                }
-               if (lin_rc)
-                       goto tx_drop;
+               if (rc)
+                       goto out;
        }
        nr_frags = skb_shinfo(new_skb)->nr_frags;
 
@@ -2265,60 +2307,37 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
                qeth_tso_fill_header(card, hdr, new_skb);
                hdr_elements++;
        } else {
-               if (data_offset < 0) {
-                       hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
-                       qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
-                                           new_skb->len -
-                                           sizeof(struct qeth_hdr));
-               } else {
-                       if (be16_to_cpu(new_skb->protocol) == ETH_P_AF_IUCV)
-                               qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb);
-                       else {
-                               qeth_l3_fill_header(card, hdr, new_skb, ipv,
-                                                   cast_type,
-                                                   new_skb->len - data_offset);
-                       }
-               }
-
-               if (new_skb->ip_summed == CHECKSUM_PARTIAL) {
-                       qeth_tx_csum(new_skb, &hdr->hdr.l3.ext_flags, ipv);
-                       if (card->options.performance_stats)
-                               card->perf_stats.tx_csum++;
-               }
+               hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
+               qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+                                   new_skb->len - sizeof(struct qeth_hdr));
        }
 
        elements = use_tso ?
                   qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
-                  qeth_get_elements_no(card, new_skb, hdr_elements,
-                                       (data_offset > 0) ? data_offset : 0);
+                  qeth_get_elements_no(card, new_skb, hdr_elements, 0);
        if (!elements) {
-               if (data_offset >= 0)
-                       kmem_cache_free(qeth_core_header_cache, hdr);
-               goto tx_drop;
+               rc = -E2BIG;
+               goto out;
        }
        elements += hdr_elements;
 
-       if (card->info.type != QETH_CARD_TYPE_IQD) {
-               int len;
-               if (use_tso) {
-                       hd_len = sizeof(struct qeth_hdr_tso) +
-                                ip_hdrlen(new_skb) + tcp_hdrlen(new_skb);
-                       len = hd_len;
-               } else {
-                       len = sizeof(struct qeth_hdr_layer3);
-               }
-
-               if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len))
-                       goto tx_drop;
-               rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len,
-                                        hd_len, elements);
-       } else
-               rc = qeth_do_send_packet_fast(queue, new_skb, hdr, data_offset,
-                                             hd_len);
+       if (use_tso) {
+               hd_len = sizeof(struct qeth_hdr_tso) +
+                        ip_hdrlen(new_skb) + tcp_hdrlen(new_skb);
+               len = hd_len;
+       } else {
+               hd_len = 0;
+               len = sizeof(struct qeth_hdr_layer3);
+       }
 
+       if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) {
+               rc = -EINVAL;
+               goto out;
+       }
+       rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len,
+                                elements);
+out:
        if (!rc) {
-               card->stats.tx_packets++;
-               card->stats.tx_bytes += tx_bytes;
                if (new_skb != skb)
                        dev_kfree_skb_any(skb);
                if (card->options.performance_stats) {
@@ -2332,30 +2351,68 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
                                card->perf_stats.sg_frags_sent += nr_frags + 1;
                        }
                }
-               rc = NETDEV_TX_OK;
        } else {
-               if (data_offset >= 0)
-                       kmem_cache_free(qeth_core_header_cache, hdr);
+               if (new_skb != skb)
+                       dev_kfree_skb_any(new_skb);
+       }
+       return rc;
+}
 
-               if (rc == -EBUSY) {
-                       if (new_skb != skb)
-                               dev_kfree_skb_any(new_skb);
-                       return NETDEV_TX_BUSY;
-               } else
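+/* Common TX entry point: validate the skb, select the outbound queue and
+ * dispatch to either the in-place or the copying TX path.
+ */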
+static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
+                                          struct net_device *dev)
+{
+       int cast_type = qeth_l3_get_cast_type(skb);
+       struct qeth_card *card = dev->ml_priv;
+       int ipv = qeth_get_ip_version(skb);
+       struct qeth_qdio_out_q *queue;
+       int tx_bytes = skb->len;
+       int rc;
+
+       if (IS_IQD(card)) {
+               if (card->options.sniffer)
+                       goto tx_drop;
+               if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
+                   (card->options.cq == QETH_CQ_ENABLED &&
+                    skb->protocol != htons(ETH_P_AF_IUCV)))
                        goto tx_drop;
        }
 
-       netif_wake_queue(dev);
-       if (card->options.performance_stats)
-               card->perf_stats.outbound_time += qeth_get_micros() -
-                       card->perf_stats.outbound_start_time;
-       return rc;
+       if (card->state != CARD_STATE_UP || !card->lan_online) {
+               card->stats.tx_carrier_errors++;
+               goto tx_drop;
+       }
+
+       if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
+               goto tx_drop;
+
+       queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
+
+       if (card->options.performance_stats) {
+               card->perf_stats.outbound_cnt++;
+               card->perf_stats.outbound_start_time = qeth_get_micros();
+       }
+       netif_stop_queue(dev);
+
+       if (IS_IQD(card) || (!skb_is_gso(skb) && ipv == 4))
+               rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type);
+       else
+               rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
+
+       if (!rc) {
+               card->stats.tx_packets++;
+               card->stats.tx_bytes += tx_bytes;
+               if (card->options.performance_stats)
+                       card->perf_stats.outbound_time += qeth_get_micros() -
+                               card->perf_stats.outbound_start_time;
+               netif_wake_queue(dev);
+               return NETDEV_TX_OK;
+       } else if (rc == -EBUSY) {
+               return NETDEV_TX_BUSY;
+       } /* else fall through */
 
 tx_drop:
        card->stats.tx_dropped++;
        card->stats.tx_errors++;
-       if ((new_skb != skb) && new_skb)
-               dev_kfree_skb_any(new_skb);
        dev_kfree_skb_any(skb);
        netif_wake_queue(dev);
        return NETDEV_TX_OK;
@@ -2497,9 +2554,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
                        card->dev->dev_id = card->info.unique_id & 0xffff;
 
-               card->dev->hw_features |= NETIF_F_SG;
-               card->dev->vlan_features |= NETIF_F_SG;
-
                if (!card->info.guestlan) {
                        card->dev->features |= NETIF_F_SG;
                        card->dev->hw_features |= NETIF_F_TSO |
@@ -2519,6 +2573,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                        return -ENODEV;
                card->dev->flags |= IFF_NOARP;
                card->dev->netdev_ops = &qeth_l3_netdev_ops;
+
                rc = qeth_l3_iqd_read_initial_mac(card);
                if (rc)
                        return rc;
@@ -2534,12 +2589,18 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
        card->dev->max_mtu = ETH_MAX_MTU;
        card->dev->dev_port = card->info.portno;
        card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
+       card->dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+       card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
        card->dev->features |=  NETIF_F_HW_VLAN_CTAG_TX |
                                NETIF_F_HW_VLAN_CTAG_RX |
                                NETIF_F_HW_VLAN_CTAG_FILTER;
+       card->dev->hw_features |= NETIF_F_SG;
+       card->dev->vlan_features |= NETIF_F_SG;
+
        netif_keep_dst(card->dev);
-       netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
-                                         PAGE_SIZE);
+       if (card->dev->hw_features & NETIF_F_TSO)
+               netif_set_gso_max_size(card->dev,
+                                      PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
 
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
@@ -2662,14 +2723,16 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                netif_carrier_on(card->dev);
        else
                netif_carrier_off(card->dev);
+
+       qeth_enable_hw_features(card->dev);
        if (recover_flag == CARD_STATE_RECOVER) {
                rtnl_lock();
-               if (recovery_mode)
+               if (recovery_mode) {
                        __qeth_l3_open(card->dev);
-               else
+                       qeth_l3_set_rx_mode(card->dev);
+               } else {
                        dev_open(card->dev);
-               qeth_l3_set_rx_mode(card->dev);
-               qeth_recover_features(card->dev);
+               }
                rtnl_unlock();
        }
        qeth_trace_features(card);
index 0a9b8b387bd2e70e87310ef7908012a46f32942f..02d65dce74e504230ceb3080b58972d8d5dff950 100644 (file)
@@ -760,7 +760,6 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
-       wmb();
 
        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
@@ -8403,7 +8402,6 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
                ioa_cfg->hrrq[i].allow_interrupts = 1;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
-       wmb();
        if (ioa_cfg->sis64) {
                /* Set the adapter to the correct endian mode. */
                writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
index 0fea2e2326becbf4993dd7cc216e36dad529d678..1027b0cb7fa3634baf0bd870ffdc93e9286cac8e 100644 (file)
@@ -1224,7 +1224,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
 void qlt_schedule_sess_for_deletion(struct fc_port *sess)
 {
        struct qla_tgt *tgt = sess->tgt;
-       struct qla_hw_data *ha = sess->vha->hw;
        unsigned long flags;
 
        if (sess->disc_state == DSC_DELETE_PEND)
@@ -1241,16 +1240,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
                        return;
        }
 
-       spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        if (sess->deleted == QLA_SESS_DELETED)
                sess->logout_on_delete = 0;
 
+       spin_lock_irqsave(&sess->vha->work_lock, flags);
        if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
-               spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+               spin_unlock_irqrestore(&sess->vha->work_lock, flags);
                return;
        }
        sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
-       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+       spin_unlock_irqrestore(&sess->vha->work_lock, flags);
 
        sess->disc_state = DSC_DELETE_PEND;
 
index 24d7496cd9e23cfc2a97126fc22b5f4c25a253b0..364e71861bfd5c2c17caf1e93cdeae669e95b971 100644 (file)
@@ -5507,9 +5507,9 @@ static void __exit scsi_debug_exit(void)
        int k = sdebug_add_host;
 
        stop_all_queued();
-       free_all_queued();
        for (; k; k--)
                sdebug_remove_adapter();
+       free_all_queued();
        driver_unregister(&sdebug_driverfs_driver);
        bus_unregister(&pseudo_lld_bus);
        root_device_unregister(pseudo_primary);
index 1da3d71e9f61f784e8131093bd5378d94bb98745..13948102ca298cf1a20d45d49781aa4dee55d851 100644 (file)
@@ -3592,7 +3592,7 @@ fc_bsg_job_timeout(struct request *req)
 
        /* the blk_end_sync_io() doesn't check the error */
        if (inflight)
-               blk_mq_complete_request(req);
+               __blk_complete_request(req);
        return BLK_EH_DONE;
 }
 
index 36f59a1be7e9a60be61c2b1ba8f7468dfbd8c6c9..61389bdc7926690100fc0a38fc59e8b6a73853ab 100644 (file)
@@ -654,10 +654,17 @@ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
 static int scsifront_sdev_configure(struct scsi_device *sdev)
 {
        struct vscsifrnt_info *info = shost_priv(sdev->host);
+       int err;
 
-       if (info && current == info->curr)
-               xenbus_printf(XBT_NIL, info->dev->nodename,
+       if (info && current == info->curr) {
+               err = xenbus_printf(XBT_NIL, info->dev->nodename,
                              info->dev_state_path, "%d", XenbusStateConnected);
+               if (err) {
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing dev_state_path", __func__);
+                       return err;
+               }
+       }
 
        return 0;
 }
@@ -665,10 +672,15 @@ static int scsifront_sdev_configure(struct scsi_device *sdev)
 static void scsifront_sdev_destroy(struct scsi_device *sdev)
 {
        struct vscsifrnt_info *info = shost_priv(sdev->host);
+       int err;
 
-       if (info && current == info->curr)
-               xenbus_printf(XBT_NIL, info->dev->nodename,
+       if (info && current == info->curr) {
+               err = xenbus_printf(XBT_NIL, info->dev->nodename,
                              info->dev_state_path, "%d", XenbusStateClosed);
+               if (err)
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing dev_state_path", __func__);
+       }
 }
 
 static struct scsi_host_template scsifront_sht = {
@@ -1003,9 +1015,12 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
 
                        if (scsi_add_device(info->host, chn, tgt, lun)) {
                                dev_err(&dev->dev, "scsi_add_device\n");
-                               xenbus_printf(XBT_NIL, dev->nodename,
+                               err = xenbus_printf(XBT_NIL, dev->nodename,
                                              info->dev_state_path,
                                              "%d", XenbusStateClosed);
+                               if (err)
+                                       xenbus_dev_error(dev, err,
+                                               "%s: writing dev_state_path", __func__);
                        }
                        break;
                case VSCSIFRONT_OP_DEL_LUN:
@@ -1019,10 +1034,14 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
                        }
                        break;
                case VSCSIFRONT_OP_READD_LUN:
-                       if (device_state == XenbusStateConnected)
-                               xenbus_printf(XBT_NIL, dev->nodename,
+                       if (device_state == XenbusStateConnected) {
+                               err = xenbus_printf(XBT_NIL, dev->nodename,
                                              info->dev_state_path,
                                              "%d", XenbusStateConnected);
+                               if (err)
+                                       xenbus_dev_error(dev, err,
+                                               "%s: writing dev_state_path", __func__);
+                       }
                        break;
                default:
                        break;
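
The same check-and-report pattern now appears at every xenbus_printf() call site above: capture the return value and report failures through xenbus_dev_error(). One way to keep that repetition down (a sketch only, not part of the patch, assuming the driver's vscsifrnt_info layout) is a helper that owns both steps:

static void scsifront_write_state(struct vscsifrnt_info *info,
                                  enum xenbus_state state,
                                  const char *caller)
{
        int err = xenbus_printf(XBT_NIL, info->dev->nodename,
                                info->dev_state_path, "%d", state);

        if (err)
                xenbus_dev_error(info->dev, err,
                                 "%s: writing dev_state_path", caller);
}

Each call site would then collapse to scsifront_write_state(info, XenbusStateConnected, __func__).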
index f4e3bd40c72e60c0448c98456f7b53f6be7936bd..6ef18cf8f24387e324cf455ae98c30f2b27c95d3 100644 (file)
 
 #define GPC_M4_PU_PDN_FLG              0x1bc
 
-
-#define PGC_MIPI                       4
-#define PGC_PCIE                       5
-#define PGC_USB_HSIC                   8
+/*
+ * The PGC offset values in the GPC_PGC memory map of the
+ * Reference Manual's GPC chapter (Rev. 1, 01/2018 and older)
+ * are incorrect; the offset values below are taken from the
+ * design RTL.
+ */
+#define PGC_MIPI                       16
+#define PGC_PCIE                       17
+#define PGC_USB_HSIC                   20
 #define GPC_PGC_CTRL(n)                        (0x800 + (n) * 0x40)
 #define GPC_PGC_SR(n)                  (GPC_PGC_CTRL(n) + 0xc)
 
index 9dc02f390ba314bf8cfbb1b86223af1859af2507..5856e792d09c8d317b01627c2d03a97eeaebff37 100644 (file)
@@ -5,7 +5,8 @@ menu "Qualcomm SoC drivers"
 
 config QCOM_COMMAND_DB
        bool "Qualcomm Command DB"
-       depends on (ARCH_QCOM && OF) || COMPILE_TEST
+       depends on ARCH_QCOM || COMPILE_TEST
+       depends on OF_RESERVED_MEM
        help
          Command DB queries shared memory by key string for shared system
          resources. Platform drivers that require to set state of a shared
index 95120acc4d806da630f49737cca1eede3286edae..50d03d8b4f9a55f50d52f328039afe0c27991740 100644 (file)
@@ -194,11 +194,12 @@ static int rcar_sysc_pd_power_on(struct generic_pm_domain *genpd)
 
 static bool has_cpg_mstp;
 
-static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
+static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
 {
        struct generic_pm_domain *genpd = &pd->genpd;
        const char *name = pd->genpd.name;
        struct dev_power_governor *gov = &simple_qos_governor;
+       int error;
 
        if (pd->flags & PD_CPU) {
                /*
@@ -251,7 +252,11 @@ static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
        rcar_sysc_power_up(&pd->ch);
 
 finalize:
-       pm_genpd_init(genpd, gov, false);
+       error = pm_genpd_init(genpd, gov, false);
+       if (error)
+               pr_err("Failed to init PM domain %s: %d\n", name, error);
+
+       return error;
 }
 
 static const struct of_device_id rcar_sysc_matches[] __initconst = {
@@ -375,6 +380,9 @@ static int __init rcar_sysc_pd_init(void)
        pr_debug("%pOF: syscier = 0x%08x\n", np, syscier);
        iowrite32(syscier, base + SYSCIER);
 
+       /*
+        * First, create all PM domains
+        */
        for (i = 0; i < info->num_areas; i++) {
                const struct rcar_sysc_area *area = &info->areas[i];
                struct rcar_sysc_pd *pd;
@@ -397,14 +405,29 @@ static int __init rcar_sysc_pd_init(void)
                pd->ch.isr_bit = area->isr_bit;
                pd->flags = area->flags;
 
-               rcar_sysc_pd_setup(pd);
-               if (area->parent >= 0)
-                       pm_genpd_add_subdomain(domains->domains[area->parent],
-                                              &pd->genpd);
+               error = rcar_sysc_pd_setup(pd);
+               if (error)
+                       goto out_put;
 
                domains->domains[area->isr_bit] = &pd->genpd;
        }
 
+       /*
+        * Second, link all PM domains to their parents
+        */
+       for (i = 0; i < info->num_areas; i++) {
+               const struct rcar_sysc_area *area = &info->areas[i];
+
+               if (!area->name || area->parent < 0)
+                       continue;
+
+               error = pm_genpd_add_subdomain(domains->domains[area->parent],
+                                              domains->domains[area->isr_bit]);
+               if (error)
+                       pr_warn("Failed to add PM subdomain %s to parent %u\n",
+                               area->name, area->parent);
+       }
+
        error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
 
 out_put:
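
The restructuring is the usual two-pass initialization: materialize every object first, then wire parent/child links, so a child that precedes its parent in the table never dereferences an empty slot. Distilled into a sketch (create_domain() and link_domain() are hypothetical stand-ins for rcar_sysc_pd_setup() and pm_genpd_add_subdomain()):

struct area { int parent; /* index of parent, or -1 for none */ };

static int create_domain(const struct area *a, void **obj);    /* hypothetical */
static void link_domain(void *parent, void *child);            /* hypothetical */

static int init_domains(const struct area *table, void **objs, int n)
{
        int i, err;

        /* Pass 1: create every domain; abort on the first failure. */
        for (i = 0; i < n; i++) {
                err = create_domain(&table[i], &objs[i]);
                if (err)
                        return err;
        }

        /* Pass 2: every slot is populated, so a parent link can never
         * hit a not-yet-created domain. */
        for (i = 0; i < n; i++)
                if (table[i].parent >= 0)
                        link_domain(objs[table[i].parent], objs[i]);

        return 0;
}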
index e8c4403297082898c54d0aacf09c778a399aa2d0..31db510018a9462ead01b016f02c4952c6871c96 100644 (file)
@@ -30,7 +30,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
        struct page **tmp = pages;
 
        if (!pages)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
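
Returning ERR_PTR(-ENOMEM) instead of a bare NULL lets callers distinguish why the mapping failed. A minimal sketch of the ERR_PTR convention from <linux/err.h> (alloc_mapping() and use_mapping() are illustrative names):

#include <linux/err.h>
#include <linux/vmalloc.h>

static void *alloc_mapping(size_t len)
{
        void *p = vmalloc(len);

        if (!p)
                return ERR_PTR(-ENOMEM);        /* encode the errno in the pointer */
        return p;
}

static int use_mapping(size_t len)
{
        void *vaddr = alloc_mapping(len);

        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);          /* recover -ENOMEM at the call site */
        /* ... use vaddr ... */
        vfree(vaddr);
        return 0;
}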
index ea194aa01a642e0c691c9cb8b78e9d8ef43cfa79..257b0daff01f21317cf1e8f1a251efdbd9dc1189 100644 (file)
@@ -642,7 +642,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev,
        /* Make sure D/A update mode is direct update */
        outb(0, dev->iobase + DAQP_AUX_REG);
 
-       for (i = 0; i > insn->n; i++) {
+       for (i = 0; i < insn->n; i++) {
                unsigned int val = data[i];
                int ret;
 
index e461168313bf95c66e56afff9fba720d9b23c41a..4e6611e4c59beb9cd74d0ec18ea4b6851d5e9f3f 100644 (file)
@@ -290,13 +290,6 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
        return NETDEV_TX_OK;
 }
 
-static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
-                               void *accel_priv,
-                               select_queue_fallback_t fallback)
-{
-       return (u16)smp_processor_id();
-}
-
 static void xlr_hw_set_mac_addr(struct net_device *ndev)
 {
        struct xlr_net_priv *priv = netdev_priv(ndev);
@@ -403,7 +396,7 @@ static const struct net_device_ops xlr_netdev_ops = {
        .ndo_open = xlr_net_open,
        .ndo_stop = xlr_net_stop,
        .ndo_start_xmit = xlr_net_start_xmit,
-       .ndo_select_queue = xlr_net_select_queue,
+       .ndo_select_queue = dev_pick_tx_cpu_id,
        .ndo_set_mac_address = xlr_net_set_mac_addr,
        .ndo_set_rx_mode = xlr_set_rx_mode,
        .ndo_get_stats64 = xlr_stats,
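
The deleted hook simply returned the current CPU id; this series adds dev_pick_tx_cpu_id() to the core so drivers share that policy instead of open-coding it. Under the new ndo_select_queue signature (struct net_device *sb_dev replacing void *accel_priv, as the rtl8723bs and rtl8712 hunks below show), the open-coded variant would read:

static u16 pick_tx_cpu(struct net_device *dev, struct sk_buff *skb,
                       struct net_device *sb_dev,
                       select_queue_fallback_t fallback)
{
        /* one TX queue per CPU: queue index == current CPU id */
        return (u16)smp_processor_id();
}

Pointing .ndo_select_queue at the shared helper keeps the policy in one place and lets the core fix it for every driver at once.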
index 084a246eec19f8f457eb4dbafb53f0e3129347f3..6790b7c8cfb14f2e7a142dc6d4ec2b5dd3392634 100644 (file)
@@ -575,7 +575,6 @@ enum ht_cap_ampdu_factor {
  * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
  */
 #define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF 0x40
 
 
 #define OP_MODE_PURE                    0
index add1ba00f3e9ab7c5111f6c3846fa582ed243e53..38e85c8a85c8ba690ae60ef4df9181e959eee0f1 100644 (file)
@@ -253,7 +253,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
 }
 
 static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
-                           void *accel_priv, select_queue_fallback_t fallback)
+                           struct net_device *sb_dev,
+                           select_queue_fallback_t fallback)
 {
        struct adapter  *padapter = rtw_netdev_priv(dev);
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
index 0ed2f44ab4e958e0cb095081ccd13932cd09b284..00a4302e9983f985a6355302b151f69efb7572ec 100644 (file)
@@ -574,7 +574,6 @@ struct ieee80211_ht_addt_info {
  * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
  */
 #define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF 0x40
 
 
 /* Spatial Multiplexing Power Save Modes */
index 08bc79840b2373ce798fb31eae59bef70f0bbe7d..559bf2606fb7d932e20bac388de8c124f944a7a7 100644 (file)
@@ -799,7 +799,6 @@ enum HT_CAP_AMPDU_FACTOR {
  * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
  */
 #define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF 0x40
 
 
 /* Spatial Multiplexing Power Save Modes */
index ace68f023b49db7aec2fd147830e5e2e3cf3c34f..181642358e3fe1b29339f1c285f3e3a44dbe7f67 100644 (file)
@@ -403,10 +403,9 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
 }
 
 
-static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb
-                               , void *accel_priv
-                               , select_queue_fallback_t fallback
-)
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
+                           struct net_device *sb_dev,
+                           select_queue_fallback_t fallback)
 {
        struct adapter  *padapter = rtw_netdev_priv(dev);
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
index e46e47d93d7d1f087e5fc1a80349f3e3b0637f67..094827c1879a656850613b501f2300777fc435e2 100644 (file)
@@ -1838,7 +1838,7 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
                 reject_agg, ctrl_agg_size, agg_size);
 
        rtlpriv->hw->max_rx_aggregation_subframes =
-               (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF);
+               (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT);
 }
 
 /*********************************************************
index 3aa981fbc8f56c4215344de1d20269cfc1ae9fc3..e45ed08a51668fbe5ba03abc849746471a1a48e1 100644 (file)
@@ -11,6 +11,7 @@ config TYPEC_TCPCI
 
 config TYPEC_RT1711H
        tristate "Richtek RT1711H Type-C chip driver"
+       depends on I2C
        select TYPEC_TCPCI
        help
          Richtek RT1711H Type-C chip driver that works with
index 7f96dfa32b9cdf1cbf167fe1b0581e3b94f1a08b..d8dc3d22051f7810efa5faafba0cc71e3ad43040 100644 (file)
@@ -656,7 +656,7 @@ static void scatter_data_area(struct tcmu_dev *udev,
 }
 
 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
-                            bool bidi)
+                            bool bidi, uint32_t read_len)
 {
        struct se_cmd *se_cmd = cmd->se_cmd;
        int i, dbi;
@@ -689,7 +689,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
        for_each_sg(data_sg, sg, data_nents, i) {
                int sg_remaining = sg->length;
                to = kmap_atomic(sg_page(sg)) + sg->offset;
-               while (sg_remaining > 0) {
+               while (sg_remaining > 0 && read_len > 0) {
                        if (block_remaining == 0) {
                                if (from)
                                        kunmap_atomic(from);
@@ -701,6 +701,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
                        }
                        copy_bytes = min_t(size_t, sg_remaining,
                                        block_remaining);
+                       if (read_len < copy_bytes)
+                               copy_bytes = read_len;
                        offset = DATA_BLOCK_SIZE - block_remaining;
                        tcmu_flush_dcache_range(from, copy_bytes);
                        memcpy(to + sg->length - sg_remaining, from + offset,
@@ -708,8 +710,11 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 
                        sg_remaining -= copy_bytes;
                        block_remaining -= copy_bytes;
+                       read_len -= copy_bytes;
                }
                kunmap_atomic(to - sg->offset);
+               if (read_len == 0)
+                       break;
        }
        if (from)
                kunmap_atomic(from);
@@ -1042,6 +1047,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 {
        struct se_cmd *se_cmd = cmd->se_cmd;
        struct tcmu_dev *udev = cmd->tcmu_dev;
+       bool read_len_valid = false;
+       uint32_t read_len = se_cmd->data_length;
 
        /*
         * cmd has been completed already from timeout, just reclaim
@@ -1056,13 +1063,28 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
                pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
                        cmd->se_cmd);
                entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
-       } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
+               goto done;
+       }
+
+       if (se_cmd->data_direction == DMA_FROM_DEVICE &&
+           (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
+               read_len_valid = true;
+               if (entry->rsp.read_len < read_len)
+                       read_len = entry->rsp.read_len;
+       }
+
+       if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
                transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
-       } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
+               if (!read_len_valid)
+                       goto done;
+               else
+                       se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
+       }
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
                /* Get Data-In buffer before clean up */
-               gather_data_area(udev, cmd, true);
+               gather_data_area(udev, cmd, true, read_len);
        } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-               gather_data_area(udev, cmd, false);
+               gather_data_area(udev, cmd, false, read_len);
        } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
                /* TODO: */
        } else if (se_cmd->data_direction != DMA_NONE) {
@@ -1070,7 +1092,13 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
                        se_cmd->data_direction);
        }
 
-       target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
+done:
+       if (read_len_valid) {
+               pr_debug("read_len = %d\n", read_len);
+               target_complete_cmd_with_length(cmd->se_cmd,
+                                       entry->rsp.scsi_status, read_len);
+       } else
+               target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
 
 out:
        cmd->se_cmd = NULL;
@@ -1740,7 +1768,7 @@ static int tcmu_configure_device(struct se_device *dev)
        /* Initialise the mailbox of the ring buffer */
        mb = udev->mb_addr;
        mb->version = TCMU_MAILBOX_VERSION;
-       mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
+       mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
        mb->cmdr_off = CMDR_OFF;
        mb->cmdr_size = udev->cmdr_size;
 
index cbe98bc2b998276fd95b2d8086a6fabcf2351bf7..43174220170924e094567ad6271703bd881e2a6f 100644 (file)
@@ -124,6 +124,8 @@ struct n_tty_data {
        struct mutex output_lock;
 };
 
+#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
+
 static inline size_t read_cnt(struct n_tty_data *ldata)
 {
        return ldata->read_head - ldata->read_tail;
@@ -141,6 +143,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i)
 
 static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i)
 {
+       smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
        return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
 }
 
@@ -316,9 +319,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
 static void reset_buffer_flags(struct n_tty_data *ldata)
 {
        ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
-       ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
        ldata->commit_head = 0;
-       ldata->echo_mark = 0;
        ldata->line_start = 0;
 
        ldata->erasing = 0;
@@ -617,12 +618,19 @@ static size_t __process_echoes(struct tty_struct *tty)
        old_space = space = tty_write_room(tty);
 
        tail = ldata->echo_tail;
-       while (ldata->echo_commit != tail) {
+       while (MASK(ldata->echo_commit) != MASK(tail)) {
                c = echo_buf(ldata, tail);
                if (c == ECHO_OP_START) {
                        unsigned char op;
                        int no_space_left = 0;
 
+                       /*
+                        * Since add_echo_byte() is called without holding
+                        * output_lock, we might see only a portion of a
+                        * multi-byte operation.
+                        */
+                       if (MASK(ldata->echo_commit) == MASK(tail + 1))
+                               goto not_yet_stored;
                        /*
                         * If the buffer byte is the start of a multi-byte
                         * operation, get the next byte, which is either the
@@ -634,6 +642,8 @@ static size_t __process_echoes(struct tty_struct *tty)
                                unsigned int num_chars, num_bs;
 
                        case ECHO_OP_ERASE_TAB:
+                               if (MASK(ldata->echo_commit) == MASK(tail + 2))
+                                       goto not_yet_stored;
                                num_chars = echo_buf(ldata, tail + 2);
 
                                /*
@@ -728,7 +738,8 @@ static size_t __process_echoes(struct tty_struct *tty)
        /* If the echo buffer is nearly full (so that the possibility exists
         * of echo overrun before the next commit), then discard enough
         * data at the tail to prevent a subsequent overrun */
-       while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
+       while (ldata->echo_commit > tail &&
+              ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
                if (echo_buf(ldata, tail) == ECHO_OP_START) {
                        if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
                                tail += 3;
@@ -738,6 +749,7 @@ static size_t __process_echoes(struct tty_struct *tty)
                        tail++;
        }
 
+ not_yet_stored:
        ldata->echo_tail = tail;
        return old_space - space;
 }
@@ -748,6 +760,7 @@ static void commit_echoes(struct tty_struct *tty)
        size_t nr, old, echoed;
        size_t head;
 
+       mutex_lock(&ldata->output_lock);
        head = ldata->echo_head;
        ldata->echo_mark = head;
        old = ldata->echo_commit - ldata->echo_tail;
@@ -756,10 +769,12 @@ static void commit_echoes(struct tty_struct *tty)
         * is over the threshold (and try again each time another
         * block is accumulated) */
        nr = head - ldata->echo_tail;
-       if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK))
+       if (nr < ECHO_COMMIT_WATERMARK ||
+           (nr % ECHO_BLOCK > old % ECHO_BLOCK)) {
+               mutex_unlock(&ldata->output_lock);
                return;
+       }
 
-       mutex_lock(&ldata->output_lock);
        ldata->echo_commit = head;
        echoed = __process_echoes(tty);
        mutex_unlock(&ldata->output_lock);
@@ -810,7 +825,9 @@ static void flush_echoes(struct tty_struct *tty)
 
 static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
 {
-       *echo_buf_addr(ldata, ldata->echo_head++) = c;
+       *echo_buf_addr(ldata, ldata->echo_head) = c;
+       smp_wmb(); /* Matches smp_rmb() in echo_buf(). */
+       ldata->echo_head++;
 }
 
 /**
@@ -978,14 +995,15 @@ static void eraser(unsigned char c, struct tty_struct *tty)
        }
 
        seen_alnums = 0;
-       while (ldata->read_head != ldata->canon_head) {
+       while (MASK(ldata->read_head) != MASK(ldata->canon_head)) {
                head = ldata->read_head;
 
                /* erase a single possibly multibyte character */
                do {
                        head--;
                        c = read_buf(ldata, head);
-               } while (is_continuation(c, tty) && head != ldata->canon_head);
+               } while (is_continuation(c, tty) &&
+                        MASK(head) != MASK(ldata->canon_head));
 
                /* do not partially erase */
                if (is_continuation(c, tty))
@@ -1027,7 +1045,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
                                 * This info is used to go back the correct
                                 * number of columns.
                                 */
-                               while (tail != ldata->canon_head) {
+                               while (MASK(tail) != MASK(ldata->canon_head)) {
                                        tail--;
                                        c = read_buf(ldata, tail);
                                        if (c == '\t') {
@@ -1302,7 +1320,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
                        finish_erasing(ldata);
                        echo_char(c, tty);
                        echo_char_raw('\n', ldata);
-                       while (tail != ldata->read_head) {
+                       while (MASK(tail) != MASK(ldata->read_head)) {
                                echo_char(read_buf(ldata, tail), tty);
                                tail++;
                        }
@@ -1878,30 +1896,21 @@ static int n_tty_open(struct tty_struct *tty)
        struct n_tty_data *ldata;
 
        /* Currently a malloc failure here can panic */
-       ldata = vmalloc(sizeof(*ldata));
+       ldata = vzalloc(sizeof(*ldata));
        if (!ldata)
-               goto err;
+               return -ENOMEM;
 
        ldata->overrun_time = jiffies;
        mutex_init(&ldata->atomic_read_lock);
        mutex_init(&ldata->output_lock);
 
        tty->disc_data = ldata;
-       reset_buffer_flags(tty->disc_data);
-       ldata->column = 0;
-       ldata->canon_column = 0;
-       ldata->num_overrun = 0;
-       ldata->no_room = 0;
-       ldata->lnext = 0;
        tty->closing = 0;
        /* indicate buffer work may resume */
        clear_bit(TTY_LDISC_HALTED, &tty->flags);
        n_tty_set_termios(tty, NULL);
        tty_unthrottle(tty);
-
        return 0;
-err:
-       return -ENOMEM;
 }
 
 static inline int input_available_p(struct tty_struct *tty, int poll)
@@ -2411,7 +2420,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
        tail = ldata->read_tail;
        nr = head - tail;
        /* Skip EOF-chars.. */
-       while (head != tail) {
+       while (MASK(head) != MASK(tail)) {
                if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
                    read_buf(ldata, tail) == __DISABLED_CHAR)
                        nr--;
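
All of these comparisons switch from raw head/tail counters to MASK()ed buffer positions: n_tty lets the counters run freely and reduces them modulo N_TTY_BUF_SIZE only on access, which is valid because the size is a power of two. A minimal single-threaded sketch of the idiom (the smp_wmb()/smp_rmb() pair in the patch adds the cross-CPU publish ordering this sketch omits):

#include <stddef.h>

#define BUF_SIZE 4096                           /* must be a power of two */
#define MASK(x)  ((x) & (BUF_SIZE - 1))

struct ring {
        unsigned char buf[BUF_SIZE];
        size_t head;                            /* free-running producer index */
        size_t tail;                            /* free-running consumer index */
};

static size_t ring_count(const struct ring *r)
{
        return r->head - r->tail;               /* correct across wraparound */
}

static void ring_put(struct ring *r, unsigned char c)
{
        r->buf[MASK(r->head)] = c;              /* store, then advance */
        r->head++;
}

static int ring_get(struct ring *r, unsigned char *c)
{
        if (r->head == r->tail)
                return 0;                       /* empty */
        *c = r->buf[MASK(r->tail)];
        r->tail++;
        return 1;
}

The patch compares MASK()ed values rather than raw counters because, in corner cases such as overruns, n_tty's counters can drift apart by more than the buffer size, so raw equality is not a safe loop condition.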
index df93b727e984ee3d185fa0f5a42cad09d63a3f65..9e59f4788589c879358ce12507362baec459533d 100644 (file)
@@ -617,6 +617,7 @@ EXPORT_SYMBOL_GPL(__serdev_device_driver_register);
 static void __exit serdev_exit(void)
 {
        bus_unregister(&serdev_bus_type);
+       ida_destroy(&ctrl_ida);
 }
 module_exit(serdev_exit);
 
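Freeing every allocated id does not release the IDA's internally cached nodes; ida_destroy() does, which is why the exit path gains one line. The full lifecycle with the standard <linux/idr.h> API:

#include <linux/idr.h>

static DEFINE_IDA(my_ida);

static int grab_and_release_id(void)
{
        int id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);

        if (id < 0)
                return id;                      /* -ENOMEM, -ENOSPC, ... */
        /* ... use id ... */
        ida_simple_remove(&my_ida, id);         /* returns this one id */
        return 0;
}

static void release_everything(void)
{
        ida_destroy(&my_ida);                   /* frees the IDA's cached nodes */
}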
index 3296a05cda2db8d53b1d869123ed8e2aa884a248..f80a300b5d68f6e8ad61b7daf2544234da7e1662 100644 (file)
@@ -3339,9 +3339,7 @@ static const struct pci_device_id blacklist[] = {
        /* multi-io cards handled by parport_serial */
        { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
        { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
-       { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */
        { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
-       { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
 
        /* Moxa Smartio MUE boards handled by 8250_moxa */
        { PCI_VDEVICE(MOXA, 0x1024), },
index 1eb1a376a0419d4084cd7a72e1e2c7eb769798f7..15eb6c829d39c5b108adfca034fa50769763c0bc 100644 (file)
@@ -784,7 +784,7 @@ int vc_allocate(unsigned int currcons)      /* return 0 on success */
        if (!*vc->vc_uni_pagedir_loc)
                con_set_default_unimap(vc);
 
-       vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
+       vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL);
        if (!vc->vc_screenbuf)
                goto err_free;
 
@@ -871,7 +871,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
 
        if (new_screen_size > (4 << 20))
                return -EINVAL;
-       newscreen = kmalloc(new_screen_size, GFP_USER);
+       newscreen = kzalloc(new_screen_size, GFP_USER);
        if (!newscreen)
                return -ENOMEM;
 
index af45aa3222b5ce1c99ccb4de586c988d3b3a0b9f..4638d9b066bea7ad2b35f6c966ee7acec428facf 100644 (file)
@@ -124,8 +124,11 @@ static int host_start(struct ci_hdrc *ci)
 
        hcd->power_budget = ci->platdata->power_budget;
        hcd->tpl_support = ci->platdata->tpl_support;
-       if (ci->phy || ci->usb_phy)
+       if (ci->phy || ci->usb_phy) {
                hcd->skip_phy_initialization = 1;
+               if (ci->usb_phy)
+                       hcd->usb_phy = ci->usb_phy;
+       }
 
        ehci = hcd_to_ehci(hcd);
        ehci->caps = ci->hw_bank.cap;
index 7b366a6c0b493f2eb8bec4959830d222223f3cb2..998b32d0167e9970c4aa096520bdd7c9011f9137 100644 (file)
@@ -1758,6 +1758,9 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
        .driver_info = SINGLE_RX_URB,
        },
+       { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
+       .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+       },
        { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
        .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
        },
index 4a56ac772a3c35360d3834f5542aad22d5b1b720..71b3b08ad516c9fb3bfbd403da1687b5825e7e14 100644 (file)
@@ -1004,6 +1004,7 @@ struct dwc2_hregs_backup {
  * @frame_list_sz:      Frame list size
  * @desc_gen_cache:     Kmem cache for generic descriptors
  * @desc_hsisoc_cache:  Kmem cache for hs isochronous descriptors
+ * @unaligned_cache:    Kmem cache for DMA mode to handle non-aligned buf
  *
  * These are for peripheral mode:
  *
@@ -1177,6 +1178,8 @@ struct dwc2_hsotg {
        u32 frame_list_sz;
        struct kmem_cache *desc_gen_cache;
        struct kmem_cache *desc_hsisoc_cache;
+       struct kmem_cache *unaligned_cache;
+#define DWC2_KMEM_UNALIGNED_BUF_SIZE 1024
 
 #endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */
 
index f0d9ccf1d665ad37b2f23786806bde8c16da11ea..a0f82cca2d9a8e70f0760acbc3b19ced08c7d7dc 100644 (file)
@@ -812,6 +812,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
        u32 index;
        u32 maxsize = 0;
        u32 mask = 0;
+       u8 pid = 0;
 
        maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
 
@@ -840,7 +841,11 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
                         ((len << DEV_DMA_NBYTES_SHIFT) & mask));
 
        if (hs_ep->dir_in) {
-               desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) &
+               if (len)
+                       pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
+               else
+                       pid = 1;
+               desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
                                 DEV_DMA_ISOC_PID_MASK) |
                                ((len % hs_ep->ep.maxpacket) ?
                                 DEV_DMA_SHORT : 0) |
@@ -884,6 +889,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
        struct dwc2_dma_desc *desc;
 
        if (list_empty(&hs_ep->queue)) {
+               hs_ep->target_frame = TARGET_FRAME_INITIAL;
                dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
                return;
        }
@@ -2755,8 +2761,6 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
         */
        tmp = dwc2_hsotg_read_frameno(hsotg);
 
-       dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), 0);
-
        if (using_desc_dma(hsotg)) {
                if (ep->target_frame == TARGET_FRAME_INITIAL) {
                        /* Start first ISO Out */
@@ -2817,9 +2821,6 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
 
                tmp = dwc2_hsotg_read_frameno(hsotg);
                if (using_desc_dma(hsotg)) {
-                       dwc2_hsotg_complete_request(hsotg, hs_ep,
-                                                   get_ep_head(hs_ep), 0);
-
                        hs_ep->target_frame = tmp;
                        dwc2_gadget_incr_frame_num(hs_ep);
                        dwc2_gadget_start_isoc_ddma(hs_ep);
@@ -4739,9 +4740,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
        }
 
        ret = usb_add_gadget_udc(dev, &hsotg->gadget);
-       if (ret)
+       if (ret) {
+               dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
+                                          hsotg->ctrl_req);
                return ret;
-
+       }
        dwc2_hsotg_dump(hsotg);
 
        return 0;
@@ -4755,6 +4758,7 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
 int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
 {
        usb_del_gadget_udc(&hsotg->gadget);
+       dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
 
        return 0;
 }
index edaf0b6af4f0491ba192d346c29a792a06a85751..b1104be3429c2285677d2101004080e9a30af769 100644 (file)
@@ -1567,11 +1567,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
        }
 
        if (hsotg->params.host_dma) {
-               dwc2_writel((u32)chan->xfer_dma,
-                           hsotg->regs + HCDMA(chan->hc_num));
+               dma_addr_t dma_addr;
+
+               if (chan->align_buf) {
+                       if (dbg_hc(chan))
+                               dev_vdbg(hsotg->dev, "align_buf\n");
+                       dma_addr = chan->align_buf;
+               } else {
+                       dma_addr = chan->xfer_dma;
+               }
+               dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
+
                if (dbg_hc(chan))
                        dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
-                                (unsigned long)chan->xfer_dma, chan->hc_num);
+                                (unsigned long)dma_addr, chan->hc_num);
        }
 
        /* Start the split */
@@ -2625,6 +2634,35 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
        }
 }
 
+static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
+                                           struct dwc2_qh *qh,
+                                           struct dwc2_host_chan *chan)
+{
+       if (!hsotg->unaligned_cache ||
+           chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE)
+               return -ENOMEM;
+
+       if (!qh->dw_align_buf) {
+               qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache,
+                                                   GFP_ATOMIC | GFP_DMA);
+               if (!qh->dw_align_buf)
+                       return -ENOMEM;
+       }
+
+       qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf,
+                                             DWC2_KMEM_UNALIGNED_BUF_SIZE,
+                                             DMA_FROM_DEVICE);
+
+       if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) {
+               dev_err(hsotg->dev, "can't map align_buf\n");
+               chan->align_buf = 0;
+               return -EINVAL;
+       }
+
+       chan->align_buf = qh->dw_align_buf_dma;
+       return 0;
+}
+
 #define DWC2_USB_DMA_ALIGN 4
 
 struct dma_aligned_buffer {
@@ -2802,6 +2840,32 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
        /* Set the transfer attributes */
        dwc2_hc_init_xfer(hsotg, chan, qtd);
 
+       /* For non-dword aligned buffers */
+       if (hsotg->params.host_dma && qh->do_split &&
+           chan->ep_is_in && (chan->xfer_dma & 0x3)) {
+               dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
+               if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) {
+                       dev_err(hsotg->dev,
+                               "Failed to allocate memory to handle non-aligned buffer\n");
+                       /* Add channel back to free list */
+                       chan->align_buf = 0;
+                       chan->multi_count = 0;
+                       list_add_tail(&chan->hc_list_entry,
+                                     &hsotg->free_hc_list);
+                       qtd->in_process = 0;
+                       qh->channel = NULL;
+                       return -ENOMEM;
+               }
+       } else {
+               /*
+                * We assume that DMA is always aligned in the non-split
+                * case and the split-out case. Warn if not.
+                */
+               WARN_ON_ONCE(hsotg->params.host_dma &&
+                            (chan->xfer_dma & 0x3));
+               chan->align_buf = 0;
+       }
+
        if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
            chan->ep_type == USB_ENDPOINT_XFER_ISOC)
                /*
@@ -5246,6 +5310,19 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
                }
        }
 
+       if (hsotg->params.host_dma) {
+               /*
+                * Create kmem caches to handle non-aligned buffer
+                * in Buffer DMA mode.
+                */
+               hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma",
+                                               DWC2_KMEM_UNALIGNED_BUF_SIZE, 4,
+                                               SLAB_CACHE_DMA, NULL);
+               if (!hsotg->unaligned_cache)
+                       dev_err(hsotg->dev,
+                               "unable to create dwc2 unaligned cache\n");
+       }
+
        hsotg->otg_port = 1;
        hsotg->frame_list = NULL;
        hsotg->frame_list_dma = 0;
@@ -5280,8 +5357,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
        return 0;
 
 error4:
-       kmem_cache_destroy(hsotg->desc_gen_cache);
+       kmem_cache_destroy(hsotg->unaligned_cache);
        kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+       kmem_cache_destroy(hsotg->desc_gen_cache);
 error3:
        dwc2_hcd_release(hsotg);
 error2:
@@ -5322,8 +5400,9 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
        usb_remove_hcd(hcd);
        hsotg->priv = NULL;
 
-       kmem_cache_destroy(hsotg->desc_gen_cache);
+       kmem_cache_destroy(hsotg->unaligned_cache);
        kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+       kmem_cache_destroy(hsotg->desc_gen_cache);
 
        dwc2_hcd_release(hsotg);
        usb_put_hcd(hcd);
@@ -5435,7 +5514,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
        dwc2_writel(hprt0, hsotg->regs + HPRT0);
 
        /* Wait for the HPRT0.PrtSusp register field to be set */
-       if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 300))
+       if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
                dev_warn(hsotg->dev, "Suspend wasn't generated\n");
 
        /*
@@ -5616,6 +5695,8 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
                return ret;
        }
 
+       dwc2_hcd_rem_wakeup(hsotg);
+
        hsotg->hibernated = 0;
        hsotg->bus_suspended = 0;
        hsotg->lx_state = DWC2_L0;
index 7db1ee7e7a7781c12100d413abe7e4d62828e951..5502a501f5166640a2926132e4d4e1ee2f450724 100644 (file)
@@ -76,6 +76,8 @@ struct dwc2_qh;
  *                      (micro)frame
  * @xfer_buf:           Pointer to current transfer buffer position
  * @xfer_dma:           DMA address of xfer_buf
+ * @align_buf:          In Buffer DMA mode this will be used if xfer_buf is not
+ *                      DWORD aligned
  * @xfer_len:           Total number of bytes to transfer
  * @xfer_count:         Number of bytes transferred so far
  * @start_pkt_count:    Packet count at start of transfer
@@ -133,6 +135,7 @@ struct dwc2_host_chan {
 
        u8 *xfer_buf;
        dma_addr_t xfer_dma;
+       dma_addr_t align_buf;
        u32 xfer_len;
        u32 xfer_count;
        u16 start_pkt_count;
@@ -302,6 +305,9 @@ struct dwc2_hs_transfer_time {
  *                           speed.  Note that this is in "schedule slice" which
  *                           is tightly packed.
  * @ntd:                Actual number of transfer descriptors in a list
+ * @dw_align_buf:       Used instead of original buffer if its physical address
+ *                      is not dword-aligned
+ * @dw_align_buf_dma:   DMA address for dw_align_buf
  * @qtd_list:           List of QTDs for this QH
  * @channel:            Host channel currently processing transfers for this QH
  * @qh_list_entry:      Entry for QH in either the periodic or non-periodic
@@ -350,6 +356,8 @@ struct dwc2_qh {
        struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
        u32 ls_start_schedule_slice;
        u16 ntd;
+       u8 *dw_align_buf;
+       dma_addr_t dw_align_buf_dma;
        struct list_head qtd_list;
        struct dwc2_host_chan *channel;
        struct list_head qh_list_entry;
index fbea5e3fb9479bc4ff4ef250026df2583969ec67..ed7f05cf490637ba554e8be2fabea46bc118bb8c 100644 (file)
@@ -942,14 +942,21 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
        len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
                                          DWC2_HC_XFER_COMPLETE, NULL);
-       if (!len) {
+       if (!len && !qtd->isoc_split_offset) {
                qtd->complete_split = 0;
-               qtd->isoc_split_offset = 0;
                return 0;
        }
 
        frame_desc->actual_length += len;
 
+       if (chan->align_buf) {
+               dev_vdbg(hsotg->dev, "non-aligned buffer\n");
+               dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
+                                DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE);
+               memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma),
+                      chan->qh->dw_align_buf, len);
+       }
+
        qtd->isoc_split_offset += len;
 
        hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
index d7c3d6c776d86a8edf5c41832a7856cd6fca29eb..301ced1618f873203b534ffa77cf7ef59a773f84 100644 (file)
@@ -383,7 +383,7 @@ static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
        /* Get the map and adjust if this is a multi_tt hub */
        map = qh->dwc_tt->periodic_bitmaps;
        if (qh->dwc_tt->usb_tt->multi)
-               map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
+               map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
 
        return map;
 }
@@ -1696,6 +1696,9 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 
        if (qh->desc_list)
                dwc2_hcd_qh_free_ddma(hsotg, qh);
+       else if (hsotg->unaligned_cache && qh->dw_align_buf)
+               kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);
+
        kfree(qh);
 }
 
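Taken together, the dwc2 hunks bounce non-DWORD-aligned split-IN buffers through a DMA-capable kmem cache: allocate, map, point the channel at the bounce buffer, and copy back after completion. A condensed, synchronous sketch of that lifecycle (start_transfer() is hypothetical; the real driver completes asynchronously and copies back in dwc2_xfercomp_isoc_split_in()):

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>

static void start_transfer(dma_addr_t dma);     /* hypothetical */

static int bounce_rx(struct device *dev, struct kmem_cache *cache,
                     void *urb_buf, size_t len)
{
        void *buf;
        dma_addr_t dma;

        buf = kmem_cache_alloc(cache, GFP_ATOMIC | GFP_DMA);
        if (!buf)
                return -ENOMEM;

        dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dma)) {
                kmem_cache_free(cache, buf);
                return -EINVAL;
        }

        start_transfer(dma);                    /* hardware fills the bounce buffer */

        dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
        memcpy(urb_buf, buf, len);              /* copy back to the caller's buffer */
        kmem_cache_free(cache, buf);
        return 0;
}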
index ea91310113b9abd2a233a17bcd973d7a1ed1e09e..103807587dc640a747d75339f703a3aea0e8e992 100644 (file)
@@ -1272,7 +1272,6 @@ static int dwc3_probe(struct platform_device *pdev)
        if (!dwc->clks)
                return -ENOMEM;
 
-       dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
        dwc->dev = dev;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1307,15 +1306,19 @@ static int dwc3_probe(struct platform_device *pdev)
        if (IS_ERR(dwc->reset))
                return PTR_ERR(dwc->reset);
 
-       ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
-       if (ret == -EPROBE_DEFER)
-               return ret;
-       /*
-        * Clocks are optional, but new DT platforms should support all clocks
-        * as required by the DT-binding.
-        */
-       if (ret)
-               dwc->num_clks = 0;
+       if (dev->of_node) {
+               dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
+
+               ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
+               if (ret == -EPROBE_DEFER)
+                       return ret;
+               /*
+                * Clocks are optional, but new DT platforms should support all
+                * clocks as required by the DT-binding.
+                */
+               if (ret)
+                       dwc->num_clks = 0;
+       }
 
        ret = reset_control_deassert(dwc->reset);
        if (ret)
index 6b3ccd542bd76f6c40308a9df550ac2674673e45..dbeff5e6ad1461eea71a4cf9e562755cd50b1b17 100644 (file)
@@ -165,8 +165,9 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
 
        reset_control_put(simple->resets);
 
-       pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);
+       pm_runtime_put_noidle(dev);
+       pm_runtime_set_suspended(dev);
 
        return 0;
 }
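
The remove path now unwinds runtime PM without triggering a resume: disable callbacks first, drop the reference without invoking an idle check, then record the suspended state. As a reusable sketch, assuming probe did pm_runtime_enable() plus a get:

static int my_remove(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;

        /* ... release device resources first ... */

        pm_runtime_disable(dev);        /* no more runtime-PM callbacks */
        pm_runtime_put_noidle(dev);     /* drop the reference, no callback */
        pm_runtime_set_suspended(dev);  /* record the final state */
        return 0;
}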
index c961a94d136b5248a5e242a6ab3370b22f3fa360..f57e7c94b8e5e0154ef430e3a0b3973d6ff84bd9 100644 (file)
@@ -34,6 +34,7 @@
 #define PCI_DEVICE_ID_INTEL_GLK                        0x31aa
 #define PCI_DEVICE_ID_INTEL_CNPLP              0x9dee
 #define PCI_DEVICE_ID_INTEL_CNPH               0xa36e
+#define PCI_DEVICE_ID_INTEL_ICLLP              0x34ee
 
 #define PCI_INTEL_BXT_DSM_GUID         "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
 #define PCI_INTEL_BXT_FUNC_PMU_PWR     4
@@ -289,6 +290,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICLLP), },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
        {  }    /* Terminating Entry */
 };
index b0e67ab2f98cd09ba54eaabfdab1680e30783e63..a6d0203e40b6e048bfb736133321ef6c857c09ca 100644 (file)
@@ -490,6 +490,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
        qcom->dwc3 = of_find_device_by_node(dwc3_np);
        if (!qcom->dwc3) {
                dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
+               ret = -ENODEV;
                goto depopulate;
        }
 
@@ -547,8 +548,7 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int dwc3_qcom_pm_suspend(struct device *dev)
+static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
 {
        struct dwc3_qcom *qcom = dev_get_drvdata(dev);
        int ret = 0;
@@ -560,7 +560,7 @@ static int dwc3_qcom_pm_suspend(struct device *dev)
        return ret;
 }
 
-static int dwc3_qcom_pm_resume(struct device *dev)
+static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
 {
        struct dwc3_qcom *qcom = dev_get_drvdata(dev);
        int ret;
@@ -571,23 +571,20 @@ static int dwc3_qcom_pm_resume(struct device *dev)
 
        return ret;
 }
-#endif
 
-#ifdef CONFIG_PM
-static int dwc3_qcom_runtime_suspend(struct device *dev)
+static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev)
 {
        struct dwc3_qcom *qcom = dev_get_drvdata(dev);
 
        return dwc3_qcom_suspend(qcom);
 }
 
-static int dwc3_qcom_runtime_resume(struct device *dev)
+static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev)
 {
        struct dwc3_qcom *qcom = dev_get_drvdata(dev);
 
        return dwc3_qcom_resume(qcom);
 }
-#endif
 
 static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume)
index f242c2bcea810c0dee04f067ceea0dc716912058..d2fa071c21b17a1c0c03fbcf1db416d311e82a12 100644 (file)
@@ -1719,6 +1719,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
                 */
                if (w_value && !f->get_alt)
                        break;
+
+               spin_lock(&cdev->lock);
                value = f->set_alt(f, w_index, w_value);
                if (value == USB_GADGET_DELAYED_STATUS) {
                        DBG(cdev,
@@ -1728,6 +1730,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
                        DBG(cdev, "delayed_status count %d\n",
                                        cdev->delayed_status);
                }
+               spin_unlock(&cdev->lock);
                break;
        case USB_REQ_GET_INTERFACE:
                if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
index dce9d12c7981afb1733be479e9daa43977bd218a..33e2030503fa5b47cff5de52c43ac080b5507702 100644 (file)
@@ -215,6 +215,7 @@ struct ffs_io_data {
 
        struct mm_struct *mm;
        struct work_struct work;
+       struct work_struct cancellation_work;
 
        struct usb_ep *ep;
        struct usb_request *req;
@@ -1072,22 +1073,31 @@ ffs_epfile_open(struct inode *inode, struct file *file)
        return 0;
 }
 
+static void ffs_aio_cancel_worker(struct work_struct *work)
+{
+       struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
+                                                  cancellation_work);
+
+       ENTER();
+
+       usb_ep_dequeue(io_data->ep, io_data->req);
+}
+
 static int ffs_aio_cancel(struct kiocb *kiocb)
 {
        struct ffs_io_data *io_data = kiocb->private;
-       struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+       struct ffs_data *ffs = io_data->ffs;
        int value;
 
        ENTER();
 
-       spin_lock_irq(&epfile->ffs->eps_lock);
-
-       if (likely(io_data && io_data->ep && io_data->req))
-               value = usb_ep_dequeue(io_data->ep, io_data->req);
-       else
+       if (likely(io_data && io_data->ep && io_data->req)) {
+               INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker);
+               queue_work(ffs->io_completion_wq, &io_data->cancellation_work);
+               value = -EINPROGRESS;
+       } else {
                value = -EINVAL;
-
-       spin_unlock_irq(&epfile->ffs->eps_lock);
+       }
 
        return value;
 }
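
Instead of calling usb_ep_dequeue() under eps_lock, the cancel path now defers the dequeue to the function's completion workqueue and reports -EINPROGRESS, letting the normal completion path finish the iocb. The shape of that deferral, restated compactly (io_ctx is an illustrative stand-in for ffs_io_data):

#include <linux/workqueue.h>
#include <linux/usb/gadget.h>

struct io_ctx {
        struct work_struct cancel_work;
        struct usb_ep *ep;
        struct usb_request *req;
};

static void cancel_worker(struct work_struct *work)
{
        struct io_ctx *io = container_of(work, struct io_ctx, cancel_work);

        usb_ep_dequeue(io->ep, io->req);        /* runs outside the spinlock */
}

static int cancel_io(struct io_ctx *io, struct workqueue_struct *wq)
{
        INIT_WORK(&io->cancel_work, cancel_worker);
        queue_work(wq, &io->cancel_work);
        return -EINPROGRESS;    /* completion arrives through the usual path */
}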
index acbd3d7b8828693f79a51f19ad70fb1aa4ade050..8a62eee9eee11aa3df2c57cef9290dca9b4cb4a9 100644 (file)
@@ -886,12 +886,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 
        dev = xhci->devs[slot_id];
 
-       trace_xhci_free_virt_device(dev);
-
        xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
        if (!dev)
                return;
 
+       trace_xhci_free_virt_device(dev);
+
        if (dev->tt_info)
                old_active_eps = dev->tt_info->active_eps;
 
index a8c1d073cba05e3b070e73722d02b32eaf112e69..4b463e5202a421705be74610a136239dc3a9c423 100644 (file)
@@ -481,7 +481,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
        unsigned long mask;
        unsigned int port;
        bool idle, enable;
-       int err;
+       int err = 0;
 
        memset(&rsp, 0, sizeof(rsp));
 
@@ -1223,10 +1223,10 @@ static int tegra_xusb_probe(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
        usb_put_hcd(tegra->hcd);
 disable_xusbc:
-       if (!&pdev->dev.pm_domain)
+       if (!pdev->dev.pm_domain)
                tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
 disable_xusba:
-       if (!&pdev->dev.pm_domain)
+       if (!pdev->dev.pm_domain)
                tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
 put_padctl:
        tegra_xusb_padctl_put(tegra->padctl);
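
The error-path fix here is the classic address-of slip: &pdev->dev.pm_domain is the address of the member itself, which is never NULL, so the power-off calls were unreachable. A two-branch illustration:

struct device_like { void *pm_domain; };

static int has_pm_domain(const struct device_like *d)
{
        /* Wrong: &d->pm_domain is the member's address, never NULL,
         * so !&d->pm_domain is always false (dead code). */

        /* Right: test the pointer value stored in the member. */
        return d->pm_domain != NULL;
}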
index 410544ffe78f68fa73e7328e8e105028a9116008..88b427434bd82536c653a911bb393c4bdb87814b 100644 (file)
@@ -171,6 +171,37 @@ DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
        TP_ARGS(ring, trb)
 );
 
+DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
+       TP_PROTO(struct xhci_virt_device *vdev),
+       TP_ARGS(vdev),
+       TP_STRUCT__entry(
+               __field(void *, vdev)
+               __field(unsigned long long, out_ctx)
+               __field(unsigned long long, in_ctx)
+               __field(u8, fake_port)
+               __field(u8, real_port)
+               __field(u16, current_mel)
+
+       ),
+       TP_fast_assign(
+               __entry->vdev = vdev;
+               __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
+               __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
+               __entry->fake_port = (u8) vdev->fake_port;
+               __entry->real_port = (u8) vdev->real_port;
+               __entry->current_mel = (u16) vdev->current_mel;
+               ),
+       TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
+               __entry->vdev, __entry->in_ctx, __entry->out_ctx,
+               __entry->fake_port, __entry->real_port, __entry->current_mel
+       )
+);
+
+DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
+       TP_PROTO(struct xhci_virt_device *vdev),
+       TP_ARGS(vdev)
+);
+
 DECLARE_EVENT_CLASS(xhci_log_virt_dev,
        TP_PROTO(struct xhci_virt_device *vdev),
        TP_ARGS(vdev),
@@ -208,11 +239,6 @@ DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
        TP_ARGS(vdev)
 );
 
-DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device,
-       TP_PROTO(struct xhci_virt_device *vdev),
-       TP_ARGS(vdev)
-);
-
 DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
        TP_PROTO(struct xhci_virt_device *vdev),
        TP_ARGS(vdev)
index 8c8da2d657fa1008c1e612e6f30d3e9716d534b6..2f4850f25e82f0f009b8f614b593fc74eb5247aa 100644 (file)
@@ -908,6 +908,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
        spin_unlock_irqrestore(&xhci->lock, flags);
 }
 
+static bool xhci_pending_portevent(struct xhci_hcd *xhci)
+{
+       struct xhci_port        **ports;
+       int                     port_index;
+       u32                     status;
+       u32                     portsc;
+
+       status = readl(&xhci->op_regs->status);
+       if (status & STS_EINT)
+               return true;
+       /*
+        * Checking STS_EINT is not enough as there is a lag between a change
+        * bit being set and the Port Status Change Event that it generated
+        * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
+        */
+
+       port_index = xhci->usb2_rhub.num_ports;
+       ports = xhci->usb2_rhub.ports;
+       while (port_index--) {
+               portsc = readl(ports[port_index]->addr);
+               if (portsc & PORT_CHANGE_MASK ||
+                   (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+                       return true;
+       }
+       port_index = xhci->usb3_rhub.num_ports;
+       ports = xhci->usb3_rhub.ports;
+       while (port_index--) {
+               portsc = readl(ports[port_index]->addr);
+               if (portsc & PORT_CHANGE_MASK ||
+                   (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+                       return true;
+       }
+       return false;
+}
+
 /*
  * Stop HC (not bus-specific)
  *
@@ -1009,7 +1044,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend);
  */
 int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 {
-       u32                     command, temp = 0, status;
+       u32                     command, temp = 0;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        struct usb_hcd          *secondary_hcd;
        int                     retval = 0;
@@ -1043,8 +1078,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                command = readl(&xhci->op_regs->command);
                command |= CMD_CRS;
                writel(command, &xhci->op_regs->command);
+               /*
+                * Some controllers take up to 55+ ms to complete the controller
+                * restore, so set the timeout to 100 ms. The xHCI specification
+                * doesn't mention any timeout value.
+                */
                if (xhci_handshake(&xhci->op_regs->status,
-                             STS_RESTORE, 0, 10 * 1000)) {
+                             STS_RESTORE, 0, 100 * 1000)) {
                        xhci_warn(xhci, "WARN: xHC restore state timeout\n");
                        spin_unlock_irq(&xhci->lock);
                        return -ETIMEDOUT;
@@ -1134,8 +1174,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
  done:
        if (retval == 0) {
                /* Resume root hubs only when have pending events. */
-               status = readl(&xhci->op_regs->status);
-               if (status & STS_EINT) {
+               if (xhci_pending_portevent(xhci)) {
                        usb_hcd_resume_root_hub(xhci->shared_hcd);
                        usb_hcd_resume_root_hub(hcd);
                }
index 939e2f86b595eecbf1f1ecac7dcd7f39965d238d..841e89ffe2e9d88f6f81255340da58144916ca59 100644 (file)
@@ -382,6 +382,10 @@ struct xhci_op_regs {
 #define PORT_PLC       (1 << 22)
 /* port configure error change - port failed to configure its link partner */
 #define PORT_CEC       (1 << 23)
+#define PORT_CHANGE_MASK       (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+                                PORT_RC | PORT_PLC | PORT_CEC)
+
+
 /* Cold Attach Status - xHC can set this bit to report device attached during
  * Sx state. Warm port reset should be performed to clear this bit and move port
  * to connected state.
index eb6c26cbe5792b0e535c77b9e2e245b700071458..ee0cc1d90b51a17ca3f77e989f7e0265e961170b 100644 (file)
@@ -95,6 +95,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
        { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
        { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
+       { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */
+       { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */
+       { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */
        { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
        { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
        { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -112,6 +115,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
        { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
        { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+       { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
+       { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
+       { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
        { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
        { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
        { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
@@ -124,7 +130,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
        { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
        { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+       { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */
        { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
+       { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */
        { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
        { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
        { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -134,17 +142,23 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
        { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+       { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
+       { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
        { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
        { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
        { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
        { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+       { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
        { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
        { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
        { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+       { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */
        { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
+       { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */
+       { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */
        { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
        { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
        { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
index 8a201dd53d36b352b3d7fb68cf6486c08ddb96ff..d961f1ec0e0856a34f5c5e4ca87a2030549ac960 100644 (file)
@@ -418,17 +418,18 @@ static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
        u64 ts_nsec = local_clock();
        unsigned long rem_nsec;
 
+       mutex_lock(&port->logbuffer_lock);
        if (!port->logbuffer[port->logbuffer_head]) {
                port->logbuffer[port->logbuffer_head] =
                                kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
-               if (!port->logbuffer[port->logbuffer_head])
+               if (!port->logbuffer[port->logbuffer_head]) {
+                       mutex_unlock(&port->logbuffer_lock);
                        return;
+               }
        }
 
        vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
 
-       mutex_lock(&port->logbuffer_lock);
-
        if (tcpm_log_full(port)) {
                port->logbuffer_head = max(port->logbuffer_head - 1, 0);
                strcpy(tmpbuffer, "overflow");
@@ -3043,7 +3044,8 @@ static void run_state_machine(struct tcpm_port *port)
                    tcpm_port_is_sink(port) &&
                    time_is_after_jiffies(port->delayed_runtime)) {
                        tcpm_set_state(port, SNK_DISCOVERY,
-                                      port->delayed_runtime - jiffies);
+                                      jiffies_to_msecs(port->delayed_runtime -
+                                                       jiffies));
                        break;
                }
                tcpm_set_state(port, unattached_state(port), 0);
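
Two fixes are folded into the tcpm hunks above: _tcpm_log() now takes logbuffer_lock before checking and allocating the head slot, so two concurrent writers can no longer both see an empty slot and race on logbuffer_head, and the SNK_DISCOVERY re-arm converts its jiffies delta with jiffies_to_msecs(), since tcpm_set_state() takes its delay in milliseconds. A minimal sketch of the lock-before-check pattern, with hypothetical names rather than the driver's API:

    #define NR_ENTRIES      1024
    #define ENTRY_SIZE      256

    struct logger {
            struct mutex lock;
            char *buf[NR_ENTRIES];
            int head;
    };

    static void log_entry(struct logger *lg, const char *msg)
    {
            mutex_lock(&lg->lock);          /* taken before the slot check */
            if (!lg->buf[lg->head]) {
                    lg->buf[lg->head] = kzalloc(ENTRY_SIZE, GFP_KERNEL);
                    if (!lg->buf[lg->head]) {
                            mutex_unlock(&lg->lock);
                            return;         /* drop the message on OOM */
                    }
            }
            strscpy(lg->buf[lg->head], msg, ENTRY_SIZE);
            lg->head = (lg->head + 1) % NR_ENTRIES;
            mutex_unlock(&lg->lock);
    }
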
index bd5cca5632b395def6384ec233d8ba5926e81c93..8d0a6fe748bdc50ca99800c3d6ba5680a4e9f0bd 100644 (file)
@@ -350,6 +350,19 @@ static void ucsi_connector_change(struct work_struct *work)
        }
 
        if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
+               typec_set_pwr_role(con->port, con->status.pwr_dir);
+
+               switch (con->status.partner_type) {
+               case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+                       typec_set_data_role(con->port, TYPEC_HOST);
+                       break;
+               case UCSI_CONSTAT_PARTNER_TYPE_DFP:
+                       typec_set_data_role(con->port, TYPEC_DEVICE);
+                       break;
+               default:
+                       break;
+               }
+
                if (con->status.connected)
                        ucsi_register_partner(con);
                else
index 44eb4e1ea817b2e38eab36cee60021368508a342..a18112a83faed2df09e49c0a5a93d2fce0823c5f 100644 (file)
@@ -79,6 +79,11 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       /* This will make sure we can use ioremap_nocache() */
+       status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
+       if (ACPI_FAILURE(status))
+               return -ENOMEM;
+
        /*
         * NOTE: The memory region for the data structures is used also in an
         * operation region, which means ACPI has already reserved it. Therefore
index 686dc670fd294b3077cf363241338ab871b26244..b2240361f1a14bd4150ce09c56c8acbc490ad441 100644 (file)
@@ -396,13 +396,10 @@ static inline unsigned long busy_clock(void)
        return local_clock() >> 10;
 }
 
-static bool vhost_can_busy_poll(struct vhost_dev *dev,
-                               unsigned long endtime)
+static bool vhost_can_busy_poll(unsigned long endtime)
 {
-       return likely(!need_resched()) &&
-              likely(!time_after(busy_clock(), endtime)) &&
-              likely(!signal_pending(current)) &&
-              !vhost_has_work(dev);
+       return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
+                     !signal_pending(current));
 }
 
 static void vhost_net_disable_vq(struct vhost_net *n,
@@ -434,7 +431,8 @@ static int vhost_net_enable_vq(struct vhost_net *n,
 static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
                                    struct vhost_virtqueue *vq,
                                    struct iovec iov[], unsigned int iov_size,
-                                   unsigned int *out_num, unsigned int *in_num)
+                                   unsigned int *out_num, unsigned int *in_num,
+                                   bool *busyloop_intr)
 {
        unsigned long uninitialized_var(endtime);
        int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
@@ -443,9 +441,15 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
        if (r == vq->num && vq->busyloop_timeout) {
                preempt_disable();
                endtime = busy_clock() + vq->busyloop_timeout;
-               while (vhost_can_busy_poll(vq->dev, endtime) &&
-                      vhost_vq_avail_empty(vq->dev, vq))
+               while (vhost_can_busy_poll(endtime)) {
+                       if (vhost_has_work(vq->dev)) {
+                               *busyloop_intr = true;
+                               break;
+                       }
+                       if (!vhost_vq_avail_empty(vq->dev, vq))
+                               break;
                        cpu_relax();
+               }
                preempt_enable();
                r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                      out_num, in_num, NULL, NULL);
@@ -501,20 +505,24 @@ static void handle_tx(struct vhost_net *net)
        zcopy = nvq->ubufs;
 
        for (;;) {
+               bool busyloop_intr;
+
                /* Release DMAs done buffers first */
                if (zcopy)
                        vhost_zerocopy_signal_used(net, vq);
 
-
+               busyloop_intr = false;
                head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
                                                ARRAY_SIZE(vq->iov),
-                                               &out, &in);
+                                               &out, &in, &busyloop_intr);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
-                       if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+                       if (unlikely(busyloop_intr)) {
+                               vhost_poll_queue(&vq->poll);
+                       } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
                                vhost_disable_notify(&net->dev, vq);
                                continue;
                        }
@@ -645,41 +653,50 @@ static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
        nvq->done_idx = 0;
 }
 
-static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
+static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
+                                     bool *busyloop_intr)
 {
-       struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
-       struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
-       struct vhost_virtqueue *vq = &nvq->vq;
+       struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
+       struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
+       struct vhost_virtqueue *rvq = &rnvq->vq;
+       struct vhost_virtqueue *tvq = &tnvq->vq;
        unsigned long uninitialized_var(endtime);
-       int len = peek_head_len(rvq, sk);
+       int len = peek_head_len(rnvq, sk);
 
-       if (!len && vq->busyloop_timeout) {
+       if (!len && tvq->busyloop_timeout) {
                /* Flush batched heads first */
-               vhost_rx_signal_used(rvq);
+               vhost_rx_signal_used(rnvq);
                /* Both tx vq and rx socket were polled here */
-               mutex_lock_nested(&vq->mutex, 1);
-               vhost_disable_notify(&net->dev, vq);
+               mutex_lock_nested(&tvq->mutex, 1);
+               vhost_disable_notify(&net->dev, tvq);
 
                preempt_disable();
-               endtime = busy_clock() + vq->busyloop_timeout;
+               endtime = busy_clock() + tvq->busyloop_timeout;
 
-               while (vhost_can_busy_poll(&net->dev, endtime) &&
-                      !sk_has_rx_data(sk) &&
-                      vhost_vq_avail_empty(&net->dev, vq))
+               while (vhost_can_busy_poll(endtime)) {
+                       if (vhost_has_work(&net->dev)) {
+                               *busyloop_intr = true;
+                               break;
+                       }
+                       if ((sk_has_rx_data(sk) &&
+                            !vhost_vq_avail_empty(&net->dev, rvq)) ||
+                           !vhost_vq_avail_empty(&net->dev, tvq))
+                               break;
                        cpu_relax();
+               }
 
                preempt_enable();
 
-               if (!vhost_vq_avail_empty(&net->dev, vq))
-                       vhost_poll_queue(&vq->poll);
-               else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
-                       vhost_disable_notify(&net->dev, vq);
-                       vhost_poll_queue(&vq->poll);
+               if (!vhost_vq_avail_empty(&net->dev, tvq)) {
+                       vhost_poll_queue(&tvq->poll);
+               } else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
+                       vhost_disable_notify(&net->dev, tvq);
+                       vhost_poll_queue(&tvq->poll);
                }
 
-               mutex_unlock(&vq->mutex);
+               mutex_unlock(&tvq->mutex);
 
-               len = peek_head_len(rvq, sk);
+               len = peek_head_len(rnvq, sk);
        }
 
        return len;
@@ -786,6 +803,7 @@ static void handle_rx(struct vhost_net *net)
        s16 headcount;
        size_t vhost_hlen, sock_hlen;
        size_t vhost_len, sock_len;
+       bool busyloop_intr = false;
        struct socket *sock;
        struct iov_iter fixup;
        __virtio16 num_buffers;
@@ -809,7 +827,8 @@ static void handle_rx(struct vhost_net *net)
                vq->log : NULL;
        mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
 
-       while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
+       while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
+                                                     &busyloop_intr))) {
                sock_len += sock_hlen;
                vhost_len = sock_len + vhost_hlen;
                headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
@@ -820,7 +839,9 @@ static void handle_rx(struct vhost_net *net)
                        goto out;
                /* OK, now we need to know about added descriptors. */
                if (!headcount) {
-                       if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+                       if (unlikely(busyloop_intr)) {
+                               vhost_poll_queue(&vq->poll);
+                       } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
                                /* They have slipped one in as we were
                                 * doing that: check again. */
                                vhost_disable_notify(&net->dev, vq);
@@ -830,6 +851,7 @@ static void handle_rx(struct vhost_net *net)
                         * they refilled. */
                        goto out;
                }
+               busyloop_intr = false;
                if (nvq->rx_ring)
                        msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
                /* On overrun, truncate and discard */
@@ -896,7 +918,10 @@ static void handle_rx(struct vhost_net *net)
                        goto out;
                }
        }
-       vhost_net_enable_vq(net, vq);
+       if (unlikely(busyloop_intr))
+               vhost_poll_queue(&vq->poll);
+       else
+               vhost_net_enable_vq(net, vq);
 out:
        vhost_rx_signal_used(nvq);
        mutex_unlock(&vq->mutex);
@@ -1226,7 +1251,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
        if (ubufs)
                vhost_net_ubuf_put_wait_and_free(ubufs);
 err_ubufs:
-       sockfd_put(sock);
+       if (sock)
+               sockfd_put(sock);
 err_vq:
        mutex_unlock(&vq->mutex);
 err:
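
The vhost-net rework pulls the vhost_has_work() test out of vhost_can_busy_poll() and into the busy-poll loops themselves, recording why the loop stopped in a busyloop_intr flag: if polling was cut short because the vhost worker has pending work, the handlers queue the virtqueue poll again instead of re-enabling guest notification, so the interrupted poll is retried once the work has run. The err_ubufs hunk is a separate fix: that label can now be reached with sock == NULL, so it is checked before sockfd_put(). The contract, compressed into a sketch (names as in the patch, control flow simplified):

    bool busyloop_intr = false;

    preempt_disable();
    endtime = busy_clock() + vq->busyloop_timeout;
    while (vhost_can_busy_poll(endtime)) {
            if (vhost_has_work(vq->dev)) {
                    busyloop_intr = true;   /* worker needs to run */
                    break;
            }
            if (!vhost_vq_avail_empty(vq->dev, vq))
                    break;                  /* guest queued buffers */
            cpu_relax();
    }
    preempt_enable();

    if (busyloop_intr)
            vhost_poll_queue(&vq->poll);    /* retry after the work runs */
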
index 451e833f593175886fd4fa6ae066860dad4cf95e..48b154276179f0269c7444e38d6717ac493a06d5 100644 (file)
@@ -41,4 +41,4 @@ obj-$(CONFIG_XEN_PVCALLS_FRONTEND)    += pvcalls-front.o
 xen-evtchn-y                           := evtchn.o
 xen-gntdev-y                           := gntdev.o
 xen-gntalloc-y                         := gntalloc.o
-xen-privcmd-y                          := privcmd.o
+xen-privcmd-y                          := privcmd.o privcmd-buf.o
index 762378f1811cc9069dc6171edb55aaa3610b82fa..08e4af04d6f2c32850a049a83721933a82883b8c 100644 (file)
@@ -628,8 +628,6 @@ static void __unbind_from_irq(unsigned int irq)
                xen_irq_info_cleanup(info);
        }
 
-       BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
-
        xen_free_irq(irq);
 }
 
index 2473b0a9e6e41d5d51b47e318d7e3b26d81ec5f6..ba9f3eec2bd00f6f39eb952ed5815e7b45c9735e 100644 (file)
@@ -799,7 +799,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
 
        return 0;
 }
-EXPORT_SYMBOL(gnttab_alloc_pages);
+EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
 
 /**
  * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
@@ -820,7 +820,7 @@ void gnttab_free_pages(int nr_pages, struct page **pages)
        }
        free_xenballooned_pages(nr_pages, pages);
 }
-EXPORT_SYMBOL(gnttab_free_pages);
+EXPORT_SYMBOL_GPL(gnttab_free_pages);
 
 /* Handling of paged out grant targets (GNTST_eagain) */
 #define MAX_DELAY 256
index 8835065029d34a150a91662bb4562b4a41be50ca..c93d8ef8df3483bbc393b2101c189120f844b634 100644 (file)
@@ -289,8 +289,15 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
                return;
        }
 
-       if (sysrq_key != '\0')
-               xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+       if (sysrq_key != '\0') {
+               err = xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+               if (err) {
+                       pr_err("%s: Error %d writing sysrq in control/sysrq\n",
+                              __func__, err);
+                       xenbus_transaction_end(xbt, 1);
+                       return;
+               }
+       }
 
        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
@@ -342,7 +349,12 @@ static int setup_shutdown_watcher(void)
                        continue;
                snprintf(node, FEATURE_PATH_SIZE, "feature-%s",
                         shutdown_handlers[idx].command);
-               xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+               err = xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+               if (err) {
+                       pr_err("%s: Error %d writing %s\n", __func__,
+                               err, node);
+                       return err;
+               }
        }
 
        return 0;
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
new file mode 100644 (file)
index 0000000..df1ed37
--- /dev/null
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/******************************************************************************
+ * privcmd-buf.c
+ *
+ * Mmap of hypercall buffers.
+ *
+ * Copyright (c) 2018 Juergen Gross
+ */
+
+#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include "privcmd.h"
+
+MODULE_LICENSE("GPL");
+
+static unsigned int limit = 64;
+module_param(limit, uint, 0644);
+MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
+                       "the privcmd-buf device per open file");
+
+struct privcmd_buf_private {
+       struct mutex lock;
+       struct list_head list;
+       unsigned int allocated;
+};
+
+struct privcmd_buf_vma_private {
+       struct privcmd_buf_private *file_priv;
+       struct list_head list;
+       unsigned int users;
+       unsigned int n_pages;
+       struct page *pages[];
+};
+
+static int privcmd_buf_open(struct inode *ino, struct file *file)
+{
+       struct privcmd_buf_private *file_priv;
+
+       file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+       if (!file_priv)
+               return -ENOMEM;
+
+       mutex_init(&file_priv->lock);
+       INIT_LIST_HEAD(&file_priv->list);
+
+       file->private_data = file_priv;
+
+       return 0;
+}
+
+static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
+{
+       unsigned int i;
+
+       vma_priv->file_priv->allocated -= vma_priv->n_pages;
+
+       list_del(&vma_priv->list);
+
+       for (i = 0; i < vma_priv->n_pages; i++)
+               if (vma_priv->pages[i])
+                       __free_page(vma_priv->pages[i]);
+
+       kfree(vma_priv);
+}
+
+static int privcmd_buf_release(struct inode *ino, struct file *file)
+{
+       struct privcmd_buf_private *file_priv = file->private_data;
+       struct privcmd_buf_vma_private *vma_priv;
+
+       mutex_lock(&file_priv->lock);
+
+       while (!list_empty(&file_priv->list)) {
+               vma_priv = list_first_entry(&file_priv->list,
+                                           struct privcmd_buf_vma_private,
+                                           list);
+               privcmd_buf_vmapriv_free(vma_priv);
+       }
+
+       mutex_unlock(&file_priv->lock);
+
+       kfree(file_priv);
+
+       return 0;
+}
+
+static void privcmd_buf_vma_open(struct vm_area_struct *vma)
+{
+       struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
+
+       if (!vma_priv)
+               return;
+
+       mutex_lock(&vma_priv->file_priv->lock);
+       vma_priv->users++;
+       mutex_unlock(&vma_priv->file_priv->lock);
+}
+
+static void privcmd_buf_vma_close(struct vm_area_struct *vma)
+{
+       struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
+       struct privcmd_buf_private *file_priv;
+
+       if (!vma_priv)
+               return;
+
+       file_priv = vma_priv->file_priv;
+
+       mutex_lock(&file_priv->lock);
+
+       vma_priv->users--;
+       if (!vma_priv->users)
+               privcmd_buf_vmapriv_free(vma_priv);
+
+       mutex_unlock(&file_priv->lock);
+}
+
+static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf)
+{
+       pr_debug("fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
+                vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
+                vmf->pgoff, (void *)vmf->address);
+
+       return VM_FAULT_SIGBUS;
+}
+
+static const struct vm_operations_struct privcmd_buf_vm_ops = {
+       .open = privcmd_buf_vma_open,
+       .close = privcmd_buf_vma_close,
+       .fault = privcmd_buf_vma_fault,
+};
+
+static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct privcmd_buf_private *file_priv = file->private_data;
+       struct privcmd_buf_vma_private *vma_priv;
+       unsigned long count = vma_pages(vma);
+       unsigned int i;
+       int ret = 0;
+
+       if (!(vma->vm_flags & VM_SHARED) || count > limit ||
+           file_priv->allocated + count > limit)
+               return -EINVAL;
+
+       vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
+                          GFP_KERNEL);
+       if (!vma_priv)
+               return -ENOMEM;
+
+       vma_priv->n_pages = count;
+       count = 0;
+       for (i = 0; i < vma_priv->n_pages; i++) {
+               vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               if (!vma_priv->pages[i])
+                       break;
+               count++;
+       }
+
+       mutex_lock(&file_priv->lock);
+
+       file_priv->allocated += count;
+
+       vma_priv->file_priv = file_priv;
+       vma_priv->users = 1;
+
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+       vma->vm_ops = &privcmd_buf_vm_ops;
+       vma->vm_private_data = vma_priv;
+
+       list_add(&vma_priv->list, &file_priv->list);
+
+       if (vma_priv->n_pages != count)
+               ret = -ENOMEM;
+       else
+               for (i = 0; i < vma_priv->n_pages; i++) {
+                       ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+                                            vma_priv->pages[i]);
+                       if (ret)
+                               break;
+               }
+
+       if (ret)
+               privcmd_buf_vmapriv_free(vma_priv);
+
+       mutex_unlock(&file_priv->lock);
+
+       return ret;
+}
+
+const struct file_operations xen_privcmdbuf_fops = {
+       .owner = THIS_MODULE,
+       .open = privcmd_buf_open,
+       .release = privcmd_buf_release,
+       .mmap = privcmd_buf_mmap,
+};
+EXPORT_SYMBOL_GPL(xen_privcmdbuf_fops);
+
+struct miscdevice xen_privcmdbuf_dev = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "xen/hypercall",
+       .fops = &xen_privcmdbuf_fops,
+};
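
The new privcmd-buf device gives user space kernel-allocated, page-aligned buffers suitable for hypercalls: each MAP_SHARED mmap allocates zeroed pages up to the module's per-file limit, inserts them all into the VMA up front, and frees them when the last mapping goes away; the fault handler returns SIGBUS because every page is pre-inserted, so a fault can only mean a stale mapping. A hedged user-space sketch, assuming the misc device above shows up as /dev/xen/hypercall:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/dev/xen/hypercall", O_RDWR | O_CLOEXEC);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* Must be MAP_SHARED: privcmd_buf_mmap() rejects private mappings. */
            void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                             MAP_SHARED, fd, 0);
            if (buf == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            /* ... hand buf to a privcmd hypercall ioctl here ... */
            munmap(buf, 4096);
            close(fd);
            return 0;
    }
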
index 8ae0349d9f0ae47036ed2b6b8e968230c1fdfb41..7e6e682104dc4e9a77d8149e2f4500ded81b41b8 100644 (file)
@@ -1007,12 +1007,21 @@ static int __init privcmd_init(void)
                pr_err("Could not register Xen privcmd device\n");
                return err;
        }
+
+       err = misc_register(&xen_privcmdbuf_dev);
+       if (err != 0) {
+               pr_err("Could not register Xen hypercall-buf device\n");
+               misc_deregister(&privcmd_dev);
+               return err;
+       }
+
        return 0;
 }
 
 static void __exit privcmd_exit(void)
 {
        misc_deregister(&privcmd_dev);
+       misc_deregister(&xen_privcmdbuf_dev);
 }
 
 module_init(privcmd_init);
index 14facaeed36fda1a1492aaa2a88a72bc8855c450..0dd9f8f67ee30efc849a7bdf2085036c0c0e84ab 100644 (file)
@@ -1,3 +1,6 @@
 #include <linux/fs.h>
 
 extern const struct file_operations xen_privcmd_fops;
+extern const struct file_operations xen_privcmdbuf_fops;
+
+extern struct miscdevice xen_privcmdbuf_dev;
index 7bc88fd43cfc84d05873893ef4ddec8307e76c2a..e2f3e8b0fba9ff160a7c82a37e64cf5fe0b3c8f0 100644 (file)
@@ -1012,6 +1012,7 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
 {
        struct v2p_entry *entry;
        unsigned long flags;
+       int err;
 
        if (try) {
                spin_lock_irqsave(&info->v2p_lock, flags);
@@ -1027,8 +1028,11 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
                        scsiback_del_translation_entry(info, vir);
                }
        } else if (!try) {
-               xenbus_printf(XBT_NIL, info->dev->nodename, state,
+               err = xenbus_printf(XBT_NIL, info->dev->nodename, state,
                              "%d", XenbusStateClosed);
+               if (err)
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing %s", __func__, state);
        }
 }
 
@@ -1067,8 +1071,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
        snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent);
        val = xenbus_read(XBT_NIL, dev->nodename, str, NULL);
        if (IS_ERR(val)) {
-               xenbus_printf(XBT_NIL, dev->nodename, state,
+               err = xenbus_printf(XBT_NIL, dev->nodename, state,
                              "%d", XenbusStateClosed);
+               if (err)
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing %s", __func__, state);
                return;
        }
        strlcpy(phy, val, VSCSI_NAMELEN);
@@ -1079,8 +1086,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
        err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
                           &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
        if (XENBUS_EXIST_ERR(err)) {
-               xenbus_printf(XBT_NIL, dev->nodename, state,
+               err = xenbus_printf(XBT_NIL, dev->nodename, state,
                              "%d", XenbusStateClosed);
+               if (err)
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing %s", __func__, state);
                return;
        }
 
index e1d20124ec0e8698a1e8a5940537ff45f2e57d2c..210df9da1283744078a2c7fb6b5ae050ddf811de 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -5,7 +5,6 @@
  *     Implements an efficient asynchronous io interface.
  *
  *     Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
- *     Copyright 2018 Christoph Hellwig.
  *
  *     See ../COPYING for licensing terms.
  */
@@ -165,22 +164,10 @@ struct fsync_iocb {
        bool                    datasync;
 };
 
-struct poll_iocb {
-       struct file             *file;
-       __poll_t                events;
-       struct wait_queue_head  *head;
-
-       union {
-               struct wait_queue_entry wait;
-               struct work_struct      work;
-       };
-};
-
 struct aio_kiocb {
        union {
                struct kiocb            rw;
                struct fsync_iocb       fsync;
-               struct poll_iocb        poll;
        };
 
        struct kioctx           *ki_ctx;
@@ -1590,6 +1577,7 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
        if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
                        iocb->aio_rw_flags))
                return -EINVAL;
+
        req->file = fget(iocb->aio_fildes);
        if (unlikely(!req->file))
                return -EBADF;
@@ -1604,137 +1592,6 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
        return 0;
 }
 
-/* need to use list_del_init so we can check if item was present */
-static inline bool __aio_poll_remove(struct poll_iocb *req)
-{
-       if (list_empty(&req->wait.entry))
-               return false;
-       list_del_init(&req->wait.entry);
-       return true;
-}
-
-static inline void __aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
-{
-       fput(iocb->poll.file);
-       aio_complete(iocb, mangle_poll(mask), 0);
-}
-
-static void aio_poll_work(struct work_struct *work)
-{
-       struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, poll.work);
-
-       if (!list_empty_careful(&iocb->ki_list))
-               aio_remove_iocb(iocb);
-       __aio_poll_complete(iocb, iocb->poll.events);
-}
-
-static int aio_poll_cancel(struct kiocb *iocb)
-{
-       struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
-       struct poll_iocb *req = &aiocb->poll;
-       struct wait_queue_head *head = req->head;
-       bool found = false;
-
-       spin_lock(&head->lock);
-       found = __aio_poll_remove(req);
-       spin_unlock(&head->lock);
-
-       if (found) {
-               req->events = 0;
-               INIT_WORK(&req->work, aio_poll_work);
-               schedule_work(&req->work);
-       }
-       return 0;
-}
-
-static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
-               void *key)
-{
-       struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
-       struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
-       struct file *file = req->file;
-       __poll_t mask = key_to_poll(key);
-
-       assert_spin_locked(&req->head->lock);
-
-       /* for instances that support it check for an event match first: */
-       if (mask && !(mask & req->events))
-               return 0;
-
-       mask = file->f_op->poll_mask(file, req->events) & req->events;
-       if (!mask)
-               return 0;
-
-       __aio_poll_remove(req);
-
-       /*
-        * Try completing without a context switch if we can acquire ctx_lock
-        * without spinning.  Otherwise we need to defer to a workqueue to
-        * avoid a deadlock due to the lock order.
-        */
-       if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
-               list_del_init(&iocb->ki_list);
-               spin_unlock(&iocb->ki_ctx->ctx_lock);
-
-               __aio_poll_complete(iocb, mask);
-       } else {
-               req->events = mask;
-               INIT_WORK(&req->work, aio_poll_work);
-               schedule_work(&req->work);
-       }
-
-       return 1;
-}
-
-static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
-{
-       struct kioctx *ctx = aiocb->ki_ctx;
-       struct poll_iocb *req = &aiocb->poll;
-       __poll_t mask;
-
-       /* reject any unknown events outside the normal event mask. */
-       if ((u16)iocb->aio_buf != iocb->aio_buf)
-               return -EINVAL;
-       /* reject fields that are not defined for poll */
-       if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
-               return -EINVAL;
-
-       req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
-       req->file = fget(iocb->aio_fildes);
-       if (unlikely(!req->file))
-               return -EBADF;
-       if (!file_has_poll_mask(req->file))
-               goto out_fail;
-
-       req->head = req->file->f_op->get_poll_head(req->file, req->events);
-       if (!req->head)
-               goto out_fail;
-       if (IS_ERR(req->head)) {
-               mask = EPOLLERR;
-               goto done;
-       }
-
-       init_waitqueue_func_entry(&req->wait, aio_poll_wake);
-       aiocb->ki_cancel = aio_poll_cancel;
-
-       spin_lock_irq(&ctx->ctx_lock);
-       spin_lock(&req->head->lock);
-       mask = req->file->f_op->poll_mask(req->file, req->events) & req->events;
-       if (!mask) {
-               __add_wait_queue(req->head, &req->wait);
-               list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
-       }
-       spin_unlock(&req->head->lock);
-       spin_unlock_irq(&ctx->ctx_lock);
-done:
-       if (mask)
-               __aio_poll_complete(aiocb, mask);
-       return 0;
-out_fail:
-       fput(req->file);
-       return -EINVAL; /* same as no support for IOCB_CMD_POLL */
-}
-
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                         bool compat)
 {
@@ -1808,9 +1665,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
        case IOCB_CMD_FDSYNC:
                ret = aio_fsync(&req->fsync, &iocb, true);
                break;
-       case IOCB_CMD_POLL:
-               ret = aio_poll(req, &iocb);
-               break;
        default:
                pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);
                ret = -EINVAL;
index cce6087d6880fa4c1673dbc8aab0026fc62391f4..e55843f536bcaa03b6b2aa8e1c3ffb67b946df88 100644 (file)
@@ -4542,8 +4542,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        offset_in_extent = em_start - em->start;
                em_end = extent_map_end(em);
                em_len = em_end - em_start;
-               disko = em->block_start + offset_in_extent;
                flags = 0;
+               if (em->block_start < EXTENT_MAP_LAST_BYTE)
+                       disko = em->block_start + offset_in_extent;
+               else
+                       disko = 0;
 
                /*
                 * bump off for our next call to get_extent
index e9482f0db9d08ffd79a117f0d6f08b6eb94cae99..eba61bcb9bb3cdd9759837b539c257aaaed1edef 100644 (file)
@@ -9005,13 +9005,14 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
 
        unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
 
-out_unlock:
        if (!ret2) {
                btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true);
                sb_end_pagefault(inode->i_sb);
                extent_changeset_free(data_reserved);
                return VM_FAULT_LOCKED;
        }
+
+out_unlock:
        unlock_page(page);
 out:
        btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0));
@@ -9443,6 +9444,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        u64 new_idx = 0;
        u64 root_objectid;
        int ret;
+       int ret2;
        bool root_log_pinned = false;
        bool dest_log_pinned = false;
 
@@ -9639,7 +9641,8 @@ static int btrfs_rename_exchange(struct inode *old_dir,
                        dest_log_pinned = false;
                }
        }
-       ret = btrfs_end_transaction(trans);
+       ret2 = btrfs_end_transaction(trans);
+       ret = ret ? ret : ret2;
 out_notrans:
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&fs_info->subvol_sem);
index c2837a32d689de9a7d5d3bfc96d7d861cd221dfb..43ecbe620deaca973a1cc513d8cb67717f6b4c16 100644 (file)
@@ -3577,7 +3577,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
                ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
                                              dst, dst_loff, &cmp);
                if (ret)
-                       goto out_unlock;
+                       goto out_free;
 
                loff += BTRFS_MAX_DEDUPE_LEN;
                dst_loff += BTRFS_MAX_DEDUPE_LEN;
@@ -3587,16 +3587,16 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
                ret = btrfs_extent_same_range(src, loff, tail_len, dst,
                                              dst_loff, &cmp);
 
+out_free:
+       kvfree(cmp.src_pages);
+       kvfree(cmp.dst_pages);
+
 out_unlock:
        if (same_inode)
                inode_unlock(src);
        else
                btrfs_double_inode_unlock(src, dst);
 
-out_free:
-       kvfree(cmp.src_pages);
-       kvfree(cmp.dst_pages);
-
        return ret;
 }
 
index 1874a6d2e6f5422c809759d0ca29e9bb973826bb..c25dc47210a397560e929f55fc3feea8f26798dd 100644 (file)
@@ -2680,8 +2680,10 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
                free_extent_buffer(scratch_leaf);
        }
 
-       if (done && !ret)
+       if (done && !ret) {
                ret = 1;
+               fs_info->qgroup_rescan_progress.objectid = (u64)-1;
+       }
        return ret;
 }
 
@@ -2784,13 +2786,20 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
 
        if (!init_flags) {
                /* we're resuming qgroup rescan at mount time */
-               if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN))
+               if (!(fs_info->qgroup_flags &
+                     BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
                        btrfs_warn(fs_info,
                        "qgroup rescan init failed, qgroup is not enabled");
-               else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
+                       ret = -EINVAL;
+               } else if (!(fs_info->qgroup_flags &
+                            BTRFS_QGROUP_STATUS_FLAG_ON)) {
                        btrfs_warn(fs_info,
                        "qgroup rescan init failed, qgroup rescan is not queued");
-               return -EINVAL;
+                       ret = -EINVAL;
+               }
+
+               if (ret)
+                       return ret;
        }
 
        mutex_lock(&fs_info->qgroup_rescan_lock);
index ee764ac352ab7b855165b797c1daf579fbaa45e1..a866be999216a81bcfa90dfcb17cc11177442731 100644 (file)
@@ -1135,6 +1135,7 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
        if (IS_ERR(realdn)) {
                pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
                       PTR_ERR(realdn), dn, in, ceph_vinop(in));
+               dput(dn);
                dn = realdn; /* note realdn contains the error */
                goto out;
        } else if (realdn) {
index 116146022aa1fa82d334790f7e2d7ff46b052bf3..bfe99950581527bcc494acb6419436e6373aa923 100644 (file)
@@ -126,6 +126,25 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
        seq_putc(m, '\n');
 }
 
+static void
+cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
+{
+       struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
+       struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr;
+
+       seq_printf(m, "\t\tSpeed: %zu bps\n", iface->speed);
+       seq_puts(m, "\t\tCapabilities: ");
+       if (iface->rdma_capable)
+               seq_puts(m, "rdma ");
+       if (iface->rss_capable)
+               seq_puts(m, "rss ");
+       seq_putc(m, '\n');
+       if (iface->sockaddr.ss_family == AF_INET)
+               seq_printf(m, "\t\tIPv4: %pI4\n", &ipv4->sin_addr);
+       else if (iface->sockaddr.ss_family == AF_INET6)
+               seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr);
+}
+
 static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 {
        struct list_head *tmp1, *tmp2, *tmp3;
@@ -312,6 +331,16 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
                                              mid_entry->mid);
                        }
                        spin_unlock(&GlobalMid_Lock);
+
+                       spin_lock(&ses->iface_lock);
+                       if (ses->iface_count)
+                               seq_printf(m, "\n\tServer interfaces: %zu\n",
+                                          ses->iface_count);
+                       for (j = 0; j < ses->iface_count; j++) {
+                               seq_printf(m, "\t%d)\n", j);
+                               cifs_dump_iface(m, &ses->iface_list[j]);
+                       }
+                       spin_unlock(&ses->iface_lock);
                }
        }
        spin_unlock(&cifs_tcp_ses_lock);
index 937251cc61c046916228f150916c8c0a82a442a5..ee2a8ec70056f7451695cb75bfe1e00a95280ff0 100644 (file)
@@ -37,7 +37,6 @@
 #include <crypto/aead.h>
 
 int __cifs_calc_signature(struct smb_rqst *rqst,
-                       int start,
                        struct TCP_Server_Info *server, char *signature,
                        struct shash_desc *shash)
 {
@@ -45,16 +44,27 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
        int rc;
        struct kvec *iov = rqst->rq_iov;
        int n_vec = rqst->rq_nvec;
+       int is_smb2 = server->vals->header_preamble_size == 0;
 
-       for (i = start; i < n_vec; i++) {
+       /* iov[0] is actual data and not the rfc1002 length for SMB2+ */
+       if (is_smb2) {
+               if (iov[0].iov_len <= 4)
+                       return -EIO;
+               i = 0;
+       } else {
+               if (n_vec < 2 || iov[0].iov_len != 4)
+                       return -EIO;
+               i = 1; /* skip rfc1002 length */
+       }
+
+       for (; i < n_vec; i++) {
                if (iov[i].iov_len == 0)
                        continue;
                if (iov[i].iov_base == NULL) {
                        cifs_dbg(VFS, "null iovec entry\n");
                        return -EIO;
                }
-               if (i == 1 && iov[1].iov_len <= 4)
-                       break; /* nothing to sign or corrupt header */
+
                rc = crypto_shash_update(shash,
                                         iov[i].iov_base, iov[i].iov_len);
                if (rc) {
@@ -118,7 +128,7 @@ static int cifs_calc_signature(struct smb_rqst *rqst,
                return rc;
        }
 
-       return __cifs_calc_signature(rqst, 1, server, signature,
+       return __cifs_calc_signature(rqst, server, signature,
                                     &server->secmech.sdescmd5->shash);
 }
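
The signing rework drops the caller-supplied start index in favour of a protocol check: SMB2 and later have no RFC1002 preamble (header_preamble_size == 0), so signing covers iov[0] onward, while SMB1 keeps its 4-byte length vector in iov[0] and signing starts at iov[1]. Illustratively, the iov layouts under the new convention (a sketch of the convention, not new API):

    /*
     * SMB1:  iov[0] = 4-byte RFC1002 length    (skipped by signing)
     *        iov[1..n] = SMB header + body     (signed)
     *
     * SMB2+: iov[0..n] = SMB2 header + body    (signed from offset 0)
     */
    int start = (server->vals->header_preamble_size == 0) ? 0 : 1;
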
 
index 1efa2e65bc1a8971f01811ac699a82cb7c1f1727..bd78da59a4fdcd7e84ff9b57e01753ec64ca746d 100644 (file)
@@ -33,6 +33,9 @@
 
 #define CIFS_MAGIC_NUMBER 0xFF534D42      /* the first four bytes of SMB PDUs */
 
+#define CIFS_PORT 445
+#define RFC1001_PORT 139
+
 /*
  * The sizes of various internal tables and strings
  */
@@ -312,6 +315,10 @@ struct smb_version_operations {
        /* send echo request */
        int (*echo)(struct TCP_Server_Info *);
        /* create directory */
+       int (*posix_mkdir)(const unsigned int xid, struct inode *inode,
+                       umode_t mode, struct cifs_tcon *tcon,
+                       const char *full_path,
+                       struct cifs_sb_info *cifs_sb);
        int (*mkdir)(const unsigned int, struct cifs_tcon *, const char *,
                     struct cifs_sb_info *);
        /* set info on created directory */
@@ -838,6 +845,13 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
 
 #endif
 
+struct cifs_server_iface {
+       size_t speed;
+       unsigned int rdma_capable : 1;
+       unsigned int rss_capable : 1;
+       struct sockaddr_storage sockaddr;
+};
+
 /*
  * Session structure.  One of these for each uid session with a particular host
  */
@@ -875,6 +889,20 @@ struct cifs_ses {
 #ifdef CONFIG_CIFS_SMB311
        __u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
 #endif /* 3.1.1 */
+
+       /*
+        * Network interfaces available on the server this session is
+        * connected to.
+        *
+        * Other channels can be opened by connecting and binding this
+        * session to interfaces from this list.
+        *
+        * iface_lock should be taken when accessing any of these fields
+        */
+       spinlock_t iface_lock;
+       struct cifs_server_iface *iface_list;
+       size_t iface_count;
+       unsigned long iface_last_update; /* jiffies */
 };
 
 static inline bool
@@ -883,6 +911,14 @@ cap_unix(struct cifs_ses *ses)
        return ses->server->vals->cap_unix & ses->capabilities;
 }
 
+struct cached_fid {
+       bool is_valid:1;        /* Do we have a usable root fid */
+       struct cifs_fid *fid;
+       struct mutex fid_mutex;
+       struct cifs_tcon *tcon;
+       struct work_struct lease_break;
+};
+
 /*
  * there is one of these for each connection to a resource on a particular
  * session
@@ -987,9 +1023,7 @@ struct cifs_tcon {
        struct fscache_cookie *fscache; /* cookie for share */
 #endif
        struct list_head pending_opens; /* list of incomplete opens */
-       bool valid_root_fid:1;  /* Do we have a useable root fid */
-       struct mutex prfid_mutex; /* prevents reopen race after dead ses*/
-       struct cifs_fid *prfid; /* handle to the directory at top of share */
+       struct cached_fid crfid; /* Cached root fid */
        /* BB add field for back pointer to sb struct(s)? */
 };
 
index 4e0d183c3d1016918d9934420af6e626d128d077..03018be1728333905ad0ae4eb6dde79aea5102fc 100644 (file)
@@ -112,10 +112,6 @@ extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
                        struct kvec *, int /* nvec to send */,
                        int * /* type of buf returned */, const int flags,
                        struct kvec * /* resp vec */);
-extern int smb2_send_recv(const unsigned int xid, struct cifs_ses *pses,
-                         struct kvec *pkvec, int nvec_to_send,
-                         int *pbuftype, const int flags,
-                         struct kvec *presp);
 extern int SendReceiveBlockingLock(const unsigned int xid,
                        struct cifs_tcon *ptcon,
                        struct smb_hdr *in_buf ,
@@ -544,7 +540,7 @@ int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
                           struct cifs_sb_info *cifs_sb,
                           const unsigned char *path, char *pbuf,
                           unsigned int *pbytes_written);
-int __cifs_calc_signature(struct smb_rqst *rqst, int start,
+int __cifs_calc_signature(struct smb_rqst *rqst,
                        struct TCP_Server_Info *server, char *signature,
                        struct shash_desc *shash);
 enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
@@ -552,6 +548,7 @@ enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
 struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
 void cifs_aio_ctx_release(struct kref *refcount);
 int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
+void smb2_cached_lease_break(struct work_struct *work);
 
 int cifs_alloc_hash(const char *name, struct crypto_shash **shash,
                    struct sdesc **sdesc);
index 42329b25877db2b3de349b0ce5723f70bebad92b..d352da325de34d3c11b6dbb8c6579f6e4f0a216f 100644 (file)
@@ -107,10 +107,10 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
        }
        spin_unlock(&tcon->open_file_lock);
 
-       mutex_lock(&tcon->prfid_mutex);
-       tcon->valid_root_fid = false;
-       memset(tcon->prfid, 0, sizeof(struct cifs_fid));
-       mutex_unlock(&tcon->prfid_mutex);
+       mutex_lock(&tcon->crfid.fid_mutex);
+       tcon->crfid.is_valid = false;
+       memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid));
+       mutex_unlock(&tcon->crfid.fid_mutex);
 
        /*
         * BB Add call to invalidate_inodes(sb) for all superblocks mounted
index 96645a7d8f27144a885863578d33e2b757afeec6..a57da1b88bdf5b5342b326ea06eb8686b2882cbe 100644 (file)
@@ -57,9 +57,6 @@
 #include "smb2proto.h"
 #include "smbdirect.h"
 
-#define CIFS_PORT 445
-#define RFC1001_PORT 139
-
 extern mempool_t *cifs_req_poolp;
 extern bool disable_legacy_dialects;
 
@@ -3029,8 +3026,11 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
 
 #ifdef CONFIG_CIFS_SMB311
        if ((volume_info->linux_ext) && (ses->server->posix_ext_supported)) {
-               if (ses->server->vals->protocol_id == SMB311_PROT_ID)
+               if (ses->server->vals->protocol_id == SMB311_PROT_ID) {
                        tcon->posix_extensions = true;
+                       printk_once(KERN_WARNING
+                               "SMB3.11 POSIX Extensions are experimental\n");
+               }
        }
 #endif /* 311 */
 
index f4697f548a394dbf5c42f731bf13bd529c9aaea0..a2cfb33e85c1f8cb25a2d32a52bb5d60c93b79f1 100644 (file)
@@ -1575,6 +1575,17 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
                goto mkdir_out;
        }
 
+       server = tcon->ses->server;
+
+#ifdef CONFIG_CIFS_SMB311
+       if ((server->ops->posix_mkdir) && (tcon->posix_extensions)) {
+               rc = server->ops->posix_mkdir(xid, inode, mode, tcon, full_path,
+                                             cifs_sb);
+               d_drop(direntry); /* for time being always refresh inode info */
+               goto mkdir_out;
+       }
+#endif /* SMB311 */
+
        if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb,
@@ -1583,8 +1594,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
                        goto mkdir_out;
        }
 
-       server = tcon->ses->server;
-
        if (!server->ops->mkdir) {
                rc = -ENOSYS;
                goto mkdir_out;
index af29ade195c002c0323d855edb391a155f1620f7..53e8362cbc4a953218d3fbd50f1c7133e5435cc9 100644 (file)
@@ -82,6 +82,7 @@ sesInfoAlloc(void)
                INIT_LIST_HEAD(&ret_buf->smb_ses_list);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
                mutex_init(&ret_buf->session_mutex);
+               spin_lock_init(&ret_buf->iface_lock);
        }
        return ret_buf;
 }
@@ -102,6 +103,7 @@ sesInfoFree(struct cifs_ses *buf_to_free)
        kfree(buf_to_free->user_name);
        kfree(buf_to_free->domainName);
        kzfree(buf_to_free->auth_key.response);
+       kfree(buf_to_free->iface_list);
        kzfree(buf_to_free);
 }
 
@@ -117,8 +119,9 @@ tconInfoAlloc(void)
                INIT_LIST_HEAD(&ret_buf->openFileList);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
                spin_lock_init(&ret_buf->open_file_lock);
-               mutex_init(&ret_buf->prfid_mutex);
-               ret_buf->prfid = kzalloc(sizeof(struct cifs_fid), GFP_KERNEL);
+               mutex_init(&ret_buf->crfid.fid_mutex);
+               ret_buf->crfid.fid = kzalloc(sizeof(struct cifs_fid),
+                                            GFP_KERNEL);
 #ifdef CONFIG_CIFS_STATS
                spin_lock_init(&ret_buf->stat_lock);
 #endif
@@ -136,7 +139,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free)
        atomic_dec(&tconInfoAllocCount);
        kfree(buf_to_free->nativeFileSystem);
        kzfree(buf_to_free->password);
-       kfree(buf_to_free->prfid);
+       kfree(buf_to_free->crfid.fid);
        kfree(buf_to_free);
 }
 
index e2bec47c684580089a70e7914ec71d2f523da3e3..3ff7cec2da81141f67482c57ab03de52aed855ba 100644 (file)
@@ -454,7 +454,8 @@ cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
 #ifdef CONFIG_CIFS_SMB311
        /* SMB311 POSIX extensions paths do not include leading slash */
        else if (cifs_sb_master_tlink(cifs_sb) &&
-                cifs_sb_master_tcon(cifs_sb)->posix_extensions) {
+                cifs_sb_master_tcon(cifs_sb)->posix_extensions &&
+                (from[0] == '/')) {
                start_of_path = from + 1;
        }
 #endif /* 311 */
@@ -492,10 +493,11 @@ cifs_ses_oplock_break(struct work_struct *work)
 {
        struct smb2_lease_break_work *lw = container_of(work,
                                struct smb2_lease_break_work, lease_break);
-       int rc;
+       int rc = 0;
 
        rc = SMB2_lease_break(0, tlink_tcon(lw->tlink), lw->lease_key,
                              lw->lease_state);
+
        cifs_dbg(FYI, "Lease release rc %d\n", rc);
        cifs_put_tlink(lw->tlink);
        kfree(lw);
@@ -561,6 +563,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
 
                open->oplock = lease_state;
        }
+
        return found;
 }
 
@@ -603,6 +606,18 @@ smb2_is_valid_lease_break(char *buffer)
                                        return true;
                                }
                                spin_unlock(&tcon->open_file_lock);
+
+                               if (tcon->crfid.is_valid &&
+                                   !memcmp(rsp->LeaseKey,
+                                           tcon->crfid.fid->lease_key,
+                                           SMB2_LEASE_KEY_SIZE)) {
+                                       INIT_WORK(&tcon->crfid.lease_break,
+                                                 smb2_cached_lease_break);
+                                       queue_work(cifsiod_wq,
+                                                  &tcon->crfid.lease_break);
+                                       spin_unlock(&cifs_tcp_ses_lock);
+                                       return true;
+                               }
                        }
                }
        }
index b15f5957d64591f0af611670088dd4dd8439fb43..0356b5559c711ffa20d2710425b823106b0a0dc5 100644 (file)
@@ -294,34 +294,191 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
        return rsize;
 }
 
-#ifdef CONFIG_CIFS_STATS2
+
+static int
+parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+                       size_t buf_len,
+                       struct cifs_server_iface **iface_list,
+                       size_t *iface_count)
+{
+       struct network_interface_info_ioctl_rsp *p;
+       struct sockaddr_in *addr4;
+       struct sockaddr_in6 *addr6;
+       struct iface_info_ipv4 *p4;
+       struct iface_info_ipv6 *p6;
+       struct cifs_server_iface *info;
+       ssize_t bytes_left;
+       size_t next = 0;
+       int nb_iface = 0;
+       int rc = 0;
+
+       *iface_list = NULL;
+       *iface_count = 0;
+
+       /*
+        * First pass: count and sanity check
+        */
+
+       bytes_left = buf_len;
+       p = buf;
+       while (bytes_left >= sizeof(*p)) {
+               nb_iface++;
+               next = le32_to_cpu(p->Next);
+               if (!next) {
+                       bytes_left -= sizeof(*p);
+                       break;
+               }
+               p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
+               bytes_left -= next;
+       }
+
+       if (!nb_iface) {
+               cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       if (bytes_left || p->Next)
+               cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
+
+
+       /*
+        * Second pass: extract info to internal structure
+        */
+
+       *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
+       if (!*iface_list) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       info = *iface_list;
+       bytes_left = buf_len;
+       p = buf;
+       while (bytes_left >= sizeof(*p)) {
+               info->speed = le64_to_cpu(p->LinkSpeed);
+               info->rdma_capable = le32_to_cpu(p->Capability) & RDMA_CAPABLE ? 1 : 0;
+               info->rss_capable = le32_to_cpu(p->Capability) & RSS_CAPABLE ? 1 : 0;
+
+               cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
+               cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
+               cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
+                        le32_to_cpu(p->Capability));
+
+               switch (p->Family) {
+               /*
+                * The kernel and wire socket structures have the same
+                * layout and use network byte order but make the
+                * conversion explicit in case either one changes.
+                */
+               case INTERNETWORK:
+                       addr4 = (struct sockaddr_in *)&info->sockaddr;
+                       p4 = (struct iface_info_ipv4 *)p->Buffer;
+                       addr4->sin_family = AF_INET;
+                       memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
+
+                       /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
+                       addr4->sin_port = cpu_to_be16(CIFS_PORT);
+
+                       cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
+                                &addr4->sin_addr);
+                       break;
+               case INTERNETWORKV6:
+                       addr6 = (struct sockaddr_in6 *)&info->sockaddr;
+                       p6 = (struct iface_info_ipv6 *)p->Buffer;
+                       addr6->sin6_family = AF_INET6;
+                       memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
+
+                       /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
+                       addr6->sin6_flowinfo = 0;
+                       addr6->sin6_scope_id = 0;
+                       addr6->sin6_port = cpu_to_be16(CIFS_PORT);
+
+                       cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
+                                &addr6->sin6_addr);
+                       break;
+               default:
+                       cifs_dbg(VFS,
+                                "%s: skipping unsupported socket family\n",
+                                __func__);
+                       goto next_iface;
+               }
+
+               (*iface_count)++;
+               info++;
+next_iface:
+               next = le32_to_cpu(p->Next);
+               if (!next)
+                       break;
+               p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
+               bytes_left -= next;
+       }
+
+       if (!*iface_count) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+out:
+       if (rc) {
+               kfree(*iface_list);
+               *iface_count = 0;
+               *iface_list = NULL;
+       }
+       return rc;
+}
+
+
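The two-pass walk above is a common way to consume variable-length records chained by a Next byte offset: count and bounds-check first, then allocate once and extract. Below is a minimal user-space sketch of the same pattern, assuming a simplified stand-in record layout rather than the real [MS-SMB2] wire format:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec {
        uint32_t next;          /* byte offset to the next record; 0 = last */
        uint32_t value;
};

static int count_recs(const uint8_t *buf, size_t len)
{
        size_t off = 0;
        struct rec r;
        int n = 0;

        /* first pass: count and sanity check; a second pass would
         * allocate n slots and copy each record out */
        while (off + sizeof(r) <= len) {
                memcpy(&r, buf + off, sizeof(r));
                n++;
                if (!r.next)
                        break;
                if (r.next < sizeof(r))         /* malformed chain */
                        break;
                off += r.next;
        }
        return n;
}

int main(void)
{
        uint8_t buf[3 * sizeof(struct rec)];
        struct rec r = { .next = sizeof(struct rec), .value = 1 };

        memcpy(buf, &r, sizeof(r));
        r.value = 2;
        memcpy(buf + sizeof(r), &r, sizeof(r));
        r.next = 0;
        r.value = 3;
        memcpy(buf + 2 * sizeof(r), &r, sizeof(r));

        printf("%d records\n", count_recs(buf, sizeof(buf)));
        return 0;
}
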
 static int
 SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
 {
        int rc;
        unsigned int ret_data_len = 0;
-       struct network_interface_info_ioctl_rsp *out_buf;
+       struct network_interface_info_ioctl_rsp *out_buf = NULL;
+       struct cifs_server_iface *iface_list;
+       size_t iface_count;
+       struct cifs_ses *ses = tcon->ses;
 
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                        FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
                        NULL /* no data input */, 0 /* no data input */,
                        (char **)&out_buf, &ret_data_len);
-       if (rc != 0)
+       if (rc != 0) {
                cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
-       else if (ret_data_len < sizeof(struct network_interface_info_ioctl_rsp)) {
-               cifs_dbg(VFS, "server returned bad net interface info buf\n");
-               rc = -EINVAL;
-       } else {
-               /* Dump info on first interface */
-               cifs_dbg(FYI, "Adapter Capability 0x%x\t",
-                       le32_to_cpu(out_buf->Capability));
-               cifs_dbg(FYI, "Link Speed %lld\n",
-                       le64_to_cpu(out_buf->LinkSpeed));
+               goto out;
        }
+
+       rc = parse_server_interfaces(out_buf, ret_data_len,
+                                    &iface_list, &iface_count);
+       if (rc)
+               goto out;
+
+       spin_lock(&ses->iface_lock);
+       kfree(ses->iface_list);
+       ses->iface_list = iface_list;
+       ses->iface_count = iface_count;
+       ses->iface_last_update = jiffies;
+       spin_unlock(&ses->iface_lock);
+
+out:
        kfree(out_buf);
        return rc;
 }
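Note that SMB3_request_interfaces builds the replacement list with no locks held and only swaps it into the session under iface_lock, so concurrent readers never observe a half-initialized list. A minimal pthread sketch of the same publish-and-free pattern (the names here are stand-ins, not the cifs structures):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct iface { unsigned long speed; };

static pthread_mutex_t iface_lock = PTHREAD_MUTEX_INITIALIZER;
static struct iface *iface_list;
static size_t iface_count;

static void update_ifaces(struct iface *new_list, size_t new_count)
{
        struct iface *old;

        pthread_mutex_lock(&iface_lock);
        old = iface_list;          /* publish the new list atomically */
        iface_list = new_list;
        iface_count = new_count;
        pthread_mutex_unlock(&iface_lock);
        free(old);                 /* free the old copy outside the lock */
}

int main(void)
{
        struct iface *l = calloc(2, sizeof(*l));

        if (!l)
                return 1;
        l[0].speed = 1000000000UL;
        update_ifaces(l, 2);
        printf("%zu interfaces\n", iface_count);
        return 0;
}
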
-#endif /* STATS2 */
+
+void
+smb2_cached_lease_break(struct work_struct *work)
+{
+       struct cached_fid *cfid = container_of(work,
+                               struct cached_fid, lease_break);
+       mutex_lock(&cfid->fid_mutex);
+       if (cfid->is_valid) {
+               cifs_dbg(FYI, "clear cached root file handle\n");
+               SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
+                          cfid->fid->volatile_fid);
+               cfid->is_valid = false;
+       }
+       mutex_unlock(&cfid->fid_mutex);
+}
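The work callback receives only a pointer to the embedded work_struct; container_of recovers the enclosing cached_fid from it. A minimal user-space sketch of that recovery, with stand-in types in place of the kernel ones:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct cached_handle {
        int is_valid;
        struct work lease_break;   /* embedded, like crfid.lease_break */
};

static void lease_break_cb(struct work *w)
{
        struct cached_handle *h =
                container_of(w, struct cached_handle, lease_break);

        /* invalidate the cached handle, as smb2_cached_lease_break does */
        h->is_valid = 0;
}

int main(void)
{
        struct cached_handle h = { .is_valid = 1 };

        lease_break_cb(&h.lease_break);
        printf("is_valid=%d\n", h.is_valid);
        return 0;
}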
 
 /*
  * Open the directory at the root of a share
@@ -331,13 +488,13 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
        struct cifs_open_parms oparams;
        int rc;
        __le16 srch_path = 0; /* Null - since an open of top of share */
-       u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+       u8 oplock = SMB2_OPLOCK_LEVEL_II;
 
-       mutex_lock(&tcon->prfid_mutex);
-       if (tcon->valid_root_fid) {
+       mutex_lock(&tcon->crfid.fid_mutex);
+       if (tcon->crfid.is_valid) {
                cifs_dbg(FYI, "found a cached root file handle\n");
-               memcpy(pfid, tcon->prfid, sizeof(struct cifs_fid));
-               mutex_unlock(&tcon->prfid_mutex);
+               memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
+               mutex_unlock(&tcon->crfid.fid_mutex);
                return 0;
        }
 
@@ -350,10 +507,11 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
 
        rc = SMB2_open(xid, &oparams, &srch_path, &oplock, NULL, NULL, NULL);
        if (rc == 0) {
-               memcpy(tcon->prfid, pfid, sizeof(struct cifs_fid));
-               tcon->valid_root_fid = true;
+               memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
+               tcon->crfid.tcon = tcon;
+               tcon->crfid.is_valid = true;
        }
-       mutex_unlock(&tcon->prfid_mutex);
+       mutex_unlock(&tcon->crfid.fid_mutex);
        return rc;
 }
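open_shroot is a classic lock-protected lazy cache: return the cached handle on a hit, otherwise perform the open and cache the result before dropping the mutex. A minimal sketch of the same shape, assuming a pthread mutex and a stand-in for the server round trip:

#include <pthread.h>
#include <stdio.h>

struct fid { unsigned long persistent, volatile_id; };

static pthread_mutex_t fid_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct fid cached_fid;
static int fid_valid;

static int server_open_root(struct fid *out)  /* stand-in for SMB2_open */
{
        out->persistent = 0x1234;
        out->volatile_id = 0x5678;
        return 0;
}

static int open_root(struct fid *pfid)
{
        int rc;

        pthread_mutex_lock(&fid_mutex);
        if (fid_valid) {            /* cache hit: no round trip */
                *pfid = cached_fid;
                pthread_mutex_unlock(&fid_mutex);
                return 0;
        }
        rc = server_open_root(pfid);
        if (rc == 0) {              /* cache the handle for later opens */
                cached_fid = *pfid;
                fid_valid = 1;
        }
        pthread_mutex_unlock(&fid_mutex);
        return rc;
}

int main(void)
{
        struct fid f;

        open_root(&f);
        printf("fid 0x%lx/0x%lx\n", f.persistent, f.volatile_id);
        return 0;
}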
 
@@ -383,9 +541,7 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
        if (rc)
                return;
 
-#ifdef CONFIG_CIFS_STATS2
        SMB3_request_interfaces(xid, tcon);
-#endif /* STATS2 */
 
        SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
                        FS_ATTRIBUTE_INFORMATION);
@@ -436,7 +592,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
        struct cifs_open_parms oparms;
        struct cifs_fid fid;
 
-       if ((*full_path == 0) && tcon->valid_root_fid)
+       if ((*full_path == 0) && tcon->crfid.is_valid)
                return 0;
 
        utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
@@ -2151,7 +2307,7 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
                   struct smb_rqst *old_rq)
 {
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)old_rq->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
 
        memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
        tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
@@ -2171,14 +2327,13 @@ static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
 }
 
 /* Assumes:
- * rqst->rq_iov[0]  is rfc1002 length
- * rqst->rq_iov[1]  is tranform header
- * rqst->rq_iov[2+] data to be encrypted/decrypted
+ * rqst->rq_iov[0]  is transform header
+ * rqst->rq_iov[1+] data to be encrypted/decrypted
  */
 static struct scatterlist *
 init_sg(struct smb_rqst *rqst, u8 *sign)
 {
-       unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages;
+       unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages + 1;
        unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
        struct scatterlist *sg;
        unsigned int i;
@@ -2189,10 +2344,10 @@ init_sg(struct smb_rqst *rqst, u8 *sign)
                return NULL;
 
        sg_init_table(sg, sg_len);
-       smb2_sg_set_buf(&sg[0], rqst->rq_iov[1].iov_base + 20, assoc_data_len);
-       for (i = 1; i < rqst->rq_nvec - 1; i++)
-               smb2_sg_set_buf(&sg[i], rqst->rq_iov[i+1].iov_base,
-                                               rqst->rq_iov[i+1].iov_len);
+       smb2_sg_set_buf(&sg[0], rqst->rq_iov[0].iov_base + 20, assoc_data_len);
+       for (i = 1; i < rqst->rq_nvec; i++)
+               smb2_sg_set_buf(&sg[i], rqst->rq_iov[i].iov_base,
+                                               rqst->rq_iov[i].iov_len);
        for (j = 0; i < sg_len - 1; i++, j++) {
                unsigned int len, offset;
 
@@ -2224,18 +2379,17 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
        return 1;
 }
 /*
- * Encrypt or decrypt @rqst message. @rqst has the following format:
- * iov[0] - rfc1002 length
- * iov[1] - transform header (associate data),
- * iov[2-N] and pages - data to encrypt.
- * On success return encrypted data in iov[2-N] and pages, leave iov[0-1]
+ * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
+ * iov[0]   - transform header (associated data),
+ * iov[1-N] - SMB2 header and pages - data to encrypt.
+ * On success return encrypted data in iov[1-N] and pages, leave iov[0]
  * untouched.
  */
 static int
 crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
 {
        struct smb2_transform_hdr *tr_hdr =
-                       (struct smb2_transform_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_transform_hdr *)rqst->rq_iov[0].iov_base;
        unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
        int rc = 0;
        struct scatterlist *sg;
@@ -2323,10 +2477,6 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
        return rc;
 }
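Under the new layout the transform header sits in iov[0] and everything after it is cipher input; only the header bytes past the 4-byte ProtocolId and 16-byte Signature are fed to the AEAD as associated data, which is where the recurring "sizeof(struct smb2_transform_hdr) - 20" comes from. A minimal sketch of that layout, with illustrative stand-in sizes:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

#define TR_HDR_SIZE   52   /* stand-in for sizeof(struct smb2_transform_hdr) */
#define SIG_AND_PROTO 20   /* ProtocolId (4) + Signature (16): not fed as AAD */

int main(void)
{
        unsigned char tr_hdr[TR_HDR_SIZE];
        unsigned char payload[] = "smb2 header and body";
        struct iovec iov[2];

        memset(tr_hdr, 0, sizeof(tr_hdr));
        iov[0].iov_base = tr_hdr;               /* transform header */
        iov[0].iov_len  = sizeof(tr_hdr);
        iov[1].iov_base = payload;              /* data to encrypt */
        iov[1].iov_len  = sizeof(payload);

        /* only the tail of the transform header is associated data */
        printf("assoc data: %zu bytes starting at offset %d\n",
               iov[0].iov_len - SIG_AND_PROTO, SIG_AND_PROTO);
        return 0;
}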
 
-/*
- * This is called from smb_send_rqst. At this point we have the rfc1002
- * header as the first element in the vector.
- */
 static int
 smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
                       struct smb_rqst *old_rq)
@@ -2335,7 +2485,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
        struct page **pages;
        struct smb2_transform_hdr *tr_hdr;
        unsigned int npages = old_rq->rq_npages;
-       unsigned int orig_len = get_rfc1002_length(old_rq->rq_iov[0].iov_base);
+       unsigned int orig_len;
        int i;
        int rc = -ENOMEM;
 
@@ -2355,18 +2505,14 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
                        goto err_free_pages;
        }
 
-       /* Make space for one extra iov to hold the transform header */
        iov = kmalloc_array(old_rq->rq_nvec + 1, sizeof(struct kvec),
                            GFP_KERNEL);
        if (!iov)
                goto err_free_pages;
 
-       /* copy all iovs from the old except the 1st one (rfc1002 length) */
-       memcpy(&iov[2], &old_rq->rq_iov[1],
-                               sizeof(struct kvec) * (old_rq->rq_nvec - 1));
-       /* copy the rfc1002 iov */
-       iov[0].iov_base = old_rq->rq_iov[0].iov_base;
-       iov[0].iov_len  = old_rq->rq_iov[0].iov_len;
+       /* copy all iovs from the old */
+       memcpy(&iov[1], &old_rq->rq_iov[0],
+                               sizeof(struct kvec) * old_rq->rq_nvec);
 
        new_rq->rq_iov = iov;
        new_rq->rq_nvec = old_rq->rq_nvec + 1;
@@ -2375,14 +2521,12 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
        if (!tr_hdr)
                goto err_free_iov;
 
+       orig_len = smb2_rqst_len(old_rq, false);
+
        /* fill the 2nd iov with a transform header */
        fill_transform_hdr(tr_hdr, orig_len, old_rq);
-       new_rq->rq_iov[1].iov_base = tr_hdr;
-       new_rq->rq_iov[1].iov_len = sizeof(struct smb2_transform_hdr);
-
-       /* Update rfc1002 header */
-       inc_rfc1001_len(new_rq->rq_iov[0].iov_base,
-                       sizeof(struct smb2_transform_hdr));
+       new_rq->rq_iov[0].iov_base = tr_hdr;
+       new_rq->rq_iov[0].iov_len = sizeof(struct smb2_transform_hdr);
 
        /* copy pages from the old */
        for (i = 0; i < npages; i++) {
@@ -2426,7 +2570,7 @@ smb3_free_transform_rq(struct smb_rqst *rqst)
                put_page(rqst->rq_pages[i]);
        kfree(rqst->rq_pages);
        /* free transform header */
-       kfree(rqst->rq_iov[1].iov_base);
+       kfree(rqst->rq_iov[0].iov_base);
        kfree(rqst->rq_iov);
 }
 
@@ -2443,19 +2587,17 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
                 unsigned int buf_data_size, struct page **pages,
                 unsigned int npages, unsigned int page_data_size)
 {
-       struct kvec iov[3];
+       struct kvec iov[2];
        struct smb_rqst rqst = {NULL};
        int rc;
 
-       iov[0].iov_base = NULL;
-       iov[0].iov_len = 0;
-       iov[1].iov_base = buf;
-       iov[1].iov_len = sizeof(struct smb2_transform_hdr);
-       iov[2].iov_base = buf + sizeof(struct smb2_transform_hdr);
-       iov[2].iov_len = buf_data_size;
+       iov[0].iov_base = buf;
+       iov[0].iov_len = sizeof(struct smb2_transform_hdr);
+       iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
+       iov[1].iov_len = buf_data_size;
 
        rqst.rq_iov = iov;
-       rqst.rq_nvec = 3;
+       rqst.rq_nvec = 2;
        rqst.rq_pages = pages;
        rqst.rq_npages = npages;
        rqst.rq_pagesz = PAGE_SIZE;
@@ -2467,7 +2609,7 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
        if (rc)
                return rc;
 
-       memmove(buf, iov[2].iov_base, buf_data_size);
+       memmove(buf, iov[1].iov_base, buf_data_size);
 
        server->total_read = buf_data_size + page_data_size;
 
@@ -3170,6 +3312,7 @@ struct smb_version_operations smb311_operations = {
        .set_compression = smb2_set_compression,
        .mkdir = smb2_mkdir,
        .mkdir_setinfo = smb2_mkdir_setinfo,
+       .posix_mkdir = smb311_posix_mkdir,
        .rmdir = smb2_rmdir,
        .unlink = smb2_unlink,
        .rename = smb2_rename_path,
index af032e1a3eac7adaf0570f5923e0ba6164e8ed6b..810b85787c9133909ef3731010b2bce604963b37 100644 (file)
@@ -602,6 +602,7 @@ static void assemble_neg_contexts(struct smb2_negotiate_req *req,
 int
 SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
 {
+       struct smb_rqst rqst;
        struct smb2_negotiate_req *req;
        struct smb2_negotiate_rsp *rsp;
        struct kvec iov[1];
@@ -673,7 +674,11 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
        /*
@@ -990,8 +995,9 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
        req->PreviousSessionId = sess_data->previous_session;
 
        req->Flags = 0; /* MBZ */
-       /* to enable echos and oplocks */
-       req->sync_hdr.CreditRequest = cpu_to_le16(3);
+
+       /* enough to enable echoes and oplocks and one max size write */
+       req->sync_hdr.CreditRequest = cpu_to_le16(130);
 
        /* only one of SMB2 signing flags may be set in SMB2 request */
        if (server->sign)
@@ -1027,6 +1033,7 @@ static int
 SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
 {
        int rc;
+       struct smb_rqst rqst;
        struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
        struct kvec rsp_iov = { NULL, 0 };
 
@@ -1035,10 +1042,13 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
                cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
        req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
 
-       /* BB add code to build os and lm fields */
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = sess_data->iov;
+       rqst.rq_nvec = 2;
 
-       rc = smb2_send_recv(sess_data->xid, sess_data->ses,
-                           sess_data->iov, 2,
+       /* BB add code to build os and lm fields */
+       rc = cifs_send_recv(sess_data->xid, sess_data->ses,
+                           &rqst,
                            &sess_data->buf0_type,
                            CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov);
        cifs_small_buf_release(sess_data->iov[0].iov_base);
@@ -1376,6 +1386,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
 int
 SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
 {
+       struct smb_rqst rqst;
        struct smb2_logoff_req *req; /* response is also trivial struct */
        int rc = 0;
        struct TCP_Server_Info *server;
@@ -1413,7 +1424,11 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
        /*
         * No tcon so can't do
@@ -1443,6 +1458,7 @@ int
 SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
          struct cifs_tcon *tcon, const struct nls_table *cp)
 {
+       struct smb_rqst rqst;
        struct smb2_tree_connect_req *req;
        struct smb2_tree_connect_rsp *rsp = NULL;
        struct kvec iov[2];
@@ -1499,7 +1515,11 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
            !smb3_encryption_required(tcon))
                req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
 
-       rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 2;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
 
@@ -1563,6 +1583,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
 int
 SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
 {
+       struct smb_rqst rqst;
        struct smb2_tree_disconnect_req *req; /* response is trivial */
        int rc = 0;
        struct cifs_ses *ses = tcon->ses;
@@ -1593,7 +1614,11 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
        if (rc)
                cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
@@ -1886,11 +1911,165 @@ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
        return 0;
 }
 
+#ifdef CONFIG_CIFS_SMB311
+int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+                              umode_t mode, struct cifs_tcon *tcon,
+                              const char *full_path,
+                              struct cifs_sb_info *cifs_sb)
+{
+       struct smb_rqst rqst;
+       struct smb2_create_req *req;
+       struct smb2_create_rsp *rsp;
+       struct TCP_Server_Info *server;
+       struct cifs_ses *ses = tcon->ses;
+       struct kvec iov[3]; /* make sure at least one for each open context */
+       struct kvec rsp_iov = {NULL, 0};
+       int resp_buftype;
+       int uni_path_len;
+       __le16 *copy_path = NULL;
+       int copy_size;
+       int rc = 0;
+       unsigned int n_iov = 2;
+       __u32 file_attributes = 0;
+       char *pc_buf = NULL;
+       int flags = 0;
+       unsigned int total_len;
+       __le16 *path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+
+       if (!path)
+               return -ENOMEM;
+
+       cifs_dbg(FYI, "mkdir\n");
+
+       if (ses && (ses->server))
+               server = ses->server;
+       else
+               return -EIO;
+
+       rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
+
+       if (rc)
+               return rc;
+
+       if (smb3_encryption_required(tcon))
+               flags |= CIFS_TRANSFORM_REQ;
+
+
+       req->ImpersonationLevel = IL_IMPERSONATION;
+       req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
+       /* File attributes ignored on open (used in create though) */
+       req->FileAttributes = cpu_to_le32(file_attributes);
+       req->ShareAccess = FILE_SHARE_ALL_LE;
+       req->CreateDisposition = cpu_to_le32(FILE_CREATE);
+       req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
+
+       iov[0].iov_base = (char *)req;
+       /* -1 since last byte is buf[0] which is sent below (path) */
+       iov[0].iov_len = total_len - 1;
+
+       req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
+
+       /* [MS-SMB2] 2.2.13 NameOffset:
+        * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
+        * the SMB2 header, the file name includes a prefix that will
+        * be processed during DFS name normalization as specified in
+        * section 3.3.5.9. Otherwise, the file name is relative to
+        * the share that is identified by the TreeId in the SMB2
+        * header.
+        */
+       if (tcon->share_flags & SHI1005_FLAGS_DFS) {
+               int name_len;
+
+               req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
+               rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
+                                                &name_len,
+                                                tcon->treeName, path);
+               if (rc) {
+                       cifs_small_buf_release(req);
+                       return rc;
+               }
+               req->NameLength = cpu_to_le16(name_len * 2);
+               uni_path_len = copy_size;
+               path = copy_path;
+       } else {
+               uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
+               /* MUST set path len (NameLength) to 0 when opening root of share */
+               req->NameLength = cpu_to_le16(uni_path_len - 2);
+               if (uni_path_len % 8 != 0) {
+                       copy_size = roundup(uni_path_len, 8);
+                       copy_path = kzalloc(copy_size, GFP_KERNEL);
+                       if (!copy_path) {
+                               cifs_small_buf_release(req);
+                               return -ENOMEM;
+                       }
+                       memcpy((char *)copy_path, (const char *)path,
+                              uni_path_len);
+                       uni_path_len = copy_size;
+                       path = copy_path;
+               }
+       }
+
+       iov[1].iov_len = uni_path_len;
+       iov[1].iov_base = path;
+       req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
+
+       if (tcon->posix_extensions) {
+               if (n_iov > 2) {
+                       struct create_context *ccontext =
+                           (struct create_context *)iov[n_iov-1].iov_base;
+                       ccontext->Next =
+                               cpu_to_le32(iov[n_iov-1].iov_len);
+               }
+
+               rc = add_posix_context(iov, &n_iov, mode);
+               if (rc) {
+                       cifs_small_buf_release(req);
+                       kfree(copy_path);
+                       return rc;
+               }
+               pc_buf = iov[n_iov-1].iov_base;
+       }
+
+
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_iov;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
+                           &rsp_iov);
+
+       cifs_small_buf_release(req);
+       rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
+
+       if (rc != 0) {
+               cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
+               trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
+                                   CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES, rc);
+               goto smb311_mkdir_exit;
+       } else
+               trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
+                                    ses->Suid, CREATE_NOT_FILE,
+                                    FILE_WRITE_ATTRIBUTES);
+
+       SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
+
+       /* Eventually save off posix specific response info and timestamps */
+
+smb311_mkdir_exit:
+       kfree(copy_path);
+       kfree(pc_buf);
+       free_rsp_buf(resp_buftype, rsp);
+       return rc;
+
+}
+#endif /* SMB311 */
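The roundup(uni_path_len, 8) above exists because any create contexts appended after the name must start on an 8-byte boundary, so a copy of the path is zero-padded when its length is not a multiple of 8. A minimal sketch of that padding step (the UTF-16LE literal is a stand-in for cifs_convert_path_to_utf16 output):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static size_t roundup8(size_t n)
{
        return (n + 7) & ~(size_t)7;
}

int main(void)
{
        /* "ab" in UTF-16LE plus a null terminator: 6 bytes */
        unsigned char path[] = { 'a', 0, 'b', 0, 0, 0 };
        size_t uni_path_len = sizeof(path);          /* 6 */
        size_t name_len = uni_path_len - 2;          /* wire NameLength: 4 */

        if (uni_path_len % 8 != 0) {
                size_t copy_size = roundup8(uni_path_len);   /* 8 */
                unsigned char *copy = calloc(1, copy_size);  /* zero padding */

                if (!copy)
                        return 1;
                memcpy(copy, path, uni_path_len);
                printf("NameLength=%zu, padded buffer=%zu bytes\n",
                       name_len, copy_size);
                free(copy);
        }
        return 0;
}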
+
 int
 SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
          __u8 *oplock, struct smb2_file_all_info *buf,
          struct kvec *err_iov, int *buftype)
 {
+       struct smb_rqst rqst;
        struct smb2_create_req *req;
        struct smb2_create_rsp *rsp;
        struct TCP_Server_Info *server;
@@ -2043,7 +2222,11 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
        }
 #endif /* SMB311 */
 
-       rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_iov;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
@@ -2099,6 +2282,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
           char *in_data, u32 indatalen,
           char **out_data, u32 *plen /* returned data len */)
 {
+       struct smb_rqst rqst;
        struct smb2_ioctl_req *req;
        struct smb2_ioctl_rsp *rsp;
        struct cifs_ses *ses;
@@ -2189,7 +2373,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
                req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
 
-       rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_iov;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
@@ -2274,6 +2462,7 @@ int
 SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
                 u64 persistent_fid, u64 volatile_fid, int flags)
 {
+       struct smb_rqst rqst;
        struct smb2_close_req *req;
        struct smb2_close_rsp *rsp;
        struct cifs_ses *ses = tcon->ses;
@@ -2301,7 +2490,11 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
 
@@ -2387,6 +2580,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
           u32 additional_info, size_t output_len, size_t min_len, void **data,
                u32 *dlen)
 {
+       struct smb_rqst rqst;
        struct smb2_query_info_req *req;
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov[2];
@@ -2427,7 +2621,11 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
        /* 1 for Buffer */
        iov[0].iov_len = total_len - 1;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
 
@@ -2594,11 +2792,10 @@ SMB2_echo(struct TCP_Server_Info *server)
 {
        struct smb2_echo_req *req;
        int rc = 0;
-       struct kvec iov[2];
+       struct kvec iov[1];
        struct smb_rqst rqst = { .rq_iov = iov,
-                                .rq_nvec = 2 };
+                                .rq_nvec = 1 };
        unsigned int total_len;
-       __be32 rfc1002_marker;
 
        cifs_dbg(FYI, "In echo request\n");
 
@@ -2614,11 +2811,8 @@ SMB2_echo(struct TCP_Server_Info *server)
 
        req->sync_hdr.CreditRequest = cpu_to_le16(1);
 
-       iov[0].iov_len = 4;
-       rfc1002_marker = cpu_to_be32(total_len);
-       iov[0].iov_base = &rfc1002_marker;
-       iov[1].iov_len = total_len;
-       iov[1].iov_base = (char *)req;
+       iov[0].iov_len = total_len;
+       iov[0].iov_base = (char *)req;
 
        rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
                             server, CIFS_ECHO_OP);
@@ -2633,6 +2827,7 @@ int
 SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
           u64 volatile_fid)
 {
+       struct smb_rqst rqst;
        struct smb2_flush_req *req;
        struct cifs_ses *ses = tcon->ses;
        struct kvec iov[1];
@@ -2660,7 +2855,11 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        if (rc != 0) {
@@ -2848,10 +3047,9 @@ smb2_async_readv(struct cifs_readdata *rdata)
        struct smb2_sync_hdr *shdr;
        struct cifs_io_parms io_parms;
        struct smb_rqst rqst = { .rq_iov = rdata->iov,
-                                .rq_nvec = 2 };
+                                .rq_nvec = 1 };
        struct TCP_Server_Info *server;
        unsigned int total_len;
-       __be32 req_len;
 
        cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
                 __func__, rdata->offset, rdata->bytes);
@@ -2882,12 +3080,8 @@ smb2_async_readv(struct cifs_readdata *rdata)
        if (smb3_encryption_required(io_parms.tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       req_len = cpu_to_be32(total_len);
-
-       rdata->iov[0].iov_base = &req_len;
-       rdata->iov[0].iov_len = sizeof(__be32);
-       rdata->iov[1].iov_base = buf;
-       rdata->iov[1].iov_len = total_len;
+       rdata->iov[0].iov_base = buf;
+       rdata->iov[0].iov_len = total_len;
 
        shdr = (struct smb2_sync_hdr *)buf;
 
@@ -2926,6 +3120,7 @@ int
 SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
          unsigned int *nbytes, char **buf, int *buf_type)
 {
+       struct smb_rqst rqst;
        int resp_buftype, rc = -EACCES;
        struct smb2_read_plain_req *req = NULL;
        struct smb2_read_rsp *rsp = NULL;
@@ -2946,7 +3141,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
@@ -3062,10 +3261,9 @@ smb2_async_writev(struct cifs_writedata *wdata,
        struct smb2_sync_hdr *shdr;
        struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
-       struct kvec iov[2];
+       struct kvec iov[1];
        struct smb_rqst rqst = { };
        unsigned int total_len;
-       __be32 rfc1002_marker;
 
        rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len);
        if (rc) {
@@ -3137,15 +3335,11 @@ smb2_async_writev(struct cifs_writedata *wdata,
                v1->length = cpu_to_le32(wdata->mr->mr->length);
        }
 #endif
-       /* 4 for rfc1002 length field and 1 for Buffer */
-       iov[0].iov_len = 4;
-       rfc1002_marker = cpu_to_be32(total_len - 1 + wdata->bytes);
-       iov[0].iov_base = &rfc1002_marker;
-       iov[1].iov_len = total_len - 1;
-       iov[1].iov_base = (char *)req;
+       iov[0].iov_len = total_len - 1;
+       iov[0].iov_base = (char *)req;
 
        rqst.rq_iov = iov;
-       rqst.rq_nvec = 2;
+       rqst.rq_nvec = 1;
        rqst.rq_pages = wdata->pages;
        rqst.rq_offset = wdata->page_offset;
        rqst.rq_npages = wdata->nr_pages;
@@ -3153,7 +3347,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
        rqst.rq_tailsz = wdata->tailsz;
 #ifdef CONFIG_CIFS_SMB_DIRECT
        if (wdata->mr) {
-               iov[1].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
+               iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
                rqst.rq_npages = 0;
        }
 #endif
@@ -3210,6 +3404,7 @@ int
 SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
           unsigned int *nbytes, struct kvec *iov, int n_vec)
 {
+       struct smb_rqst rqst;
        int rc = 0;
        struct smb2_write_req *req = NULL;
        struct smb2_write_rsp *rsp = NULL;
@@ -3251,7 +3446,11 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
        /* 1 for Buffer */
        iov[0].iov_len = total_len - 1;
 
-       rc = smb2_send_recv(xid, io_parms->tcon->ses, iov, n_vec + 1,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_vec + 1;
+
+       rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
                            &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
@@ -3323,6 +3522,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
                     u64 persistent_fid, u64 volatile_fid, int index,
                     struct cifs_search_info *srch_inf)
 {
+       struct smb_rqst rqst;
        struct smb2_query_directory_req *req;
        struct smb2_query_directory_rsp *rsp = NULL;
        struct kvec iov[2];
@@ -3395,7 +3595,11 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        iov[1].iov_base = (char *)(req->Buffer);
        iov[1].iov_len = len;
 
-       rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 2;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
 
@@ -3454,6 +3658,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
               u8 info_type, u32 additional_info, unsigned int num,
                void **data, unsigned int *size)
 {
+       struct smb_rqst rqst;
        struct smb2_set_info_req *req;
        struct smb2_set_info_rsp *rsp = NULL;
        struct kvec *iov;
@@ -3509,7 +3714,11 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
                iov[i].iov_len = size[i];
        }
 
-       rc = smb2_send_recv(xid, ses, iov, num, &resp_buftype, flags,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = num;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
@@ -3664,6 +3873,7 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
                  const u64 persistent_fid, const u64 volatile_fid,
                  __u8 oplock_level)
 {
+       struct smb_rqst rqst;
        int rc;
        struct smb2_oplock_break *req = NULL;
        struct cifs_ses *ses = tcon->ses;
@@ -3692,7 +3902,11 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        if (rc) {
@@ -3755,6 +3969,7 @@ int
 SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
              u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
 {
+       struct smb_rqst rqst;
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov;
        struct kvec rsp_iov;
@@ -3773,7 +3988,11 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = &iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(iov.iov_base);
        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3798,6 +4017,7 @@ int
 SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
              u64 persistent_fid, u64 volatile_fid, int level)
 {
+       struct smb_rqst rqst;
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov;
        struct kvec rsp_iov;
@@ -3829,7 +4049,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = &iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(iov.iov_base);
        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3868,6 +4092,7 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
           const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
           const __u32 num_lock, struct smb2_lock_element *buf)
 {
+       struct smb_rqst rqst;
        int rc = 0;
        struct smb2_lock_req *req = NULL;
        struct kvec iov[2];
@@ -3900,7 +4125,12 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
        iov[1].iov_len = count;
 
        cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
-       rc = smb2_send_recv(xid, tcon->ses, iov, 2, &resp_buf_type, flags,
+
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 2;
+
+       rc = cifs_send_recv(xid, tcon->ses, &rqst, &resp_buf_type, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        if (rc) {
@@ -3934,6 +4164,7 @@ int
 SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
                 __u8 *lease_key, const __le32 lease_state)
 {
+       struct smb_rqst rqst;
        int rc;
        struct smb2_lease_ack *req = NULL;
        struct cifs_ses *ses = tcon->ses;
@@ -3964,7 +4195,11 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        if (rc) {
index a345560001ced354c550d6ab2f507a18d72ff9d2..824dddeee3f2dc8723d6ce9ac792e9de71358796 100644 (file)
@@ -851,8 +851,11 @@ struct validate_negotiate_info_rsp {
        __le16 Dialect; /* Dialect in use for the connection */
 } __packed;
 
-#define RSS_CAPABLE    0x00000001
-#define RDMA_CAPABLE   0x00000002
+#define RSS_CAPABLE    cpu_to_le32(0x00000001)
+#define RDMA_CAPABLE   cpu_to_le32(0x00000002)
+
+#define INTERNETWORK   cpu_to_le16(0x0002)
+#define INTERNETWORKV6 cpu_to_le16(0x0017)
 
 struct network_interface_info_ioctl_rsp {
        __le32 Next; /* next interface. zero if this is last one */
@@ -860,7 +863,21 @@ struct network_interface_info_ioctl_rsp {
        __le32 Capability; /* RSS or RDMA Capable */
        __le32 Reserved;
        __le64 LinkSpeed;
-       char    SockAddr_Storage[128];
+       __le16 Family;
+       __u8 Buffer[126];
+} __packed;
+
+struct iface_info_ipv4 {
+       __be16 Port;
+       __be32 IPv4Address;
+       __be64 Reserved;
+} __packed;
+
+struct iface_info_ipv6 {
+       __be16 Port;
+       __be32 FlowInfo;
+       __u8   IPv6Address[16];
+       __be32 ScopeId;
 } __packed;
 
 #define NO_FILE_ID 0xFFFFFFFFFFFFFFFFULL /* general ioctls to srv not to file */
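Defining RSS_CAPABLE and RDMA_CAPABLE in wire (little-endian) byte order lets parse_server_interfaces mask the raw __le32 Capability field directly, converting only when the value is printed. A minimal sketch of the idea; the conversion helpers below are identity stand-ins that hold only on a little-endian host:

#include <stdint.h>
#include <stdio.h>

/* stand-ins for cpu_to_le32()/le32_to_cpu() on a little-endian host */
static uint32_t cpu_to_le32(uint32_t v) { return v; }
static uint32_t le32_to_cpu(uint32_t v) { return v; }

#define RSS_CAPABLE  cpu_to_le32(0x00000001)
#define RDMA_CAPABLE cpu_to_le32(0x00000002)

int main(void)
{
        uint32_t wire_capability = cpu_to_le32(0x00000003); /* as received */

        if (wire_capability & RSS_CAPABLE)   /* no byte-order conversion */
                printf("rss capable\n");
        printf("capabilities 0x%08x\n", le32_to_cpu(wire_capability));
        return 0;
}
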
index c84020057bd816c31a69fd173746374c5c224c8a..3ae208ac2a770d2177036d60e86d0104a4de022a 100644 (file)
@@ -79,6 +79,10 @@ extern int smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
                              struct cifs_sb_info *cifs_sb, bool set_alloc);
 extern int smb2_set_file_info(struct inode *inode, const char *full_path,
                              FILE_BASIC_INFO *buf, const unsigned int xid);
+extern int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+                              umode_t mode, struct cifs_tcon *tcon,
+                              const char *full_path,
+                              struct cifs_sb_info *cifs_sb);
 extern int smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon,
                      const char *name, struct cifs_sb_info *cifs_sb);
 extern void smb2_mkdir_setinfo(struct inode *inode, const char *full_path,
@@ -109,6 +113,8 @@ extern int smb2_unlock_range(struct cifsFileInfo *cfile,
 extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
 extern void smb2_reconnect_server(struct work_struct *work);
 extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server);
+extern unsigned long
+smb2_rqst_len(struct smb_rqst *rqst, bool skip_rfc1002_marker);
 
 /*
  * SMB2 Worker functions - most of protocol specific implementation details
index 349d5ccf854c26999ed8554f6d19cf64a89a33a3..51b9437c3c7b7cf60987170f7e46ac23d9ae98ac 100644 (file)
@@ -171,9 +171,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
        unsigned char smb2_signature[SMB2_HMACSHA256_SIZE];
        unsigned char *sigptr = smb2_signature;
        struct kvec *iov = rqst->rq_iov;
-       int iov_hdr_index = rqst->rq_nvec > 1 ? 1 : 0;
-       struct smb2_sync_hdr *shdr =
-               (struct smb2_sync_hdr *)iov[iov_hdr_index].iov_base;
+       struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
        struct cifs_ses *ses;
 
        ses = smb2_find_smb_ses(server, shdr->SessionId);
@@ -204,7 +202,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
                return rc;
        }
 
-       rc = __cifs_calc_signature(rqst, iov_hdr_index,  server, sigptr,
+       rc = __cifs_calc_signature(rqst, server, sigptr,
                &server->secmech.sdeschmacsha256->shash);
 
        if (!rc)
@@ -414,9 +412,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
        unsigned char smb3_signature[SMB2_CMACAES_SIZE];
        unsigned char *sigptr = smb3_signature;
        struct kvec *iov = rqst->rq_iov;
-       int iov_hdr_index = rqst->rq_nvec > 1 ? 1 : 0;
-       struct smb2_sync_hdr *shdr =
-               (struct smb2_sync_hdr *)iov[iov_hdr_index].iov_base;
+       struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
        struct cifs_ses *ses;
 
        ses = smb2_find_smb_ses(server, shdr->SessionId);
@@ -447,7 +443,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
                return rc;
        }
 
-       rc = __cifs_calc_signature(rqst, iov_hdr_index, server, sigptr,
+       rc = __cifs_calc_signature(rqst, server, sigptr,
                                   &server->secmech.sdesccmacaes->shash);
 
        if (!rc)
@@ -462,7 +458,7 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 {
        int rc = 0;
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
 
        if (!(shdr->Flags & SMB2_FLAGS_SIGNED) ||
            server->tcpStatus == CifsNeedNegotiate)
@@ -635,7 +631,7 @@ smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
 {
        int rc;
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;
 
        smb2_seq_num_into_buf(ses->server, shdr);
@@ -656,7 +652,7 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 {
        int rc;
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;
 
        smb2_seq_num_into_buf(server, shdr);
index e459c97151b34e684dc3f3cbbc36772fee5aaee5..6fd94d9ffac21ad6d8c4fbc436d2b8afa063aede 100644 (file)
@@ -18,6 +18,7 @@
 #include "smbdirect.h"
 #include "cifs_debug.h"
 #include "cifsproto.h"
+#include "smb2proto.h"
 
 static struct smbd_response *get_empty_queue_buffer(
                struct smbd_connection *info);
@@ -2087,7 +2088,7 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
        struct kvec vec;
        int nvecs;
        int size;
-       unsigned int buflen = 0, remaining_data_length;
+       unsigned int buflen, remaining_data_length;
        int start, i, j;
        int max_iov_size =
                info->max_send_size - sizeof(struct smbd_data_transfer);
@@ -2111,25 +2112,13 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
                log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
                return -EINVAL;
        }
-       iov = &rqst->rq_iov[1];
-
-       /* total up iov array first */
-       for (i = 0; i < rqst->rq_nvec-1; i++) {
-               buflen += iov[i].iov_len;
-       }
 
        /*
         * Add in the page array if there is one. The caller needs to set
         * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
         * ends at page boundary
         */
-       if (rqst->rq_npages) {
-               if (rqst->rq_npages == 1)
-                       buflen += rqst->rq_tailsz;
-               else
-                       buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
-                                       rqst->rq_offset + rqst->rq_tailsz;
-       }
+       buflen = smb2_rqst_len(rqst, true);
 
        if (buflen + sizeof(struct smbd_data_transfer) >
                info->max_fragmented_send_size) {
@@ -2139,6 +2128,8 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
                goto done;
        }
 
+       iov = &rqst->rq_iov[1];
+
        cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
        for (i = 0; i < rqst->rq_nvec-1; i++)
                dump_smb(iov[i].iov_base, iov[i].iov_len);
index 61e74d455d90625339591a3b47560f5bdb50c343..67e413f6ee4d8fd1dbd1eede0a7b0a9e6442a9e3 100644 (file)
@@ -378,7 +378,7 @@ DEFINE_EVENT(smb3_open_err_class, smb3_##name,    \
        TP_ARGS(xid, tid, sesid, create_options, desired_access, rc))
 
 DEFINE_SMB3_OPEN_ERR_EVENT(open_err);
-
+DEFINE_SMB3_OPEN_ERR_EVENT(posix_mkdir_err);
 
 DECLARE_EVENT_CLASS(smb3_open_done_class,
        TP_PROTO(unsigned int xid,
@@ -420,6 +420,7 @@ DEFINE_EVENT(smb3_open_done_class, smb3_##name,  \
        TP_ARGS(xid, fid, tid, sesid, create_options, desired_access))
 
 DEFINE_SMB3_OPEN_DONE_EVENT(open_done);
+DEFINE_SMB3_OPEN_DONE_EVENT(posix_mkdir_done);
 
 #endif /* _CIFS_TRACE_H */
 
index 1f1a68f8911001bae86976171e44a09402982d92..fb57dfbfb749973c1c72423a93d76442c0a07fad 100644 (file)
@@ -201,15 +201,24 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
        return 0;
 }
 
-static unsigned long
-rqst_len(struct smb_rqst *rqst)
+unsigned long
+smb2_rqst_len(struct smb_rqst *rqst, bool skip_rfc1002_marker)
 {
        unsigned int i;
-       struct kvec *iov = rqst->rq_iov;
+       struct kvec *iov;
+       int nvec;
        unsigned long buflen = 0;
 
+       if (skip_rfc1002_marker && rqst->rq_iov[0].iov_len == 4) {
+               iov = &rqst->rq_iov[1];
+               nvec = rqst->rq_nvec - 1;
+       } else {
+               iov = rqst->rq_iov;
+               nvec = rqst->rq_nvec;
+       }
+
        /* total up iov array first */
-       for (i = 0; i < rqst->rq_nvec; i++)
+       for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;
 
        /*
@@ -236,18 +245,20 @@ rqst_len(struct smb_rqst *rqst)
 }
 
 static int
-__smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+               struct smb_rqst *rqst)
 {
-       int rc;
-       struct kvec *iov = rqst->rq_iov;
-       int n_vec = rqst->rq_nvec;
-       unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
-       unsigned long send_length;
-       unsigned int i;
+       int rc = 0;
+       struct kvec *iov;
+       int n_vec;
+       unsigned int send_length = 0;
+       unsigned int i, j;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        int val = 1;
+       __be32 rfc1002_marker;
+
        if (cifs_rdma_enabled(server) && server->smbd_conn) {
                rc = smbd_send(server->smbd_conn, rqst);
                goto smbd_done;
@@ -255,51 +266,67 @@ __smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
        if (ssocket == NULL)
                return -ENOTSOCK;
 
-       /* sanity check send length */
-       send_length = rqst_len(rqst);
-       if (send_length != smb_buf_length + 4) {
-               WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
-                       send_length, smb_buf_length);
-               return -EIO;
-       }
-
-       if (n_vec < 2)
-               return -EIO;
-
-       cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
-       dump_smb(iov[0].iov_base, iov[0].iov_len);
-       dump_smb(iov[1].iov_base, iov[1].iov_len);
-
        /* cork the socket */
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));
 
-       size = 0;
-       for (i = 0; i < n_vec; i++)
-               size += iov[i].iov_len;
+       for (j = 0; j < num_rqst; j++)
+               send_length += smb2_rqst_len(&rqst[j], true);
+       rfc1002_marker = cpu_to_be32(send_length);
 
-       iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);
+       /* Generate a rfc1002 marker for SMB2+ */
+       if (server->vals->header_preamble_size == 0) {
+               struct kvec hiov = {
+                       .iov_base = &rfc1002_marker,
+                       .iov_len  = 4
+               };
+               iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov,
+                             1, 4);
+               rc = smb_send_kvec(server, &smb_msg, &sent);
+               if (rc < 0)
+                       goto uncork;
 
-       rc = smb_send_kvec(server, &smb_msg, &sent);
-       if (rc < 0)
-               goto uncork;
+               total_len += sent;
+               send_length += 4;
+       }
 
-       total_len += sent;
+       cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
 
-       /* now walk the page array and send each page in it */
-       for (i = 0; i < rqst->rq_npages; i++) {
-               struct bio_vec bvec;
+       for (j = 0; j < num_rqst; j++) {
+               iov = rqst[j].rq_iov;
+               n_vec = rqst[j].rq_nvec;
 
-               bvec.bv_page = rqst->rq_pages[i];
-               rqst_page_get_length(rqst, i, &bvec.bv_len, &bvec.bv_offset);
+               size = 0;
+               for (i = 0; i < n_vec; i++) {
+                       dump_smb(iov[i].iov_base, iov[i].iov_len);
+                       size += iov[i].iov_len;
+               }
+
+               iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC,
+                             iov, n_vec, size);
 
-               iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
-                             &bvec, 1, bvec.bv_len);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
-                       break;
+                       goto uncork;
 
                total_len += sent;
+
+               /* now walk the page array and send each page in it */
+               for (i = 0; i < rqst[j].rq_npages; i++) {
+                       struct bio_vec bvec;
+
+                       bvec.bv_page = rqst[j].rq_pages[i];
+                       rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
+                                            &bvec.bv_offset);
+
+                       iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
+                                     &bvec, 1, bvec.bv_len);
+                       rc = smb_send_kvec(server, &smb_msg, &sent);
+                       if (rc < 0)
+                               break;
+
+                       total_len += sent;
+               }
        }
 
 uncork:
@@ -308,9 +335,9 @@ __smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));
 
-       if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
+       if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
-                        smb_buf_length + 4, total_len);
+                        send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
@@ -335,7 +362,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
        int rc;
 
        if (!(flags & CIFS_TRANSFORM_REQ))
-               return __smb_send_rqst(server, rqst);
+               return __smb_send_rqst(server, 1, rqst);
 
        if (!server->ops->init_transform_rq ||
            !server->ops->free_transform_rq) {
@@ -347,7 +374,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
        if (rc)
                return rc;
 
-       rc = __smb_send_rqst(server, &cur_rqst);
+       rc = __smb_send_rqst(server, 1, &cur_rqst);
        server->ops->free_transform_rq(&cur_rqst);
        return rc;
 }
@@ -365,7 +392,7 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;
 
-       return __smb_send_rqst(server, &rqst);
+       return __smb_send_rqst(server, 1, &rqst);
 }
 
 static int
@@ -730,7 +757,6 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
-
        rc = wait_for_free_request(ses->server, timeout, optype);
        if (rc)
                return rc;
@@ -766,8 +792,8 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
 
 #ifdef CONFIG_CIFS_SMB311
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
-               smb311_update_preauth_hash(ses, rqst->rq_iov+1,
-                                          rqst->rq_nvec-1);
+               smb311_update_preauth_hash(ses, rqst->rq_iov,
+                                          rqst->rq_nvec);
 #endif
 
        if (timeout == CIFS_ASYNC_OP)
@@ -812,8 +838,8 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
 #ifdef CONFIG_CIFS_SMB311
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
-                       .iov_base = buf,
-                       .iov_len = midQ->resp_buf_size
+                       .iov_base = resp_iov->iov_base,
+                       .iov_len = resp_iov->iov_len
                };
                smb311_update_preauth_hash(ses, &iov, 1);
        }
@@ -872,49 +898,6 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
        return rc;
 }
 
-/* Like SendReceive2 but iov[0] does not contain an rfc1002 header */
-int
-smb2_send_recv(const unsigned int xid, struct cifs_ses *ses,
-              struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
-              const int flags, struct kvec *resp_iov)
-{
-       struct smb_rqst rqst;
-       struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
-       int rc;
-       int i;
-       __u32 count;
-       __be32 rfc1002_marker;
-
-       if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
-               new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
-                                       GFP_KERNEL);
-               if (!new_iov)
-                       return -ENOMEM;
-       } else
-               new_iov = s_iov;
-
-       /* 1st iov is an RFC1002 Session Message length */
-       memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
-
-       count = 0;
-       for (i = 1; i < n_vec + 1; i++)
-               count += new_iov[i].iov_len;
-
-       rfc1002_marker = cpu_to_be32(count);
-
-       new_iov[0].iov_base = &rfc1002_marker;
-       new_iov[0].iov_len = 4;
-
-       memset(&rqst, 0, sizeof(struct smb_rqst));
-       rqst.rq_iov = new_iov;
-       rqst.rq_nvec = n_vec + 1;
-
-       rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
-       if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
-               kfree(new_iov);
-       return rc;
-}
-
 int
 SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
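
The page-walk added to __smb_send_rqst above maps each page of the request into a single-entry bio_vec and reuses the kvec sender for it. A minimal sketch of that loop in isolation, using this kernel's iov_iter_bvec(WRITE | ITER_BVEC, ...) signature and the hunk's rqst_page_get_length() helper; send_rqst_pages itself is a hypothetical name:

static int send_rqst_pages(struct TCP_Server_Info *server,
                           struct smb_rqst *rqst, struct msghdr *msg,
                           size_t *total_len)
{
        size_t sent;
        int i, rc = 0;

        for (i = 0; i < rqst->rq_npages; i++) {
                struct bio_vec bvec;

                bvec.bv_page = rqst->rq_pages[i];
                rqst_page_get_length(rqst, i, &bvec.bv_len, &bvec.bv_offset);

                /* Point the iterator at exactly one page fragment ... */
                iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC,
                              &bvec, 1, bvec.bv_len);
                /* ... and reuse the kvec sender for the bvec payload. */
                rc = smb_send_kvec(server, msg, &sent);
                if (rc < 0)
                        break;
                *total_len += sent;
        }
        return rc;
}
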
index ceb1031f1cac948e74a970f02058cfeb52d7a351..08d3bd602f73d8f219ee1f259c0cbaa839245c56 100644 (file)
@@ -101,20 +101,14 @@ static int eventfd_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-static struct wait_queue_head *
-eventfd_get_poll_head(struct file *file, __poll_t events)
-{
-       struct eventfd_ctx *ctx = file->private_data;
-
-       return &ctx->wqh;
-}
-
-static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
+static __poll_t eventfd_poll(struct file *file, poll_table *wait)
 {
        struct eventfd_ctx *ctx = file->private_data;
        __poll_t events = 0;
        u64 count;
 
+       poll_wait(file, &ctx->wqh, wait);
+
        /*
         * All writes to ctx->count occur within ctx->wqh.lock.  This read
         * can be done outside ctx->wqh.lock because we know that poll_wait
@@ -156,11 +150,11 @@ static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
        count = READ_ONCE(ctx->count);
 
        if (count > 0)
-               events |= (EPOLLIN & eventmask);
+               events |= EPOLLIN;
        if (count == ULLONG_MAX)
                events |= EPOLLERR;
        if (ULLONG_MAX - 1 > count)
-               events |= (EPOLLOUT & eventmask);
+               events |= EPOLLOUT;
 
        return events;
 }
@@ -311,8 +305,7 @@ static const struct file_operations eventfd_fops = {
        .show_fdinfo    = eventfd_show_fdinfo,
 #endif
        .release        = eventfd_release,
-       .get_poll_head  = eventfd_get_poll_head,
-       .poll_mask      = eventfd_poll_mask,
+       .poll           = eventfd_poll,
        .read           = eventfd_read,
        .write          = eventfd_write,
        .llseek         = noop_llseek,
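
This hunk (like the eventpoll, pipe and timerfd hunks below) moves from the get_poll_head()/poll_mask() pair back to a single ->poll method. The contract is the same everywhere: call poll_wait() to register on the wait queue before reading any state, then return the ready mask. A generic sketch, where foo_ctx and its ready flag are purely illustrative:

static __poll_t foo_poll(struct file *file, poll_table *wait)
{
        struct foo_ctx *ctx = file->private_data;
        __poll_t events = 0;

        /*
         * Queue us first, so a wakeup between the check below and the
         * caller going to sleep cannot be lost.
         */
        poll_wait(file, &ctx->wqh, wait);

        if (ctx->ready)
                events |= EPOLLIN;
        return events;
}
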
index ea4436f409fb005a16edeca3f49f29f955db0171..67db22fe99c5ce8bf0ba606c0a45f221cbf69b38 100644 (file)
@@ -922,18 +922,14 @@ static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head
        return 0;
 }
 
-static struct wait_queue_head *ep_eventpoll_get_poll_head(struct file *file,
-               __poll_t eventmask)
-{
-       struct eventpoll *ep = file->private_data;
-       return &ep->poll_wait;
-}
-
-static __poll_t ep_eventpoll_poll_mask(struct file *file, __poll_t eventmask)
+static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
 {
        struct eventpoll *ep = file->private_data;
        int depth = 0;
 
+       /* Insert inside our poll wait queue */
+       poll_wait(file, &ep->poll_wait, wait);
+
        /*
         * Proceed to find out if wanted events are really available inside
         * the ready list.
@@ -972,8 +968,7 @@ static const struct file_operations eventpoll_fops = {
        .show_fdinfo    = ep_show_fdinfo,
 #endif
        .release        = ep_eventpoll_release,
-       .get_poll_head  = ep_eventpoll_get_poll_head,
-       .poll_mask      = ep_eventpoll_poll_mask,
+       .poll           = ep_eventpoll_poll,
        .llseek         = noop_llseek,
 };
 
index cc40802ddfa856d14aefc8ef75ec9e61b89864b0..00e759f051619cfd37a58108265bc9f798554a21 100644 (file)
@@ -748,7 +748,6 @@ extern void ext2_free_blocks (struct inode *, unsigned long,
                              unsigned long);
 extern unsigned long ext2_count_free_blocks (struct super_block *);
 extern unsigned long ext2_count_dirs (struct super_block *);
-extern void ext2_check_blocks_bitmap (struct super_block *);
 extern struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
                                                    unsigned int block_group,
                                                    struct buffer_head ** bh);
@@ -771,7 +770,6 @@ extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page
 extern struct inode * ext2_new_inode (struct inode *, umode_t, const struct qstr *);
 extern void ext2_free_inode (struct inode *);
 extern unsigned long ext2_count_free_inodes (struct super_block *);
-extern void ext2_check_inodes_bitmap (struct super_block *);
 extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
 
 /* inode.c */
index 25ab1274090f8532254e783def084bccd24a21c4..8ff53f8da3bcc414fdad44ac3bb76a88258e4d51 100644 (file)
@@ -557,6 +557,9 @@ static int parse_options(char *options, struct super_block *sb,
                        set_opt (opts->s_mount_opt, NO_UID32);
                        break;
                case Opt_nocheck:
+                       ext2_msg(sb, KERN_WARNING,
+                               "Option nocheck/check=none is deprecated and"
+                               " will be removed in June 2020.");
                        clear_opt (opts->s_mount_opt, CHECK);
                        break;
                case Opt_debug:
@@ -1335,9 +1338,6 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
        new_opts.s_resgid = sbi->s_resgid;
        spin_unlock(&sbi->s_lock);
 
-       /*
-        * Allow the "check" option to be passed as a remount option.
-        */
        if (!parse_options(data, sb, &new_opts))
                return -EINVAL;
 
index c60f3d32ee911192c0cd8dae3b7cb11c0f416411..a6797986b625a34d19e097050c58f582c177c30c 100644 (file)
@@ -491,15 +491,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
        if (size > PSIZE) {
                /*
                 * To keep the rest of the code simple.  Allocate a
-                * contiguous buffer to work with
+                * contiguous buffer to work with. Make the buffer large
+                * enough to make use of the whole extent.
                 */
-               ea_buf->xattr = kmalloc(size, GFP_KERNEL);
+               ea_buf->max_size = (size + sb->s_blocksize - 1) &
+                   ~(sb->s_blocksize - 1);
+
+               ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
                if (ea_buf->xattr == NULL)
                        return -ENOMEM;
 
                ea_buf->flag = EA_MALLOC;
-               ea_buf->max_size = (size + sb->s_blocksize - 1) &
-                   ~(sb->s_blocksize - 1);
 
                if (ea_size == 0)
                        return 0;
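
The fix computes the rounded-up max_size before kmalloc() so the buffer really is as large as later code assumes. The rounding is the standard power-of-two round-up idiom; a sketch assuming blocksize is a power of two (s_blocksize always is):

/* Round size up to the next multiple of blocksize (a power of two). */
static inline size_t round_up_blocks(size_t size, size_t blocksize)
{
        return (size + blocksize - 1) & ~(blocksize - 1);
}
/* e.g. round_up_blocks(5000, 4096) == 8192 */
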
index bbd0465535ebd9e433a812d60ab345161ef736b3..f033f3a69a3bcf7259192a9e062d7af295f90639 100644 (file)
@@ -883,8 +883,10 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                res = nfs_delegation_find_inode_server(server, fhandle);
-               if (res != ERR_PTR(-ENOENT))
+               if (res != ERR_PTR(-ENOENT)) {
+                       rcu_read_unlock();
                        return res;
+               }
        }
        rcu_read_unlock();
        return ERR_PTR(-ENOENT);
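
The bug fixed here is an RCU read-side leak: the early return inside list_for_each_entry_rcu() skipped rcu_read_unlock(). The generic shape of the fix, with struct obj and obj_get() hypothetical (a real lookup must pin the object, e.g. via a reference, before unlocking):

static struct obj *obj_find(struct list_head *head, int key)
{
        struct obj *o;

        rcu_read_lock();
        list_for_each_entry_rcu(o, head, link) {
                if (o->key == key) {
                        obj_get(o);             /* pin before unlock */
                        rcu_read_unlock();      /* every exit path unlocks */
                        return o;
                }
        }
        rcu_read_unlock();
        return NULL;
}
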
index d4a07acad5989e1374f879f2cc46c284f9aa8c4f..8f003792ccde1c24c3bcd444a609b888a629340f 100644 (file)
@@ -1243,17 +1243,18 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
                                           hdr->ds_clp, hdr->lseg,
                                           hdr->pgio_mirror_idx);
 
+       clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
+       clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
        switch (err) {
        case -NFS4ERR_RESET_TO_PNFS:
                if (ff_layout_choose_best_ds_for_read(hdr->lseg,
                                        hdr->pgio_mirror_idx + 1,
                                        &hdr->pgio_mirror_idx))
                        goto out_eagain;
-               ff_layout_read_record_layoutstats_done(task, hdr);
-               pnfs_read_resend_pnfs(hdr);
+               set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
                return task->tk_status;
        case -NFS4ERR_RESET_TO_MDS:
-               ff_layout_reset_read(hdr);
+               set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
                return task->tk_status;
        case -EAGAIN:
                goto out_eagain;
@@ -1403,6 +1404,10 @@ static void ff_layout_read_release(void *data)
        struct nfs_pgio_header *hdr = data;
 
        ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
+       if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+               pnfs_read_resend_pnfs(hdr);
+       else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+               ff_layout_reset_read(hdr);
        pnfs_generic_rw_release(data);
 }
 
@@ -1423,12 +1428,14 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
                                           hdr->ds_clp, hdr->lseg,
                                           hdr->pgio_mirror_idx);
 
+       clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
+       clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
        switch (err) {
        case -NFS4ERR_RESET_TO_PNFS:
-               ff_layout_reset_write(hdr, true);
+               set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
                return task->tk_status;
        case -NFS4ERR_RESET_TO_MDS:
-               ff_layout_reset_write(hdr, false);
+               set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
                return task->tk_status;
        case -EAGAIN:
                return -EAGAIN;
@@ -1575,6 +1582,10 @@ static void ff_layout_write_release(void *data)
        struct nfs_pgio_header *hdr = data;
 
        ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
+       if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+               ff_layout_reset_write(hdr, true);
+       else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+               ff_layout_reset_write(hdr, false);
        pnfs_generic_rw_release(data);
 }
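
Both the read and write paths above apply the same deferral: the RPC ->done callback only records the resend decision in hdr->flags, and ->release, which runs after layoutstats are recorded, acts on it. Condensed into two hypothetical helpers (the flag and resend function names are from the hunks):

static void resend_decide(struct nfs_pgio_header *hdr, int err)
{
        clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
        clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
        if (err == -NFS4ERR_RESET_TO_PNFS)
                set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
        else if (err == -NFS4ERR_RESET_TO_MDS)
                set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
}

static void resend_act(struct nfs_pgio_header *hdr)
{
        if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
                pnfs_read_resend_pnfs(hdr);
        else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
                ff_layout_reset_read(hdr);
}
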
 
index ed45090e4df6471902f5968b908429fe28976280..6dd146885da99304c8183f5fae21741f4aa3625f 100644 (file)
@@ -3294,6 +3294,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
        struct nfs4_closedata *calldata = data;
        struct nfs4_state *state = calldata->state;
        struct inode *inode = calldata->inode;
+       struct pnfs_layout_hdr *lo;
        bool is_rdonly, is_wronly, is_rdwr;
        int call_close = 0;
 
@@ -3337,6 +3338,12 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
                goto out_wait;
        }
 
+       lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
+       if (lo && !pnfs_layout_is_valid(lo)) {
+               calldata->arg.lr_args = NULL;
+               calldata->res.lr_res = NULL;
+       }
+
        if (calldata->arg.fmode == 0)
                task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
 
@@ -5972,12 +5979,19 @@ static void nfs4_delegreturn_release(void *calldata)
 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
 {
        struct nfs4_delegreturndata *d_data;
+       struct pnfs_layout_hdr *lo;
 
        d_data = (struct nfs4_delegreturndata *)data;
 
        if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task))
                return;
 
+       lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
+       if (lo && !pnfs_layout_is_valid(lo)) {
+               d_data->args.lr_args = NULL;
+               d_data->res.lr_res = NULL;
+       }
+
        nfs4_setup_sequence(d_data->res.server->nfs_client,
                        &d_data->args.seq_args,
                        &d_data->res.seq_res,
@@ -8650,6 +8664,8 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
 
        dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
 
+       nfs4_sequence_free_slot(&lgp->res.seq_res);
+
        switch (nfs4err) {
        case 0:
                goto out;
@@ -8714,7 +8730,6 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
                goto out;
        }
 
-       nfs4_sequence_free_slot(&lgp->res.seq_res);
        err = nfs4_handle_exception(server, nfs4err, exception);
        if (!status) {
                if (exception->retry)
@@ -8786,20 +8801,22 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
        if (IS_ERR(task))
                return ERR_CAST(task);
        status = rpc_wait_for_completion_task(task);
-       if (status == 0) {
+       if (status != 0)
+               goto out;
+
+       /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
+       if (task->tk_status < 0 || lgp->res.layoutp->len == 0) {
                status = nfs4_layoutget_handle_exception(task, lgp, &exception);
                *timeout = exception.timeout;
-       }
-
+       } else
+               lseg = pnfs_layout_process(lgp);
+out:
        trace_nfs4_layoutget(lgp->args.ctx,
                        &lgp->args.range,
                        &lgp->res.range,
                        &lgp->res.stateid,
                        status);
 
-       /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
-       if (status == 0 && lgp->res.layoutp->len)
-               lseg = pnfs_layout_process(lgp);
        rpc_put_task(task);
        dprintk("<-- %s status=%d\n", __func__, status);
        if (status)
@@ -8817,6 +8834,8 @@ nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
                        &lrp->args.seq_args,
                        &lrp->res.seq_res,
                        task);
+       if (!pnfs_layout_is_valid(lrp->args.layout))
+               rpc_exit(task, 0);
 }
 
 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
index a8f5e6b167491e3746a921f1f611fbb06b7d5f45..3fe81424337d07b5b19ab77d08825fb27bf523b0 100644 (file)
@@ -801,6 +801,11 @@ static inline void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
 {
 }
 
+static inline bool pnfs_layout_is_valid(const struct pnfs_layout_hdr *lo)
+{
+       return false;
+}
+
 #endif /* CONFIG_NFS_V4_1 */
 
 #if IS_ENABLED(CONFIG_NFS_V4_2)
index bb0840e234f3bc176d2af120d6ed94ee3720aad0..39d6f431da83f4227fbfbee1b6931230aa82e95c 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -509,22 +509,19 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        }
 }
 
-static struct wait_queue_head *
-pipe_get_poll_head(struct file *filp, __poll_t events)
-{
-       struct pipe_inode_info *pipe = filp->private_data;
-
-       return &pipe->wait;
-}
-
 /* No kernel lock held - fine */
-static __poll_t pipe_poll_mask(struct file *filp, __poll_t events)
+static __poll_t
+pipe_poll(struct file *filp, poll_table *wait)
 {
+       __poll_t mask;
        struct pipe_inode_info *pipe = filp->private_data;
-       int nrbufs = pipe->nrbufs;
-       __poll_t mask = 0;
+       int nrbufs;
+
+       poll_wait(filp, &pipe->wait, wait);
 
        /* Reading only -- no need for acquiring the semaphore.  */
+       nrbufs = pipe->nrbufs;
+       mask = 0;
        if (filp->f_mode & FMODE_READ) {
                mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
                if (!pipe->writers && filp->f_version != pipe->w_counter)
@@ -1023,8 +1020,7 @@ const struct file_operations pipefifo_fops = {
        .llseek         = no_llseek,
        .read_iter      = pipe_read,
        .write_iter     = pipe_write,
-       .get_poll_head  = pipe_get_poll_head,
-       .poll_mask      = pipe_poll_mask,
+       .poll           = pipe_poll,
        .unlocked_ioctl = pipe_ioctl,
        .release        = pipe_release,
        .fasync         = pipe_fasync,
index b6572944efc340d89f136c5a9c17ac409c8bef00..aaffc0c302162db0fc9d682c071469f55326dc1d 100644 (file)
@@ -235,6 +235,10 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
        if (env_start != arg_end || env_start >= env_end)
                env_start = env_end = arg_end;
 
+       /* .. and limit it to a maximum of one page of slop */
+       if (env_end >= arg_end + PAGE_SIZE)
+               env_end = arg_end + PAGE_SIZE - 1;
+
        /* We're not going to care if "*ppos" has high bits set */
        pos = arg_start + *ppos;
 
@@ -254,10 +258,19 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
        while (count) {
                int got;
                size_t size = min_t(size_t, PAGE_SIZE, count);
+               long offset;
 
-               got = access_remote_vm(mm, pos, page, size, FOLL_ANON);
-               if (got <= 0)
+               /*
+                * Are we already starting past the official end?
+                * We always include the last byte that is *supposed*
+                * to be NUL
+                */
+               offset = (pos >= arg_end) ? pos - arg_end + 1 : 0;
+
+               got = access_remote_vm(mm, pos - offset, page, size + offset, FOLL_ANON);
+               if (got <= offset)
                        break;
+               got -= offset;
 
                /* Don't walk past a NUL character once you hit arg_end */
                if (pos + got >= arg_end) {
@@ -276,12 +289,17 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
                                n = arg_end - pos - 1;
 
                        /* Cut off at first NUL after 'n' */
-                       got = n + strnlen(page+n, got-n);
-                       if (!got)
+                       got = n + strnlen(page+n, offset+got-n);
+                       if (got < offset)
                                break;
+                       got -= offset;
+
+                       /* Include the NUL if it existed */
+                       if (got < size)
+                               got++;
                }
 
-               got -= copy_to_user(buf, page, got);
+               got -= copy_to_user(buf, page+offset, got);
                if (unlikely(!got)) {
                        if (!len)
                                len = -EFAULT;
index 6ac1c92997ea2a20c3af8959c6920218f16a846d..bb1c1625b158d03f5c8685f55e370267e1cc76fb 100644 (file)
@@ -564,11 +564,20 @@ static int proc_seq_open(struct inode *inode, struct file *file)
        return seq_open(file, de->seq_ops);
 }
 
+static int proc_seq_release(struct inode *inode, struct file *file)
+{
+       struct proc_dir_entry *de = PDE(inode);
+
+       if (de->state_size)
+               return seq_release_private(inode, file);
+       return seq_release(inode, file);
+}
+
 static const struct file_operations proc_seq_fops = {
        .open           = proc_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = seq_release,
+       .release        = proc_seq_release,
 };
 
 struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
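
proc_seq_release() picks the teardown matching how the entry was opened: entries with a per-open state size are opened through the private seq_file variants and must be freed with seq_release_private(). A sketch of the corresponding open side, assuming the same state_size convention (the hunk's context only shows the plain seq_open() call):

static int my_proc_seq_open(struct inode *inode, struct file *file)
{
        struct proc_dir_entry *de = PDE(inode);

        if (de->state_size)
                return __seq_open_private(file, de->seq_ops,
                                          de->state_size) ? 0 : -ENOMEM;
        return seq_open(file, de->seq_ops);
}
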
index d88231e3b2be3ec1bc1f85c3c2fd92973e312c15..fc20e06c56ba55bf229db78cb5b5077c21935931 100644 (file)
@@ -711,21 +711,18 @@ EXPORT_SYMBOL(dquot_quota_sync);
 static unsigned long
 dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-       struct list_head *head;
        struct dquot *dquot;
        unsigned long freed = 0;
 
        spin_lock(&dq_list_lock);
-       head = free_dquots.prev;
-       while (head != &free_dquots && sc->nr_to_scan) {
-               dquot = list_entry(head, struct dquot, dq_free);
+       while (!list_empty(&free_dquots) && sc->nr_to_scan) {
+               dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
                remove_dquot_hash(dquot);
                remove_free_dquot(dquot);
                remove_inuse(dquot);
                do_destroy_dquot(dquot);
                sc->nr_to_scan--;
                freed++;
-               head = free_dquots.prev;
        }
        spin_unlock(&dq_list_lock);
        return freed;
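
The rewrite drains the list by repeatedly taking its first entry while it is non-empty, rather than stepping a cursor from the tail. The same loop shape in isolation, with lock, free_list, item and free_item() all illustrative:

static unsigned long drain_free_list(unsigned long nr_to_scan)
{
        unsigned long freed = 0;

        spin_lock(&lock);
        while (!list_empty(&free_list) && nr_to_scan) {
                struct item *it = list_first_entry(&free_list,
                                                   struct item, node);

                list_del(&it->node);    /* the head moves up by itself */
                free_item(it);
                nr_to_scan--;
                freed++;
        }
        spin_unlock(&lock);
        return freed;
}
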
index 317891ff8165ba19b775fcfaa8f6deccb58ba18f..4a6b6e4b21cb91aecdf40492c4763f09bf4ccc3f 100644 (file)
 
 #include <linux/uaccess.h>
 
-__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
-{
-       if (file->f_op->poll) {
-               return file->f_op->poll(file, pt);
-       } else if (file_has_poll_mask(file)) {
-               unsigned int events = poll_requested_events(pt);
-               struct wait_queue_head *head;
-
-               if (pt && pt->_qproc) {
-                       head = file->f_op->get_poll_head(file, events);
-                       if (!head)
-                               return DEFAULT_POLLMASK;
-                       if (IS_ERR(head))
-                               return EPOLLERR;
-                       pt->_qproc(file, head, pt);
-               }
-
-               return file->f_op->poll_mask(file, events);
-       } else {
-               return DEFAULT_POLLMASK;
-       }
-}
-EXPORT_SYMBOL_GPL(vfs_poll);
 
 /*
  * Estimate expected accuracy in ns from a timeval.
index d84a2bee4f82b2f8470b7f2fbd42b2f33beb2bce..cdad49da3ff710e6fd2cc1adf4bf4877623af670 100644 (file)
@@ -226,20 +226,21 @@ static int timerfd_release(struct inode *inode, struct file *file)
        kfree_rcu(ctx, rcu);
        return 0;
 }
-       
-static struct wait_queue_head *timerfd_get_poll_head(struct file *file,
-               __poll_t eventmask)
+
+static __poll_t timerfd_poll(struct file *file, poll_table *wait)
 {
        struct timerfd_ctx *ctx = file->private_data;
+       __poll_t events = 0;
+       unsigned long flags;
 
-       return &ctx->wqh;
-}
+       poll_wait(file, &ctx->wqh, wait);
 
-static __poll_t timerfd_poll_mask(struct file *file, __poll_t eventmask)
-{
-       struct timerfd_ctx *ctx = file->private_data;
+       spin_lock_irqsave(&ctx->wqh.lock, flags);
+       if (ctx->ticks)
+               events |= EPOLLIN;
+       spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
-       return ctx->ticks ? EPOLLIN : 0;
+       return events;
 }
 
 static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
@@ -363,8 +364,7 @@ static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg
 
 static const struct file_operations timerfd_fops = {
        .release        = timerfd_release,
-       .get_poll_head  = timerfd_get_poll_head,
-       .poll_mask      = timerfd_poll_mask,
+       .poll           = timerfd_poll,
        .read           = timerfd_read,
        .llseek         = noop_llseek,
        .show_fdinfo    = timerfd_show,
index 1b961b1d9699461cdf0771a90b4771078f6c95fc..fcda0fc97b90a14fd53aafbeb15885d85716e3a1 100644 (file)
@@ -533,8 +533,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
                        udf_write_aext(table, &epos, &eloc,
                                        (etype << 30) | elen, 1);
                } else
-                       udf_delete_aext(table, epos, eloc,
-                                       (etype << 30) | elen);
+                       udf_delete_aext(table, epos);
        } else {
                alloc_count = 0;
        }
@@ -630,7 +629,7 @@ static udf_pblk_t udf_table_new_block(struct super_block *sb,
        if (goal_elen)
                udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
        else
-               udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
+               udf_delete_aext(table, goal_epos);
        brelse(goal_epos.bh);
 
        udf_add_free_space(sb, partition, -1);
index 0a98a2369738fc2cff925c80066b92a58b299066..d9523013096f978c9d4a3ca1d8fdd23b55eeb275 100644 (file)
@@ -141,10 +141,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
                               fibh->ebh->b_data,
                               sizeof(struct fileIdentDesc) + fibh->soffset);
 
-                       fi_len = (sizeof(struct fileIdentDesc) +
-                                 cfi->lengthFileIdent +
-                                 le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3;
-
+                       fi_len = udf_dir_entry_len(cfi);
                        *nf_pos += fi_len - (fibh->eoffset - fibh->soffset);
                        fibh->eoffset = fibh->soffset + fi_len;
                } else {
@@ -152,6 +149,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
                               sizeof(struct fileIdentDesc));
                }
        }
+       /* Got last entry outside of dir size - fs is corrupted! */
+       if (*nf_pos > dir->i_size)
+               return NULL;
        return fi;
 }
 
index 7f39d17352c9697863f02140f7cf7ec1120a2215..9915a58fbabd7ff0194709ec883c1bd7003d72c7 100644 (file)
@@ -1147,8 +1147,7 @@ static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr
 
        if (startnum > endnum) {
                for (i = 0; i < (startnum - endnum); i++)
-                       udf_delete_aext(inode, *epos, laarr[i].extLocation,
-                                       laarr[i].extLength);
+                       udf_delete_aext(inode, *epos);
        } else if (startnum < endnum) {
                for (i = 0; i < (endnum - startnum); i++) {
                        udf_insert_aext(inode, *epos, laarr[i].extLocation,
@@ -2176,14 +2175,15 @@ static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
        return (nelen >> 30);
 }
 
-int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
-                      struct kernel_lb_addr eloc, uint32_t elen)
+int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
 {
        struct extent_position oepos;
        int adsize;
        int8_t etype;
        struct allocExtDesc *aed;
        struct udf_inode_info *iinfo;
+       struct kernel_lb_addr eloc;
+       uint32_t elen;
 
        if (epos.bh) {
                get_bh(epos.bh);
index c586026508db82d0a27a1df1b964bcbf3fcec45c..06f37ddd2997f4894859722fb7f801994e91239d 100644 (file)
@@ -351,8 +351,6 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
        loff_t f_pos;
        loff_t size = udf_ext0_offset(dir) + dir->i_size;
        int nfidlen;
-       uint8_t lfi;
-       uint16_t liu;
        udf_pblk_t block;
        struct kernel_lb_addr eloc;
        uint32_t elen = 0;
@@ -383,7 +381,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
                namelen = 0;
        }
 
-       nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3;
+       nfidlen = ALIGN(sizeof(struct fileIdentDesc) + namelen, UDF_NAME_PAD);
 
        f_pos = udf_ext0_offset(dir);
 
@@ -424,12 +422,8 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
                        goto out_err;
                }
 
-               liu = le16_to_cpu(cfi->lengthOfImpUse);
-               lfi = cfi->lengthFileIdent;
-
                if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
-                       if (((sizeof(struct fileIdentDesc) +
-                                       liu + lfi + 3) & ~3) == nfidlen) {
+                       if (udf_dir_entry_len(cfi) == nfidlen) {
                                cfi->descTag.tagSerialNum = cpu_to_le16(1);
                                cfi->fileVersionNum = cpu_to_le16(1);
                                cfi->fileCharacteristics = 0;
@@ -1201,9 +1195,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        if (dir_fi) {
                dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location);
-               udf_update_tag((char *)dir_fi,
-                               (sizeof(struct fileIdentDesc) +
-                               le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3);
+               udf_update_tag((char *)dir_fi, udf_dir_entry_len(dir_fi));
                if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
                        mark_inode_dirty(old_inode);
                else
index bae311b59400459338d2c60f9e962429066e1483..84c47dde4d268a12e8f1aeaf2c6e6dda91db4972 100644 (file)
@@ -132,6 +132,12 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
 extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
                        struct fileIdentDesc *, struct udf_fileident_bh *,
                        uint8_t *, uint8_t *);
+static inline unsigned int udf_dir_entry_len(struct fileIdentDesc *cfi)
+{
+       return ALIGN(sizeof(struct fileIdentDesc) +
+               le16_to_cpu(cfi->lengthOfImpUse) + cfi->lengthFileIdent,
+               UDF_NAME_PAD);
+}
 
 /* file.c */
 extern long udf_ioctl(struct file *, unsigned int, unsigned long);
@@ -167,8 +173,7 @@ extern int udf_add_aext(struct inode *, struct extent_position *,
                        struct kernel_lb_addr *, uint32_t, int);
 extern void udf_write_aext(struct inode *, struct extent_position *,
                           struct kernel_lb_addr *, uint32_t, int);
-extern int8_t udf_delete_aext(struct inode *, struct extent_position,
-                             struct kernel_lb_addr, uint32_t);
+extern int8_t udf_delete_aext(struct inode *, struct extent_position);
 extern int8_t udf_next_aext(struct inode *, struct extent_position *,
                            struct kernel_lb_addr *, uint32_t *, int);
 extern int8_t udf_current_aext(struct inode *, struct extent_position *,
index 84db76e0e3e3c58ae7d25b38a46a1ce2b4d5cae4..fecd187fcf2c3cd69bd79954ccc272737cb2ce2b 100644 (file)
@@ -157,6 +157,7 @@ __xfs_ag_resv_free(
        error = xfs_mod_fdblocks(pag->pag_mount, oldresv, true);
        resv->ar_reserved = 0;
        resv->ar_asked = 0;
+       resv->ar_orig_reserved = 0;
 
        if (error)
                trace_xfs_ag_resv_free_error(pag->pag_mount, pag->pag_agno,
@@ -189,13 +190,34 @@ __xfs_ag_resv_init(
        struct xfs_mount                *mp = pag->pag_mount;
        struct xfs_ag_resv              *resv;
        int                             error;
-       xfs_extlen_t                    reserved;
+       xfs_extlen_t                    hidden_space;
 
        if (used > ask)
                ask = used;
-       reserved = ask - used;
 
-       error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true);
+       switch (type) {
+       case XFS_AG_RESV_RMAPBT:
+               /*
+                * Space taken by the rmapbt is not subtracted from fdblocks
+                * because the rmapbt lives in the free space.  Here we must
+                * subtract the entire reservation from fdblocks so that we
+                * always have blocks available for rmapbt expansion.
+                */
+               hidden_space = ask;
+               break;
+       case XFS_AG_RESV_METADATA:
+               /*
+                * Space taken by all other metadata btrees is accounted
+                * on-disk as used space.  We therefore only hide the space
+                * that is reserved but not used by the trees.
+                */
+               hidden_space = ask - used;
+               break;
+       default:
+               ASSERT(0);
+               return -EINVAL;
+       }
+       error = xfs_mod_fdblocks(mp, -(int64_t)hidden_space, true);
        if (error) {
                trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
                                error, _RET_IP_);
@@ -216,7 +238,8 @@ __xfs_ag_resv_init(
 
        resv = xfs_perag_resv(pag, type);
        resv->ar_asked = ask;
-       resv->ar_reserved = resv->ar_orig_reserved = reserved;
+       resv->ar_orig_reserved = hidden_space;
+       resv->ar_reserved = ask - used;
 
        trace_xfs_ag_resv_init(pag, type, ask);
        return 0;
index 01628f0c9a0c227543087c70bd7391ad3f0eee2c..7205268b30bc54b488bf513b1a2b6bb737769d64 100644 (file)
@@ -5780,6 +5780,32 @@ xfs_bmap_collapse_extents(
        return error;
 }
 
+/* Make sure we won't be right-shifting an extent past the maximum bound. */
+int
+xfs_bmap_can_insert_extents(
+       struct xfs_inode        *ip,
+       xfs_fileoff_t           off,
+       xfs_fileoff_t           shift)
+{
+       struct xfs_bmbt_irec    got;
+       int                     is_empty;
+       int                     error = 0;
+
+       ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+
+       if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+               return -EIO;
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
+       if (!error && !is_empty && got.br_startoff >= off &&
+           ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
+               error = -EINVAL;
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+       return error;
+}
+
 int
 xfs_bmap_insert_extents(
        struct xfs_trans        *tp,
index 99dddbd0fcc6c606e59544d69a0435b0cc205c5f..9b49ddf99c4115479fe8271cc5b492a2d86b2b70 100644 (file)
@@ -227,6 +227,8 @@ int xfs_bmap_collapse_extents(struct xfs_trans *tp, struct xfs_inode *ip,
                xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
                bool *done, xfs_fsblock_t *firstblock,
                struct xfs_defer_ops *dfops);
+int    xfs_bmap_can_insert_extents(struct xfs_inode *ip, xfs_fileoff_t off,
+               xfs_fileoff_t shift);
 int    xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip,
                xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
                bool *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
index 1c5a8aaf2bfcea6b51b76e7aa7dff4b55b4e4145..059bc44c27e83edf3cb1fe2c494490e65f93c5d8 100644 (file)
@@ -962,6 +962,9 @@ typedef enum xfs_dinode_fmt {
                XFS_DFORK_DSIZE(dip, mp) : \
                XFS_DFORK_ASIZE(dip, mp))
 
+#define XFS_DFORK_MAXEXT(dip, mp, w) \
+       (XFS_DFORK_SIZE(dip, mp, w) / sizeof(struct xfs_bmbt_rec))
+
 /*
  * Return pointers to the data or attribute forks.
  */
@@ -1526,6 +1529,8 @@ typedef struct xfs_bmdr_block {
 #define BMBT_STARTBLOCK_BITLEN 52
 #define BMBT_BLOCKCOUNT_BITLEN 21
 
+#define BMBT_STARTOFF_MASK     ((1ULL << BMBT_STARTOFF_BITLEN) - 1)
+
 typedef struct xfs_bmbt_rec {
        __be64                  l0, l1;
 } xfs_bmbt_rec_t;
index d38d724534c48e2a4644be06acbf6d64da9a65b2..33dc34655ac3ddb32a5a5fa4711048285c8a0343 100644 (file)
@@ -374,6 +374,47 @@ xfs_log_dinode_to_disk(
        }
 }
 
+static xfs_failaddr_t
+xfs_dinode_verify_fork(
+       struct xfs_dinode       *dip,
+       struct xfs_mount        *mp,
+       int                     whichfork)
+{
+       uint32_t                di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);
+
+       switch (XFS_DFORK_FORMAT(dip, whichfork)) {
+       case XFS_DINODE_FMT_LOCAL:
+               /*
+                * no local regular files yet
+                */
+               if (whichfork == XFS_DATA_FORK) {
+                       if (S_ISREG(be16_to_cpu(dip->di_mode)))
+                               return __this_address;
+                       if (be64_to_cpu(dip->di_size) >
+                                       XFS_DFORK_SIZE(dip, mp, whichfork))
+                               return __this_address;
+               }
+               if (di_nextents)
+                       return __this_address;
+               break;
+       case XFS_DINODE_FMT_EXTENTS:
+               if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
+                       return __this_address;
+               break;
+       case XFS_DINODE_FMT_BTREE:
+               if (whichfork == XFS_ATTR_FORK) {
+                       if (di_nextents > MAXAEXTNUM)
+                               return __this_address;
+               } else if (di_nextents > MAXEXTNUM) {
+                       return __this_address;
+               }
+               break;
+       default:
+               return __this_address;
+       }
+       return NULL;
+}
+
 xfs_failaddr_t
 xfs_dinode_verify(
        struct xfs_mount        *mp,
@@ -441,24 +482,9 @@ xfs_dinode_verify(
        case S_IFREG:
        case S_IFLNK:
        case S_IFDIR:
-               switch (dip->di_format) {
-               case XFS_DINODE_FMT_LOCAL:
-                       /*
-                        * no local regular files yet
-                        */
-                       if (S_ISREG(mode))
-                               return __this_address;
-                       if (di_size > XFS_DFORK_DSIZE(dip, mp))
-                               return __this_address;
-                       if (dip->di_nextents)
-                               return __this_address;
-                       /* fall through */
-               case XFS_DINODE_FMT_EXTENTS:
-               case XFS_DINODE_FMT_BTREE:
-                       break;
-               default:
-                       return __this_address;
-               }
+               fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
+               if (fa)
+                       return fa;
                break;
        case 0:
                /* Uninitialized inode ok. */
@@ -468,17 +494,9 @@ xfs_dinode_verify(
        }
 
        if (XFS_DFORK_Q(dip)) {
-               switch (dip->di_aformat) {
-               case XFS_DINODE_FMT_LOCAL:
-                       if (dip->di_anextents)
-                               return __this_address;
-               /* fall through */
-               case XFS_DINODE_FMT_EXTENTS:
-               case XFS_DINODE_FMT_BTREE:
-                       break;
-               default:
-                       return __this_address;
-               }
+               fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
+               if (fa)
+                       return fa;
        } else {
                /*
                 * If there is no fork offset, this may be a freshly-made inode
index 65fc4ed2e9a1050b76b1cd85d874294e52a8afd9..b228c821bae6802c0aa8ab9b79069d703245bbe2 100644 (file)
@@ -1029,8 +1029,8 @@ xfs_rtalloc_query_range(
        if (low_rec->ar_startext >= mp->m_sb.sb_rextents ||
            low_rec->ar_startext == high_rec->ar_startext)
                return 0;
-       if (high_rec->ar_startext >= mp->m_sb.sb_rextents)
-               high_rec->ar_startext = mp->m_sb.sb_rextents - 1;
+       if (high_rec->ar_startext > mp->m_sb.sb_rextents)
+               high_rec->ar_startext = mp->m_sb.sb_rextents;
 
        /* Iterate the bitmap, looking for discrepancies. */
        rtstart = low_rec->ar_startext;
index c35009a8669953dfee4013615ca62b47237b4d77..83b1e8c6c18f939e8afcabdb4eb37fd33e459da8 100644 (file)
@@ -685,12 +685,10 @@ xfs_getbmap(
 }
 
 /*
- * dead simple method of punching delalyed allocation blocks from a range in
- * the inode. Walks a block at a time so will be slow, but is only executed in
- * rare error cases so the overhead is not critical. This will always punch out
- * both the start and end blocks, even if the ranges only partially overlap
- * them, so it is up to the caller to ensure that partial blocks are not
- * passed in.
+ * Dead simple method of punching delayed allocation blocks from a range in
+ * the inode.  This will always punch out both the start and end blocks, even
+ * if the ranges only partially overlap them, so it is up to the caller to
+ * ensure that partial blocks are not passed in.
  */
 int
 xfs_bmap_punch_delalloc_range(
@@ -698,63 +696,44 @@ xfs_bmap_punch_delalloc_range(
        xfs_fileoff_t           start_fsb,
        xfs_fileoff_t           length)
 {
-       xfs_fileoff_t           remaining = length;
+       struct xfs_ifork        *ifp = &ip->i_df;
+       xfs_fileoff_t           end_fsb = start_fsb + length;
+       struct xfs_bmbt_irec    got, del;
+       struct xfs_iext_cursor  icur;
        int                     error = 0;
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
-       do {
-               int             done;
-               xfs_bmbt_irec_t imap;
-               int             nimaps = 1;
-               xfs_fsblock_t   firstblock;
-               struct xfs_defer_ops dfops;
+       if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+               error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
+               if (error)
+                       return error;
+       }
 
-               /*
-                * Map the range first and check that it is a delalloc extent
-                * before trying to unmap the range. Otherwise we will be
-                * trying to remove a real extent (which requires a
-                * transaction) or a hole, which is probably a bad idea...
-                */
-               error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
-                                      XFS_BMAPI_ENTIRE);
+       if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
+               return 0;
 
-               if (error) {
-                       /* something screwed, just bail */
-                       if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-                               xfs_alert(ip->i_mount,
-                       "Failed delalloc mapping lookup ino %lld fsb %lld.",
-                                               ip->i_ino, start_fsb);
-                       }
-                       break;
-               }
-               if (!nimaps) {
-                       /* nothing there */
-                       goto next_block;
-               }
-               if (imap.br_startblock != DELAYSTARTBLOCK) {
-                       /* been converted, ignore */
-                       goto next_block;
-               }
-               WARN_ON(imap.br_blockcount == 0);
+       while (got.br_startoff + got.br_blockcount > start_fsb) {
+               del = got;
+               xfs_trim_extent(&del, start_fsb, length);
 
                /*
-                * Note: while we initialise the firstblock/dfops pair, they
-                * should never be used because blocks should never be
-                * allocated or freed for a delalloc extent and hence we need
-                * don't cancel or finish them after the xfs_bunmapi() call.
+                * A delete can push the cursor forward. Step back to the
+                * previous extent if the current one is not delalloc or
+                * lies outside the target range.
                 */
-               xfs_defer_init(&dfops, &firstblock);
-               error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
-                                       &dfops, &done);
-               if (error)
-                       break;
+               if (!del.br_blockcount ||
+                   !isnullstartblock(del.br_startblock)) {
+                       if (!xfs_iext_prev_extent(ifp, &icur, &got))
+                               break;
+                       continue;
+               }
 
-               ASSERT(!xfs_defer_has_unfinished_work(&dfops));
-next_block:
-               start_fsb++;
-               remaining--;
-       } while(remaining > 0);
+               error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
+                                                 &got, &del);
+               if (error || !xfs_iext_get_extent(ifp, &icur, &got))
+                       break;
+       }
 
        return error;
 }
@@ -1208,7 +1187,22 @@ xfs_free_file_space(
                return 0;
        if (offset + len > XFS_ISIZE(ip))
                len = XFS_ISIZE(ip) - offset;
-       return iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+       error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+       if (error)
+               return error;
+
+       /*
+        * If we zeroed right up to EOF and EOF straddles a page boundary we
+        * must make sure that the post-EOF area is also zeroed because the
+        * page could be mmap'd and iomap_zero_range doesn't do that for us.
+        * Writeback of the eof page will do this, albeit clumsily.
+        */
+       if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
+               error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+                               (offset + len) & ~PAGE_MASK, LLONG_MAX);
+       }
+
+       return error;
 }
 
 /*
@@ -1404,6 +1398,10 @@ xfs_insert_file_space(
 
        trace_xfs_insert_file_space(ip);
 
+       error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
+       if (error)
+               return error;
+
        error = xfs_prepare_shift(ip, offset);
        if (error)
                return error;
index c34fa9c342f25fdbee7e39fead0078e30859bba3..c7157bc48bd192ea60650577232ea87e8bfbbf02 100644 (file)
@@ -513,8 +513,8 @@ xfs_getfsmap_rtdev_rtbitmap_query(
        struct xfs_trans                *tp,
        struct xfs_getfsmap_info        *info)
 {
-       struct xfs_rtalloc_rec          alow;
-       struct xfs_rtalloc_rec          ahigh;
+       struct xfs_rtalloc_rec          alow = { 0 };
+       struct xfs_rtalloc_rec          ahigh = { 0 };
        int                             error;
 
        xfs_ilock(tp->t_mountp->m_rbmip, XFS_ILOCK_SHARED);
index a7afcad6b71140aed25f02979946cb9795afa644..3f2bd6032cf86525d6d344d60be903d9c739267c 100644 (file)
@@ -387,7 +387,7 @@ xfs_reserve_blocks(
        do {
                free = percpu_counter_sum(&mp->m_fdblocks) -
                                                mp->m_alloc_set_aside;
-               if (!free)
+               if (free <= 0)
                        break;
 
                delta = request - mp->m_resblks;
index 7a96c4e0ab5c621f38d9e034622d26ebd8d95437..5df4de666cc118848c86ddc33420d4147031ce57 100644 (file)
@@ -3236,7 +3236,6 @@ xfs_iflush_cluster(
        struct xfs_inode        *cip;
        int                     nr_found;
        int                     clcount = 0;
-       int                     bufwasdelwri;
        int                     i;
 
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
@@ -3360,37 +3359,22 @@ xfs_iflush_cluster(
         * inode buffer and shut down the filesystem.
         */
        rcu_read_unlock();
-       /*
-        * Clean up the buffer.  If it was delwri, just release it --
-        * brelse can handle it with no problems.  If not, shut down the
-        * filesystem before releasing the buffer.
-        */
-       bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
-       if (bufwasdelwri)
-               xfs_buf_relse(bp);
-
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 
-       if (!bufwasdelwri) {
-               /*
-                * Just like incore_relse: if we have b_iodone functions,
-                * mark the buffer as an error and call them.  Otherwise
-                * mark it as stale and brelse.
-                */
-               if (bp->b_iodone) {
-                       bp->b_flags &= ~XBF_DONE;
-                       xfs_buf_stale(bp);
-                       xfs_buf_ioerror(bp, -EIO);
-                       xfs_buf_ioend(bp);
-               } else {
-                       xfs_buf_stale(bp);
-                       xfs_buf_relse(bp);
-               }
-       }
-
        /*
-        * Unlocks the flush lock
+        * We'll always have an inode attached to the buffer for completion
+        * processing by the time we are called from xfs_iflush(). Hence we
+        * always need to do IO completion processing to abort the inodes
+        * attached to the buffer.  Handle them just like the shutdown case
+        * in xfs_buf_submit().
         */
+       ASSERT(bp->b_iodone);
+       bp->b_flags &= ~XBF_DONE;
+       xfs_buf_stale(bp);
+       xfs_buf_ioerror(bp, -EIO);
+       xfs_buf_ioend(bp);
+
+       /* abort the corrupt inode, as it was not attached to the buffer */
        xfs_iflush_abort(cip, false);
        kmem_free(cilist);
        xfs_perag_put(pag);
@@ -3486,12 +3470,17 @@ xfs_iflush(
                xfs_log_force(mp, 0);
 
        /*
-        * inode clustering:
-        * see if other inodes can be gathered into this write
+        * inode clustering: try to gather other inodes into this write
+        *
+        * Note: Any error during clustering will result in the filesystem
+        * being shut down and completion callbacks run on the cluster buffer.
+        * As we have already flushed and attached this inode to the buffer,
+        * it has already been aborted and released by xfs_iflush_cluster() and
+        * so we have no further error handling to do here.
         */
        error = xfs_iflush_cluster(ip, bp);
        if (error)
-               goto cluster_corrupt_out;
+               return error;
 
        *bpp = bp;
        return 0;
@@ -3500,12 +3489,8 @@ xfs_iflush(
        if (bp)
                xfs_buf_relse(bp);
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-cluster_corrupt_out:
-       error = -EFSCORRUPTED;
 abort_out:
-       /*
-        * Unlocks the flush lock
-        */
+       /* abort the corrupt inode, as it was not attached to the buffer */
        xfs_iflush_abort(ip, false);
        return error;
 }
index 49f5492eed3bdb9d85c53843df03546c83f2c799..55876dd02f0c8c75fa5653eeab82881bd3741928 100644 (file)
@@ -963,12 +963,13 @@ xfs_ilock_for_iomap(
        unsigned                *lockmode)
 {
        unsigned                mode = XFS_ILOCK_SHARED;
+       bool                    is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);
 
        /*
         * COW writes may allocate delalloc space or convert unwritten COW
         * extents, so we need to make sure to take the lock exclusively here.
         */
-       if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO))) {
+       if (xfs_is_reflink_inode(ip) && is_write) {
                /*
                 * FIXME: It could still overwrite on unshared extents and not
                 * need allocation.
@@ -989,6 +990,7 @@ xfs_ilock_for_iomap(
                mode = XFS_ILOCK_EXCL;
        }
 
+relock:
        if (flags & IOMAP_NOWAIT) {
                if (!xfs_ilock_nowait(ip, mode))
                        return -EAGAIN;
@@ -996,6 +998,17 @@ xfs_ilock_for_iomap(
                xfs_ilock(ip, mode);
        }
 
+       /*
+        * The reflink iflag could have changed since the earlier unlocked
+        * check, so if we got ILOCK_SHARED for a write but we're now a
+        * reflink inode we have to switch to ILOCK_EXCL and relock.
+        */
+       if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_reflink_inode(ip)) {
+               xfs_iunlock(ip, mode);
+               mode = XFS_ILOCK_EXCL;
+               goto relock;
+       }
+
        *lockmode = mode;
        return 0;
 }
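
The relock label implements the usual optimistic lock upgrade: the lock mode is chosen from an unlocked hint, the hint is re-validated once the lock is held, and the code loops with the stronger mode if it went stale. A generic sketch with a hypothetical rwsem-protected object:

static void lock_obj(struct obj *o, bool *got_excl)
{
        bool excl = obj_needs_excl(o);          /* unlocked hint */

relock:
        if (excl)
                down_write(&o->sem);
        else
                down_read(&o->sem);

        /* The hint may be stale; re-check under the lock we hold. */
        if (!excl && obj_needs_excl(o)) {
                up_read(&o->sem);
                excl = true;
                goto relock;
        }
        *got_excl = excl;
}
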
index e040af120b69b3a69b38517cde3092773b391260..524f543c5b820fe45de5866cd950509190a74612 100644 (file)
@@ -258,7 +258,12 @@ xfs_trans_alloc(
        if (!(flags & XFS_TRANS_NO_WRITECOUNT))
                sb_start_intwrite(mp->m_super);
 
-       WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
+       /*
+        * Zero-reservation ("empty") transactions can't modify anything, so
+        * they're allowed to run while we're frozen.
+        */
+       WARN_ON(resp->tr_logres > 0 &&
+               mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
        atomic_inc(&mp->m_active_trans);
 
        tp = kmem_zone_zalloc(xfs_trans_zone,
index 40a916efd7c039d2132014fcaf5ec780e4a8248e..1194a4c78d557fb411e9672291f6bab6e3623e9d 100644 (file)
@@ -309,7 +309,7 @@ static inline void acpi_processor_ppc_exit(void)
 {
        return;
 }
-static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
+static inline void acpi_processor_ppc_has_changed(struct acpi_processor *pr,
                                                                int event_flag)
 {
        static unsigned int printout = 1;
@@ -320,7 +320,6 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
                       "Consider compiling CPUfreq support into your kernel.\n");
                printout = 0;
        }
-       return 0;
 }
 static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
 {
index 0763f065b975a543fb0e887d4af8d63bf7354f05..d10f1e7d6ba8c37140ae9332b59399baae4ffdf1 100644 (file)
@@ -63,7 +63,7 @@ typedef struct qspinlock {
 /*
  * Initializer
  */
-#define        __ARCH_SPIN_LOCK_UNLOCKED       { .val = ATOMIC_INIT(0) }
+#define        __ARCH_SPIN_LOCK_UNLOCKED       { { .val = ATOMIC_INIT(0) } }
 
 /*
  * Bitfields in the atomic value:
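
The extra braces are needed because the first member of struct qspinlock is now an anonymous union: some older compilers reject, or warn about, a designated initializer that reaches into an anonymous union without its own brace level. An illustration with a hypothetical type of the same shape:

struct lockish {
        union {
                atomic_t val;
                u8 bytes[4];
        };
};

/* one brace pair per aggregate level */
static struct lockish l = { { .val = ATOMIC_INIT(0) } };
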
index cc414db9da0ad6758f696d0de2a251ce99d8d301..482461d8931d9186c4a11b7b2d9a24f981a595bc 100644 (file)
@@ -245,7 +245,8 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
                        int offset, size_t size, int flags);
 void af_alg_free_resources(struct af_alg_async_req *areq);
 void af_alg_async_cb(struct crypto_async_request *_req, int err);
-__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events);
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
+                        poll_table *wait);
 struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
                                           unsigned int areqlen);
 int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
index 4b35a66383f983f5594f3b71885145d6b1b101ef..e54f40974eb04ca516987ac3df89b0997b5ca0dd 100644 (file)
@@ -443,6 +443,9 @@ int acpi_check_resource_conflict(const struct resource *res);
 int acpi_check_region(resource_size_t start, resource_size_t n,
                      const char *name);
 
+acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
+                               u32 level);
+
 int acpi_resources_are_enforced(void);
 
 #ifdef CONFIG_HIBERNATION
index 0c27515d2cf6db3683da2341a700283f82a99645..8124815eb1218b5653572fc4a04f5d4d734e3469 100644 (file)
@@ -214,6 +214,7 @@ struct atmphy_ops {
 struct atm_skb_data {
        struct atm_vcc  *vcc;           /* ATM VCC */
        unsigned long   atm_options;    /* ATM layer options */
+       unsigned int    acct_truesize;  /* truesize accounted to vcc */
 };
 
 #define VCC_HTABLE_SIZE 32
@@ -241,6 +242,20 @@ void vcc_insert_socket(struct sock *sk);
 
 void atm_dev_release_vccs(struct atm_dev *dev);
 
+static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+       /*
+        * Because ATM skbs may not belong to a sock (and we don't
+        * necessarily want to), skb->truesize may be adjusted,
+        * escaping the hack in pskb_expand_head() which avoids
+        * doing so for some cases. So stash the value of truesize
+        * at the time we accounted it, and atm_pop_raw() can use
+        * that value later, in case it changes.
+        */
+       refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+       ATM_SKB(skb)->acct_truesize = skb->truesize;
+       ATM_SKB(skb)->atm_options = vcc->atm_options;
+}
 
 static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
 {
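
atm_account_tx() centralises the wmem charge and stashes the truesize that was charged, so atm_pop_raw() can later subtract the same amount even if pskb_expand_head() changed skb->truesize in between. A hypothetical driver transmit path using the new helper:

static int my_vcc_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
        atm_account_tx(vcc, skb);       /* charge vcc, record truesize */
        return vcc->dev->ops->send(vcc, skb);
}
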
index 0bd432a4d7bd00ce376292720edd104d617c80c2..24251762c20c94edd238cfca1c1f55f0269d4e80 100644 (file)
@@ -22,7 +22,6 @@ struct dentry;
  */
 enum wb_state {
        WB_registered,          /* bdi_register() was done */
-       WB_shutting_down,       /* wb_shutdown() in progress */
        WB_writeback_running,   /* Writeback is in progress */
        WB_has_dirty_io,        /* Dirty inodes on ->b_{dirty|io|more_io} */
        WB_start_all,           /* nr_pages == 0 (all) work pending */
@@ -189,6 +188,7 @@ struct backing_dev_info {
 #ifdef CONFIG_CGROUP_WRITEBACK
        struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
        struct rb_root cgwb_congested_tree; /* their congested states */
+       struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
 #else
        struct bdi_writeback_congested *wb_congested;
 #endif
index 9154570edf2963628f873d7404930450735ff41a..79226ca8f80f2db7f813cf63973c61288c1b78ab 100644 (file)
@@ -1119,8 +1119,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
        if (!q->limits.chunk_sectors)
                return q->limits.max_sectors;
 
-       return q->limits.chunk_sectors -
-                       (offset & (q->limits.chunk_sectors - 1));
+       return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
+                       (offset & (q->limits.chunk_sectors - 1))));
 }
 
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
index 975fb4cf1bb743ccff5fae92e82df582533c0ff2..79795c5fa7c37d3281fdc006ac701a4cb17c4cf6 100644 (file)
@@ -188,12 +188,38 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                                                              \
        __ret;                                                                \
 })
+int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+                          enum bpf_prog_type ptype, struct bpf_prog *prog);
+int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+                          enum bpf_prog_type ptype);
+int cgroup_bpf_prog_query(const union bpf_attr *attr,
+                         union bpf_attr __user *uattr);
 #else
 
+struct bpf_prog;
 struct cgroup_bpf {};
 static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
 static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
 
+static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+                                        enum bpf_prog_type ptype,
+                                        struct bpf_prog *prog)
+{
+       return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+                                        enum bpf_prog_type ptype)
+{
+       return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
+                                       union bpf_attr __user *uattr)
+{
+       return -EINVAL;
+}
+
 #define cgroup_bpf_enabled (0)
 #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
index 995c3b1e59bfa82ef3ad0504b090ab28a898f016..8827e797ff97d0973ddf1d4217a885cee9bb63ee 100644 (file)
@@ -488,12 +488,15 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
 
 /* Map specifics */
 struct xdp_buff;
+struct sk_buff;
 
 struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
 void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
 void __dev_map_flush(struct bpf_map *map);
 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
                    struct net_device *dev_rx);
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+                            struct bpf_prog *xdp_prog);
 
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
 void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
@@ -586,6 +589,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
        return 0;
 }
 
+struct sk_buff;
+
+static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
+                                          struct sk_buff *skb,
+                                          struct bpf_prog *xdp_prog)
+{
+       return 0;
+}
+
 static inline
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
 {
@@ -684,6 +696,8 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
 struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
 struct sock  *__sock_hash_lookup_elem(struct bpf_map *map, void *key);
 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
+int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+                       struct bpf_prog *prog);
 #else
 static inline struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
 {
@@ -702,6 +716,12 @@ static inline int sock_map_prog(struct bpf_map *map,
 {
        return -EOPNOTSUPP;
 }
+
+static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+                                     struct bpf_prog *prog)
+{
+       return -EINVAL;
+}
 #endif
 
 #if defined(CONFIG_XDP_SOCKETS)
index 5f8a4283092d0a6960fd663a33832221a9615353..9d9ff755ec2972cf6e46d1905e1a5caae9dd5ae6 100644 (file)
@@ -5,11 +5,12 @@
 #include <uapi/linux/bpf.h>
 
 #ifdef CONFIG_BPF_LIRC_MODE2
-int lirc_prog_attach(const union bpf_attr *attr);
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
 int lirc_prog_detach(const union bpf_attr *attr);
 int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
 #else
-static inline int lirc_prog_attach(const union bpf_attr *attr)
+static inline int lirc_prog_attach(const union bpf_attr *attr,
+                                  struct bpf_prog *prog)
 {
        return -EINVAL;
 }
index b1a5562b3215b71302422b7a727bbb2cf499d8f3..c68acc47da57b6a7bef7b8ef84a9c897d4b83ce6 100644 (file)
@@ -72,6 +72,9 @@
  */
 #ifndef COMPAT_SYSCALL_DEFINEx
 #define COMPAT_SYSCALL_DEFINEx(x, name, ...)                                   \
+       __diag_push();                                                          \
+       __diag_ignore(GCC, 8, "-Wattribute-alias",                              \
+                     "Type aliasing is used to sanitize syscall arguments");\
        asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));       \
        asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))        \
                __attribute__((alias(__stringify(__se_compat_sys##name))));     \
        asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__));  \
        asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))   \
        {                                                                       \
-               return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+               long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+               __MAP(x,__SC_TEST,__VA_ARGS__);                                 \
+               return ret;                                                     \
        }                                                                       \
+       __diag_pop();                                                           \
        static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 #endif /* COMPAT_SYSCALL_DEFINEx */
 
index f1a7492a5cc8cc59813734d1b258dbaf04bf76c8..fd282c7d3e5e88463f5efc512916b9ff174b4e41 100644 (file)
 #if GCC_VERSION >= 50100
 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
 #endif
+
+/*
+ * Turn individual warnings and errors on and off locally, depending
+ * on version.
+ */
+#define __diag_GCC(version, severity, s) \
+       __diag_GCC_ ## version(__diag_GCC_ ## severity s)
+
+/* Severity used in pragma directives */
+#define __diag_GCC_ignore      ignored
+#define __diag_GCC_warn                warning
+#define __diag_GCC_error       error
+
+/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */
+#if GCC_VERSION >= 40600
+#define __diag_str1(s)         #s
+#define __diag_str(s)          __diag_str1(s)
+#define __diag(s)              _Pragma(__diag_str(GCC diagnostic s))
+#endif
+
+#if GCC_VERSION >= 80000
+#define __diag_GCC_8(s)                __diag(s)
+#else
+#define __diag_GCC_8(s)
+#endif
index 6b79a9bba9a7630eb0b3a8fe35251d41717a2da0..a8ba6b04152c13c9ca2960898cd6ea4e89d37957 100644 (file)
@@ -271,4 +271,22 @@ struct ftrace_likely_data {
 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
 #endif
 
+#ifndef __diag
+#define __diag(string)
+#endif
+
+#ifndef __diag_GCC
+#define __diag_GCC(version, severity, string)
+#endif
+
+#define __diag_push()  __diag(push)
+#define __diag_pop()   __diag(pop)
+
+#define __diag_ignore(compiler, version, option, comment) \
+       __diag_ ## compiler(version, ignore, option)
+#define __diag_warn(compiler, version, option, comment) \
+       __diag_ ## compiler(version, warn, option)
+#define __diag_error(compiler, version, option, comment) \
+       __diag_ ## compiler(version, error, option)
+
 #endif /* __LINUX_COMPILER_TYPES_H */
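Tracing one expansion shows how the two headers cooperate; a hedged walk-through under the definitions above, on a GCC >= 8 build:

/*
 * __diag_ignore(GCC, 8, "-Wattribute-alias", "reason")
 *   -> __diag_GCC(8, ignore, "-Wattribute-alias")
 *   -> __diag_GCC_8(__diag_GCC_ignore "-Wattribute-alias")
 *   -> __diag_GCC_8(ignored "-Wattribute-alias")
 *   -> __diag(ignored "-Wattribute-alias")        (only when GCC >= 8)
 *   -> _Pragma("GCC diagnostic ignored \"-Wattribute-alias\"")
 *
 * On GCC < 8 (or non-GCC compilers) __diag_GCC_8() swallows its argument
 * and nothing is emitted; the free-text "reason" argument is documentation
 * only and never reaches the compiler.
 */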
index bf53d893ad02bbe460dd64ce03d8cfe10d709931..57f20a0a7794908b47fdb151e530d53e3a598b54 100644 (file)
@@ -115,12 +115,17 @@ extern struct cpumask __cpu_active_mask;
 #define cpu_active(cpu)                ((cpu) == 0)
 #endif
 
-/* verify cpu argument to cpumask_* operators */
-static inline unsigned int cpumask_check(unsigned int cpu)
+static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
 {
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
-       WARN_ON_ONCE(cpu >= nr_cpumask_bits);
+       WARN_ON_ONCE(cpu >= bits);
 #endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+}
+
+/* verify cpu argument to cpumask_* operators */
+static inline unsigned int cpumask_check(unsigned int cpu)
+{
+       cpu_max_bits_warn(cpu, nr_cpumask_bits);
        return cpu;
 }
 
index 3855e3800f483e07cc4c16e68f6a1f2780de1b3e..deb0f663252fc55e39546c7d3107e96dfb3f03ae 100644 (file)
@@ -135,7 +135,7 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
 
 ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops);
-int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
                    pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size, pfn_t pfn);
index b67bf6ac907d8f324494efaf1d441b0ee7955a13..3c5a4cb3eb953174c688c4b965ba09d87925fdb3 100644 (file)
@@ -48,7 +48,7 @@
  *   CMA should not be used by the device drivers directly. It is
  *   only a helper framework for dma-mapping subsystem.
  *
- *   For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ *   For more information, see kernel-docs in kernel/dma/contiguous.c
  */
 
 #ifdef __KERNEL__
index 79563840c295cfcd40d17287eebcf793ee010020..572e11bb869672cbc9b6da8a375760b192f9eaf9 100644 (file)
@@ -59,8 +59,7 @@ struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
                                           unsigned int rxqs);
 #define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
 
-struct sk_buff **eth_gro_receive(struct sk_buff **head,
-                                struct sk_buff *skb);
+struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
 int eth_gro_complete(struct sk_buff *skb, int nhoff);
 
 /* Reserved Ethernet Addresses per IEEE 802.1Q */
index 45fc0f5000d8899ead3592cbdaa813d726e2c2af..300baad62c889100722b1778a7c4819a16fbe2d1 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/cryptohash.h>
 #include <linux/set_memory.h>
 #include <linux/kallsyms.h>
+#include <linux/if_vlan.h>
 
 #include <net/sch_generic.h>
 
@@ -469,15 +470,16 @@ struct sock_fprog_kern {
 };
 
 struct bpf_binary_header {
-       unsigned int pages;
-       u8 image[];
+       u32 pages;
+       /* Some arches need word alignment for their instructions */
+       u8 image[] __aligned(4);
 };
 
 struct bpf_prog {
        u16                     pages;          /* Number of allocated pages */
        u16                     jited:1,        /* Is our filter JIT'ed? */
                                jit_requested:1,/* archs need to JIT the prog */
-                               locked:1,       /* Program image locked? */
+                               undo_set_mem:1, /* Passed set_memory_ro() checkpoint */
                                gpl_compatible:1, /* Is filter GPL compatible? */
                                cb_access:1,    /* Is control block accessed? */
                                dst_needed:1,   /* Do we need dst entry? */
@@ -671,50 +673,27 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
 
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
-       fp->locked = 1;
-       WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
-}
-
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
-       if (fp->locked) {
-               WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
-               /* In case set_memory_rw() fails, we want to be the first
-                * to crash here instead of some random place later on.
-                */
-               fp->locked = 0;
-       }
-}
-
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
-{
-       WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
-}
-
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
-       WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
-}
-#else
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
+       fp->undo_set_mem = 1;
+       set_memory_ro((unsigned long)fp, fp->pages);
 }
 
 static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
+       if (fp->undo_set_mem)
+               set_memory_rw((unsigned long)fp, fp->pages);
 }
 
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
+       set_memory_ro((unsigned long)hdr, hdr->pages);
 }
 
 static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
 {
+       set_memory_rw((unsigned long)hdr, hdr->pages);
 }
-#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
 
 static inline struct bpf_binary_header *
 bpf_jit_binary_hdr(const struct bpf_prog *fp)
@@ -786,6 +765,21 @@ static inline bool bpf_dump_raw_ok(void)
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len);
 
+static inline int __xdp_generic_ok_fwd_dev(struct sk_buff *skb,
+                                          struct net_device *fwd)
+{
+       unsigned int len;
+
+       if (unlikely(!(fwd->flags & IFF_UP)))
+               return -ENETDOWN;
+
+       len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
+       if (skb->len > len)
+               return -EMSGSIZE;
+
+       return 0;
+}
+
 /* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
  * same cpu context. Further for best results no more than a single map
  * for the do_redirect/do_flush pair should be used. This limitation is
@@ -961,6 +955,9 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 }
 #endif /* CONFIG_BPF_JIT */
 
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
+
 #define BPF_ANC                BIT(15)
 
 static inline bool bpf_needs_clear_a(const struct sock_filter *first)
index 5c91108846db20894ab70dafe43b7922fe08fb1f..d78d146a98da95c9c76417f8196f24be515df612 100644 (file)
@@ -1720,8 +1720,6 @@ struct file_operations {
        int (*iterate) (struct file *, struct dir_context *);
        int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
-       struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
-       __poll_t (*poll_mask) (struct file *, __poll_t);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
index b462d9ea80078c01980b0ff596fd80bc10d4870c..dc3dac40f0698f28824481371514ce1974599be5 100644 (file)
@@ -11,9 +11,8 @@
 
 /*
  * qoriq ptp registers
- * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
  */
-struct qoriq_ptp_registers {
+struct ctrl_regs {
        u32 tmr_ctrl;     /* Timer control register */
        u32 tmr_tevent;   /* Timestamp event register */
        u32 tmr_temask;   /* Timer event mask register */
@@ -28,22 +27,47 @@ struct qoriq_ptp_registers {
        u8  res1[4];
        u32 tmroff_h;     /* Timer offset high */
        u32 tmroff_l;     /* Timer offset low */
-       u8  res2[8];
+};
+
+struct alarm_regs {
        u32 tmr_alarm1_h; /* Timer alarm 1 high register */
        u32 tmr_alarm1_l; /* Timer alarm 1 low register */
        u32 tmr_alarm2_h; /* Timer alarm 2 high register */
        u32 tmr_alarm2_l; /* Timer alarm 2 low register */
-       u8  res3[48];
+};
+
+struct fiper_regs {
        u32 tmr_fiper1;   /* Timer fixed period interval */
        u32 tmr_fiper2;   /* Timer fixed period interval */
        u32 tmr_fiper3;   /* Timer fixed period interval */
-       u8  res4[20];
+};
+
+struct etts_regs {
        u32 tmr_etts1_h;  /* Timestamp of general purpose external trigger */
        u32 tmr_etts1_l;  /* Timestamp of general purpose external trigger */
        u32 tmr_etts2_h;  /* Timestamp of general purpose external trigger */
        u32 tmr_etts2_l;  /* Timestamp of general purpose external trigger */
 };
 
+struct qoriq_ptp_registers {
+       struct ctrl_regs __iomem *ctrl_regs;
+       struct alarm_regs __iomem *alarm_regs;
+       struct fiper_regs __iomem *fiper_regs;
+       struct etts_regs __iomem *etts_regs;
+};
+
+/* Offset definitions for the four register groups */
+#define CTRL_REGS_OFFSET       0x0
+#define ALARM_REGS_OFFSET      0x40
+#define FIPER_REGS_OFFSET      0x80
+#define ETTS_REGS_OFFSET       0xa0
+
+#define FMAN_CTRL_REGS_OFFSET  0x80
+#define FMAN_ALARM_REGS_OFFSET 0xb8
+#define FMAN_FIPER_REGS_OFFSET 0xd0
+#define FMAN_ETTS_REGS_OFFSET  0xe0
+
+
 /* Bit definitions for the TMR_CTRL register */
 #define ALM1P                 (1<<31) /* Alarm1 output polarity */
 #define ALM2P                 (1<<30) /* Alarm2 output polarity */
@@ -105,10 +129,10 @@ struct qoriq_ptp_registers {
 #define DRIVER         "ptp_qoriq"
 #define DEFAULT_CKSEL  1
 #define N_EXT_TS       2
-#define REG_SIZE       sizeof(struct qoriq_ptp_registers)
 
 struct qoriq_ptp {
-       struct qoriq_ptp_registers __iomem *regs;
+       void __iomem *base;
+       struct qoriq_ptp_registers regs;
        spinlock_t lock; /* protects regs */
        struct ptp_clock *clock;
        struct ptp_clock_info caps;
index e5fd2707b6df79f4b8d1d6bb4bf01e981e379f3a..9493d4a388dbb9a3ac71b0fe9e56566eb90c8327 100644 (file)
@@ -93,6 +93,7 @@ enum hwmon_temp_attributes {
 #define HWMON_T_MIN_ALARM      BIT(hwmon_temp_min_alarm)
 #define HWMON_T_MAX_ALARM      BIT(hwmon_temp_max_alarm)
 #define HWMON_T_CRIT_ALARM     BIT(hwmon_temp_crit_alarm)
+#define HWMON_T_LCRIT_ALARM    BIT(hwmon_temp_lcrit_alarm)
 #define HWMON_T_EMERGENCY_ALARM        BIT(hwmon_temp_emergency_alarm)
 #define HWMON_T_FAULT          BIT(hwmon_temp_fault)
 #define HWMON_T_OFFSET         BIT(hwmon_temp_offset)
@@ -187,12 +188,16 @@ enum hwmon_power_attributes {
        hwmon_power_cap_hyst,
        hwmon_power_cap_max,
        hwmon_power_cap_min,
+       hwmon_power_min,
        hwmon_power_max,
        hwmon_power_crit,
+       hwmon_power_lcrit,
        hwmon_power_label,
        hwmon_power_alarm,
        hwmon_power_cap_alarm,
+       hwmon_power_min_alarm,
        hwmon_power_max_alarm,
+       hwmon_power_lcrit_alarm,
        hwmon_power_crit_alarm,
 };
 
@@ -213,12 +218,16 @@ enum hwmon_power_attributes {
 #define HWMON_P_CAP_HYST               BIT(hwmon_power_cap_hyst)
 #define HWMON_P_CAP_MAX                        BIT(hwmon_power_cap_max)
 #define HWMON_P_CAP_MIN                        BIT(hwmon_power_cap_min)
+#define HWMON_P_MIN                    BIT(hwmon_power_min)
 #define HWMON_P_MAX                    BIT(hwmon_power_max)
+#define HWMON_P_LCRIT                  BIT(hwmon_power_lcrit)
 #define HWMON_P_CRIT                   BIT(hwmon_power_crit)
 #define HWMON_P_LABEL                  BIT(hwmon_power_label)
 #define HWMON_P_ALARM                  BIT(hwmon_power_alarm)
 #define HWMON_P_CAP_ALARM              BIT(hwmon_power_cap_alarm)
+#define HWMON_P_MIN_ALARM              BIT(hwmon_power_min_alarm)
 #define HWMON_P_MAX_ALARM              BIT(hwmon_power_max_alarm)
+#define HWMON_P_LCRIT_ALARM            BIT(hwmon_power_lcrit_alarm)
 #define HWMON_P_CRIT_ALARM             BIT(hwmon_power_crit_alarm)
 
 enum hwmon_energy_attributes {
@@ -389,4 +398,27 @@ devm_hwmon_device_register_with_info(struct device *dev,
 void hwmon_device_unregister(struct device *dev);
 void devm_hwmon_device_unregister(struct device *dev);
 
+/**
+ * hwmon_is_bad_char - Is the char invalid in a hwmon name
+ * @ch: the char to be considered
+ *
+ * hwmon_is_bad_char() can be used to determine if the given character
+ * may not be used in a hwmon name.
+ *
+ * Returns true if the char is invalid, false otherwise.
+ */
+static inline bool hwmon_is_bad_char(const char ch)
+{
+       switch (ch) {
+       case '-':
+       case '*':
+       case ' ':
+       case '\t':
+       case '\n':
+               return true;
+       default:
+               return false;
+       }
+}
+
 #endif
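A short usage sketch; the wrapper name is hypothetical, only hwmon_is_bad_char() comes from this header:

/* hypothetical validation helper built on hwmon_is_bad_char() */
static bool hwmon_name_is_valid(const char *name)
{
	for (; *name; name++)
		if (hwmon_is_bad_char(*name))
			return false;	/* e.g. "fan 1" fails on the space */
	return true;
}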
index 8fe7e4306816f44c586e860744031ae3d038df6f..9c03a7d5e400df9f2853c60c9816c150c447d937 100644 (file)
@@ -1433,11 +1433,13 @@ struct ieee80211_ht_operation {
 #define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
 
 /*
- * A-PMDU buffer sizes
- * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
+ * A-MPDU buffer sizes
+ * According to the HT spec, the size varies from 8 to 64 frames;
+ * HE adds the ability to have up to 256 frames.
  */
-#define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF 0x40
+#define IEEE80211_MIN_AMPDU_BUF                0x8
+#define IEEE80211_MAX_AMPDU_BUF_HT     0x40
+#define IEEE80211_MAX_AMPDU_BUF                0x100
 
 
 /* Spatial Multiplexing Power Save Modes (for capability) */
@@ -1539,6 +1541,106 @@ struct ieee80211_vht_operation {
        __le16 basic_mcs_set;
 } __packed;
 
+/**
+ * struct ieee80211_he_cap_elem - HE capabilities element
+ *
+ * This structure is the "HE capabilities element" fixed fields as
+ * described in P802.11ax_D2.0 sections 9.4.2.237.2 and 9.4.2.237.3
+ */
+struct ieee80211_he_cap_elem {
+       u8 mac_cap_info[5];
+       u8 phy_cap_info[9];
+} __packed;
+
+#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN   5
+
+/**
+ * enum ieee80211_he_mcs_support - HE MCS support definitions
+ * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
+ *     number of streams
+ * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported
+ * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported
+ * @IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported
+ *
+ * These definitions are used in each 2-bit subfield of the rx_mcs_*
+ * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are
+ * both split into 8 subfields by number of streams. These values indicate
+ * which MCSes are supported for the number of streams the value appears
+ * for.
+ */
+enum ieee80211_he_mcs_support {
+       IEEE80211_HE_MCS_SUPPORT_0_7    = 0,
+       IEEE80211_HE_MCS_SUPPORT_0_9    = 1,
+       IEEE80211_HE_MCS_SUPPORT_0_11   = 2,
+       IEEE80211_HE_MCS_NOT_SUPPORTED  = 3,
+};
+
+/**
+ * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field
+ *
+ * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field
+ * described in P802.11ax_D2.0 section 9.4.2.237.4
+ *
+ * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel
+ *     widths of 80MHz and below.
+ * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel
+ *     widths of 80MHz and below.
+ * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel
+ *     width 160MHz.
+ * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel
+ *     width 160MHz.
+ * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for
+ *     channel width 80p80MHz.
+ * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for
+ *     channel width 80p80MHz.
+ */
+struct ieee80211_he_mcs_nss_supp {
+       __le16 rx_mcs_80;
+       __le16 tx_mcs_80;
+       __le16 rx_mcs_160;
+       __le16 tx_mcs_160;
+       __le16 rx_mcs_80p80;
+       __le16 tx_mcs_80p80;
+} __packed;
+
+/**
+ * struct ieee80211_he_operation - HE operation element
+ *
+ * This structure is the "HE operation element" fields as
+ * described in P802.11ax_D2.0 section 9.4.2.238
+ */
+struct ieee80211_he_operation {
+       __le32 he_oper_params;
+       __le16 he_mcs_nss_set;
+       /* Optional 0,1,3 or 4 bytes: depends on @he_oper_params */
+       u8 optional[0];
+} __packed;
+
+/**
+ * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field
+ *
+ * This structure is the "MU AC Parameter Record" fields as
+ * described in P802.11ax_D2.0 section 9.4.2.240
+ */
+struct ieee80211_he_mu_edca_param_ac_rec {
+       u8 aifsn;
+       u8 ecw_min_max;
+       u8 mu_edca_timer;
+} __packed;
+
+/**
+ * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element
+ *
+ * This structure is the "MU EDCA Parameter Set element" fields as
+ * described in P802.11ax_D2.0 section 9.4.2.240
+ */
+struct ieee80211_mu_edca_param_set {
+       u8 mu_qos_info;
+       struct ieee80211_he_mu_edca_param_ac_rec ac_be;
+       struct ieee80211_he_mu_edca_param_ac_rec ac_bk;
+       struct ieee80211_he_mu_edca_param_ac_rec ac_vi;
+       struct ieee80211_he_mu_edca_param_ac_rec ac_vo;
+} __packed;
 
 /* 802.11ac VHT Capabilities */
 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895                 0x00000000
@@ -1577,6 +1679,328 @@ struct ieee80211_vht_operation {
 #define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN                   0x10000000
 #define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN                   0x20000000
 
+/* 802.11ax HE MAC capabilities */
+#define IEEE80211_HE_MAC_CAP0_HTC_HE                           0x01
+#define IEEE80211_HE_MAC_CAP0_TWT_REQ                          0x02
+#define IEEE80211_HE_MAC_CAP0_TWT_RES                          0x04
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP            0x00
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1             0x08
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2             0x10
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3             0x18
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK                        0x18
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1              0x00
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2              0x20
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4              0x40
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8              0x60
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16             0x80
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32             0xa0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64             0xc0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED      0xe0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK           0xe0
+
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED          0x00
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128                        0x01
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256                        0x02
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512                        0x03
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK               0x03
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US               0x00
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US               0x04
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US              0x08
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK              0x0c
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_1              0x00
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_2              0x10
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_3              0x20
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_4              0x30
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_5              0x40
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_6              0x50
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_7              0x60
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8              0x70
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_MASK           0x70
+
+/* Link adaptation is split between bytes HE_MAC_CAP1 and
+ * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE
+ * is set, in which case the following values apply:
+ * 0 = No feedback.
+ * 1 = Reserved.
+ * 2 = Unsolicited feedback.
+ * 3 = Both.
+ */
+#define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION                  0x80
+
+#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION                  0x01
+#define IEEE80211_HE_MAC_CAP2_ALL_ACK                          0x02
+#define IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED                 0x04
+#define IEEE80211_HE_MAC_CAP2_BSR                              0x08
+#define IEEE80211_HE_MAC_CAP2_BCAST_TWT                                0x10
+#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP                  0x20
+#define IEEE80211_HE_MAC_CAP2_MU_CASCADING                     0x40
+#define IEEE80211_HE_MAC_CAP2_ACK_EN                           0x80
+
+#define IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU      0x01
+#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL                      0x02
+#define IEEE80211_HE_MAC_CAP3_OFDMA_RA                         0x04
+
+/* The maximum length of an A-MPDU is defined by the combination of the Maximum
+ * A-MPDU Length Exponent field in the HT capabilities, VHT capabilities and the
+ * same field in the HE capabilities.
+ */
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_USE_VHT      0x00
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_1                0x08
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2                0x10
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_RESERVED     0x18
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK         0x18
+#define IEEE80211_HE_MAC_CAP3_A_AMSDU_FRAG                     0x20
+#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED                   0x40
+#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS                0x80
+
+#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG             0x01
+#define IEEE80211_HE_MAC_CAP4_QTP                              0x02
+#define IEEE80211_HE_MAC_CAP4_BQR                              0x04
+#define IEEE80211_HE_MAC_CAP4_SR_RESP                          0x08
+#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP                       0x10
+#define IEEE80211_HE_MAC_CAP4_OPS                              0x20
+#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU                   0x40
+
+/* 802.11ax HE PHY capabilities */
+#define IEEE80211_HE_PHY_CAP0_DUAL_BAND                                        0x01
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G            0x02
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G      0x04
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G           0x08
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G     0x10
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G       0x20
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G       0x40
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK                   0xfe
+
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ 0x01
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ 0x02
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ        0x04
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ        0x08
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK                    0x0f
+#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A                           0x10
+#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD                   0x20
+#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US         0x40
+/* Midamble RX Max NSTS is split between byte #2 and byte #3 */
+#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS                     0x80
+
+#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS                     0x01
+#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US                     0x02
+#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ                      0x04
+#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ                      0x08
+#define IEEE80211_HE_PHY_CAP2_DOPPLER_TX                               0x10
+#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX                               0x20
+
+/* Note that the meaning of UL MU below differs between an AP and a non-AP
+ * STA: in the AP case it indicates support for Rx, while in the non-AP STA
+ * case it indicates support for Tx.
+ */
+#define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO                       0x40
+#define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO                    0x80
+
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM                  0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK                    0x01
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK                    0x02
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM                  0x03
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK                    0x03
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1                         0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2                         0x04
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM                  0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK                    0x08
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK                    0x10
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM                  0x18
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK                    0x18
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1                         0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2                         0x20
+#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA            0x40
+#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER                            0x80
+
+#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE                            0x01
+#define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER                            0x02
+
+/* Minimum allowed value of Max STS under 80MHz is 3 */
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4         0x0c
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5         0x10
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6         0x14
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7         0x18
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8         0x1c
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK      0x1c
+
+/* Minimum allowed value of Max STS above 80MHz is 3 */
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4         0x60
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5         0x80
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6         0xa0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7         0xc0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8         0xe0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK      0xe0
+
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1     0x00
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2     0x01
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3     0x02
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4     0x03
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5     0x04
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6     0x05
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7     0x06
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8     0x07
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK  0x07
+
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1     0x00
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2     0x08
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3     0x10
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4     0x18
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5     0x20
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6     0x28
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7     0x30
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8     0x38
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK  0x38
+
+#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK                         0x40
+#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK                         0x80
+
+#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU                      0x01
+#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU                      0x02
+#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB                    0x04
+#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB                    0x08
+#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB                              0x10
+#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE                     0x20
+#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO              0x40
+#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT                    0x80
+
+#define IEEE80211_HE_PHY_CAP7_SRP_BASED_SR                             0x01
+#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR                    0x02
+#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI         0x04
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_1                                 0x08
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_2                                 0x10
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_3                                 0x18
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_4                                 0x20
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_5                                 0x28
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_6                                 0x30
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_7                                 0x38
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK                              0x38
+#define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ                      0x40
+#define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ                      0x80
+
+#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI         0x01
+#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G             0x02
+#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU                  0x04
+#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU                  0x08
+#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI              0x10
+#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_2X_AND_1XLTF                 0x20
+
+/* 802.11ax HE TX/RX MCS NSS Support  */
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS                   (3)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS                     (6)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS                     (11)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK                    0x07c0
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK                    0xf800
+
+/* TX/RX HE MCS Support field Highest MCS subfield encoding */
+enum ieee80211_he_highest_mcs_supported_subfield_enc {
+       HIGHEST_MCS_SUPPORTED_MCS7 = 0,
+       HIGHEST_MCS_SUPPORTED_MCS8,
+       HIGHEST_MCS_SUPPORTED_MCS9,
+       HIGHEST_MCS_SUPPORTED_MCS10,
+       HIGHEST_MCS_SUPPORTED_MCS11,
+};
+
+/* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */
+static inline u8
+ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap)
+{
+       u8 count = 4;
+
+       if (he_cap->phy_cap_info[0] &
+           IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+               count += 4;
+
+       if (he_cap->phy_cap_info[0] &
+           IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+               count += 4;
+
+       return count;
+}
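As a worked example with illustrative capability bits: a radio advertising 160MHz but not 80+80MHz support carries the <=80MHz Rx/Tx map pair (4 bytes) plus the 160MHz pair (4 more), so the helper returns 8:

const struct ieee80211_he_cap_elem cap = {	/* illustrative values */
	.phy_cap_info = {
		IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
	},
};

u8 mcs_nss_len = ieee80211_he_mcs_nss_size(&cap);	/* 4 + 4 = 8 */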
+
+/* 802.11ax HE PPE Thresholds */
+#define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS                   (1)
+#define IEEE80211_PPE_THRES_NSS_POS                            (0)
+#define IEEE80211_PPE_THRES_NSS_MASK                           (7)
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU  \
+       (BIT(5) | BIT(6))
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK              0x78
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS               (3)
+#define IEEE80211_PPE_THRES_INFO_PPET_SIZE                     (3)
+
+/*
+ * Calculate 802.11ax HE capabilities IE PPE field size
+ * Input: header byte of ppe_thres (first byte), and the HE capability
+ * IE's PHY cap info array (u8 *)
+ */
+static inline u8
+ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
+{
+       u8 n;
+
+       if ((phy_cap_info[6] &
+            IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0)
+               return 0;
+
+       n = hweight8(ppe_thres_hdr &
+                    IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+       n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >>
+                  IEEE80211_PPE_THRES_NSS_POS));
+
+       /*
+        * Each pair is 6 bits, and we need to add the 7 "header" bits to the
+        * total size.
+        */
+       n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7;
+       n = DIV_ROUND_UP(n, 8);
+
+       return n;
+}
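A worked example of the size math, with an illustrative header byte: ppe_thres_hdr = 0x19 puts 1 in the NSS subfield (two spatial streams) and sets two RU index bits:

/*
 * n    = hweight8(0x19 & 0x78)     = 2   RU index bits set
 * n   *= 1 + ((0x19 & 0x7) >> 0)   = 4   PPET entries
 * bits = 4 * 3 * 2 + 7             = 31  6 bits per entry + 7 header bits
 * size = DIV_ROUND_UP(31, 8)       = 4   octets
 */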
+
+/* HE Operation defines */
+#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK                  0x0000003f
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK           0x000001c0
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET         6
+#define IEEE80211_HE_OPERATION_TWT_REQUIRED                    0x00000200
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK              0x000ffc00
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET            10
+#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR               0x00100000
+#define IEEE80211_HE_OPERATION_VHT_OPER_INFO                   0x00200000
+#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP                  0x10000000
+#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR              0x20000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED              0x40000000
+
+/*
+ * ieee80211_he_oper_size - calculate 802.11ax HE Operation IE size
+ * @he_oper_ie: byte data of the HE Operation IE, starting from the byte
+ *     after the ext ID byte. It is assumed that he_oper_ie has at least
+ *     sizeof(struct ieee80211_he_operation) bytes, checked already in
+ *     ieee802_11_parse_elems_crc()
+ * @return the actual size of the IE data (not including header), or 0 on error
+ */
+static inline u8
+ieee80211_he_oper_size(const u8 *he_oper_ie)
+{
+       struct ieee80211_he_operation *he_oper = (void *)he_oper_ie;
+       u8 oper_len = sizeof(struct ieee80211_he_operation);
+       u32 he_oper_params;
+
+       /* Make sure the input is not NULL */
+       if (!he_oper_ie)
+               return 0;
+
+       /* Calc required length */
+       he_oper_params = le32_to_cpu(he_oper->he_oper_params);
+       if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
+               oper_len += 3;
+       if (he_oper_params & IEEE80211_HE_OPERATION_MULTI_BSSID_AP)
+               oper_len++;
+
+       /* Add the first byte (extension ID) to the total length */
+       oper_len++;
+
+       return oper_len;
+}
+
 /* Authentication algorithms */
 #define WLAN_AUTH_OPEN 0
 #define WLAN_AUTH_SHARED_KEY 1
@@ -1992,6 +2416,11 @@ enum ieee80211_eid_ext {
        WLAN_EID_EXT_FILS_WRAPPED_DATA = 8,
        WLAN_EID_EXT_FILS_PUBLIC_KEY = 12,
        WLAN_EID_EXT_FILS_NONCE = 13,
+       WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14,
+       WLAN_EID_EXT_HE_CAPABILITY = 35,
+       WLAN_EID_EXT_HE_OPERATION = 36,
+       WLAN_EID_EXT_UORA = 37,
+       WLAN_EID_EXT_HE_MU_EDCA = 38,
 };
 
 /* Action category code */
index d95cae09dea0873a0cb119e63f5c3d6d7c73d823..ac42da56f7a28f375dbc50887fee19eaf97c2327 100644 (file)
@@ -74,6 +74,11 @@ struct team_port {
        long mode_priv[0];
 };
 
+static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
+{
+       return rcu_dereference(dev->rx_handler_data);
+}
+
 static inline bool team_port_enabled(struct team_port *port)
 {
        return port->index != -1;
@@ -84,6 +89,19 @@ static inline bool team_port_txable(struct team_port *port)
        return port->linkup && team_port_enabled(port);
 }
 
+static inline bool team_port_dev_txable(const struct net_device *port_dev)
+{
+       struct team_port *port;
+       bool txable;
+
+       rcu_read_lock();
+       port = team_port_get_rcu(port_dev);
+       txable = port ? team_port_txable(port) : false;
+       rcu_read_unlock();
+
+       return txable;
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static inline void team_netpoll_send_skb(struct team_port *port,
                                         struct sk_buff *skb)
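A hedged usage sketch; the caller and slave_dev are hypothetical, only team_port_dev_txable() comes from this header:

/* pick a team slave for transmit only if its port is enabled and up */
if (team_port_dev_txable(slave_dev))
	return slave_dev;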
index 767467d886de4d53f5f5b862614b3f1644a5ecfa..67c75372b6915289e6d0876ac21368c89eb3896a 100644 (file)
@@ -141,7 +141,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
        char __user *user_buffer);
 size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
 int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
-int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length);
+int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
 int iio_dma_buffer_request_update(struct iio_buffer *buffer);
 
 int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
index d7188de4db968c14c5db1a44fca6421aec22d041..3f4bf60b0bb55c4d9d2708593d2439aec269f9c9 100644 (file)
@@ -100,7 +100,7 @@ static inline bool input_is_mt_axis(int axis)
        return axis == ABS_MT_SLOT || input_is_mt_value(axis);
 }
 
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
                                unsigned int tool_type, bool active);
 
 void input_mt_report_finger_count(struct input_dev *dev, int count);
index 6cc2df7f7ac949e72a59821ae9d25ce529371213..e1c9eea6015b56b3a671813d0dfe2bc2f6d1cf61 100644 (file)
@@ -4,7 +4,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/uidgid.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
 #include <uapi/linux/ipc.h>
 #include <linux/refcount.h>
 
index b5630c8eb2f3a910b922eeb08153635e88faadbb..6cea726612b770168eab5041259d8c52fc264bf2 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/nsproxy.h>
 #include <linux/ns_common.h>
 #include <linux/refcount.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
 
 struct user_namespace;
 
index 4bd2f34947f4a7647a485fe2e8092c1fd055f630..201de12a9957171003757967bb69161c3d060575 100644 (file)
@@ -503,6 +503,7 @@ struct irq_chip {
  * IRQCHIP_SKIP_SET_WAKE:      Skip chip.irq_set_wake(), for this irq chip
  * IRQCHIP_ONESHOT_SAFE:       One shot does not require mask/unmask
  * IRQCHIP_EOI_THREADED:       Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SUPPORTS_LEVEL_MSI: Chip can provide two doorbells for Level MSIs
  */
 enum {
        IRQCHIP_SET_TYPE_MASKED         = (1 <<  0),
index 25b33b66453773cb01509725fa68664c555ffd3f..dd1e40ddac7d8235e31aeb96fe460c77a70ac681 100644 (file)
@@ -145,11 +145,6 @@ static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
        return desc->irq_common_data.handler_data;
 }
 
-static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
-{
-       return desc->irq_common_data.msi_desc;
-}
-
 /*
  * Architectures call this to let the generic IRQ layer
  * handle an interrupt.
index d231232385349146faf64c42cd05a73bddd0fcca..941dc0a5a877998e46d11541bdb491655a5f34af 100644 (file)
@@ -666,7 +666,7 @@ do {                                                                        \
  * your code. (Extra memory is used for special buffers that are
  * allocated when trace_printk() is used.)
  *
- * A little optization trick is done here. If there's only one
+ * A little optimization trick is done here. If there's only one
  * argument, there's no need to scan the string for printf formats.
  * The trace_puts() will suffice. But how can we take advantage of
  * using trace_puts() when trace_printk() has only one argument?
index 4b129df4d46b5a4c26d970c6a0c385b6c208d9d1..de04cc5ed53673ebea7362bea2a43f8355018125 100644 (file)
@@ -285,6 +285,36 @@ static inline void list_cut_position(struct list_head *list,
                __list_cut_position(list, head, entry);
 }
 
+/**
+ * list_cut_before - cut a list into two, before given entry
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ *
+ * This helper moves the initial part of @head, up to but
+ * excluding @entry, from @head to @list. @entry should be
+ * an element you know is on @head. @list should be an empty
+ * list, or one whose data you do not care about losing.
+ * If @entry == @head, all entries on @head are moved to
+ * @list.
+ */
+static inline void list_cut_before(struct list_head *list,
+                                  struct list_head *head,
+                                  struct list_head *entry)
+{
+       if (head->next == entry) {
+               INIT_LIST_HEAD(list);
+               return;
+       }
+       list->next = head->next;
+       list->next->prev = list;
+       list->prev = entry->prev;
+       list->prev->next = list;
+       head->next = entry;
+       entry->prev = head;
+}
+
 static inline void __list_splice(const struct list_head *list,
                                 struct list_head *prev,
                                 struct list_head *next)
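A hedged usage sketch for the new helper; find_first_unexpired() and process() are hypothetical:

LIST_HEAD(done);
struct list_head *cut = find_first_unexpired(&pending);	/* hypothetical */

list_cut_before(&done, &pending, cut);	/* moves everything before cut */
process(&done);				/* hypothetical consumer */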
index 31ca3e28b0ebe98369a1582430230a2f68c6baae..a6ddefc60517899167b55b53b0007ba3e3b9ed80 100644 (file)
@@ -38,6 +38,7 @@ struct memory_block {
 
 int arch_get_memory_phys_device(unsigned long start_pfn);
 unsigned long memory_block_size_bytes(void);
+int set_memory_block_size_order(unsigned int order);
 
 /* These states are exposed to userspace as text strings in sysfs */
 #define        MEM_ONLINE              (1<<0) /* exposed to userspace */
index 122e7e9d3091b5b55f4ded91ae7a9dccb620d193..dca6ab4eaa9927168003877f884965520bcdb4f7 100644 (file)
@@ -630,6 +630,7 @@ struct mlx4_caps {
        u32                     vf_caps;
        bool                    wol_port[MLX4_MAX_PORTS + 1];
        struct mlx4_rate_limit_caps rl_caps;
+       u32                     health_buffer_addrs;
 };
 
 struct mlx4_buf_list {
@@ -851,6 +852,12 @@ struct mlx4_vf_dev {
        u8                      n_ports;
 };
 
+struct mlx4_fw_crdump {
+       bool snapshot_enable;
+       struct devlink_region *region_crspace;
+       struct devlink_region *region_fw_health;
+};
+
 enum mlx4_pci_status {
        MLX4_PCI_STATUS_DISABLED,
        MLX4_PCI_STATUS_ENABLED,
@@ -871,6 +878,7 @@ struct mlx4_dev_persistent {
        u8      interface_state;
        struct mutex            pci_status_mutex; /* sync pci state */
        enum mlx4_pci_status    pci_status;
+       struct mlx4_fw_crdump   crdump;
 };
 
 struct mlx4_dev {
index d3c9db492b30065750726992ba1001c48153232b..fab5121ffb8f5de2b5f39b6a0a7e43cca4b047e0 100644 (file)
@@ -8,6 +8,8 @@
 
 #include <linux/mlx5/driver.h>
 
+#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
+
 enum {
        SRIOV_NONE,
        SRIOV_LEGACY,
index 27134c4fcb76eb5140ff4828066e73e11d671cd9..ac281f5ec9b8077ba859f33eaf61e3f03ecdeb3d 100644 (file)
@@ -922,7 +922,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         vnic_env_queue_counters[0x1];
        u8         ets[0x1];
        u8         nic_flow_table[0x1];
-       u8         eswitch_flow_table[0x1];
+       u8         eswitch_manager[0x1];
        u8         device_memory[0x1];
        u8         mcam_reg[0x1];
        u8         pcam_reg[0x1];
index 64d0f40d4cc36924ff051bfa14db27fe56a8af89..37e065a80a436f7ae53b77cda133da9bdcc0ef34 100644 (file)
@@ -576,6 +576,7 @@ struct mlx5_ifc_fpga_ipsec_sa {
 enum fpga_tls_cmds {
        CMD_SETUP_STREAM                = 0x1001,
        CMD_TEARDOWN_STREAM             = 0x1002,
+       CMD_RESYNC_RX                   = 0x1003,
 };
 
 #define MLX5_TLS_1_2 (0)
index 2014bd19f28eff41ae37b80eba324644c537e291..96a71a648eed991530489ecea56b89d8755b395c 100644 (file)
@@ -501,6 +501,7 @@ enum dmi_field {
        DMI_PRODUCT_VERSION,
        DMI_PRODUCT_SERIAL,
        DMI_PRODUCT_UUID,
+       DMI_PRODUCT_SKU,
        DMI_PRODUCT_FAMILY,
        DMI_BOARD_VENDOR,
        DMI_BOARD_NAME,
index d633f737b3c63f1eb137e55cd56bcdb2a873757a..6675b9f819798414a81e2b4104b3090392cd0ee6 100644 (file)
@@ -2,7 +2,7 @@
 #define __LINUX_MROUTE_BASE_H
 
 #include <linux/netdevice.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
 #include <linux/spinlock.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -254,6 +254,7 @@ struct mr_table {
        atomic_t                cache_resolve_queue_len;
        bool                    mroute_do_assert;
        bool                    mroute_do_pim;
+       bool                    mroute_do_wrvifwhole;
        int                     mroute_reg_vif_num;
 };
 
index 08b6eb964dd6865af3e1a7079a54b1e99f77e077..6554d3ba4396b3df49acac934ad16eeb71a695f4 100644 (file)
@@ -147,7 +147,6 @@ struct proto_ops {
        int             (*getname)   (struct socket *sock,
                                      struct sockaddr *addr,
                                      int peer);
-       __poll_t        (*poll_mask) (struct socket *sock, __poll_t events);
        __poll_t        (*poll)      (struct file *file, struct socket *sock,
                                      struct poll_table_struct *wait);
        int             (*ioctl)     (struct socket *sock, unsigned int cmd,
index 623bb8ced060046fdb2b856a147696ec1280be50..2b2a6dce16301d4d9b683dad7503b383d8708500 100644 (file)
@@ -79,6 +79,7 @@ enum {
        NETIF_F_HW_ESP_TX_CSUM_BIT,     /* ESP with TX checksum offload */
        NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
        NETIF_F_HW_TLS_TX_BIT,          /* Hardware TLS TX offload */
+       NETIF_F_HW_TLS_RX_BIT,          /* Hardware TLS RX offload */
 
        NETIF_F_GRO_HW_BIT,             /* Hardware Generic receive offload */
        NETIF_F_HW_TLS_RECORD_BIT,      /* Offload TLS record */
@@ -151,6 +152,7 @@ enum {
 #define NETIF_F_HW_TLS_RECORD  __NETIF_F(HW_TLS_RECORD)
 #define NETIF_F_GSO_UDP_L4     __NETIF_F(GSO_UDP_L4)
 #define NETIF_F_HW_TLS_TX      __NETIF_F(HW_TLS_TX)
+#define NETIF_F_HW_TLS_RX      __NETIF_F(HW_TLS_RX)
 
 #define for_each_netdev_feature(mask_addr, bit)        \
        for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
index 3ec9850c7936f01c0f7564dbe519e95ce0849639..c1295c7a452ec482dd31c9bb682da4168747f21d 100644 (file)
@@ -302,6 +302,17 @@ struct netdev_boot_setup {
 
 int __init netdev_boot_setup(char *str);
 
+struct gro_list {
+       struct list_head        list;
+       int                     count;
+};
+
+/*
+ * size of gro hash buckets, must be less than the bit width of
+ * napi_struct::gro_bitmask
+ */
+#define GRO_HASH_BUCKETS       8
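Each bucket's non-empty state occupies one bit of napi_struct::gro_bitmask (an unsigned long), which is the constraint the comment above states; a hedged sketch of enforcing it at build time, not part of this patch:

/* e.g. inside a NAPI init function: */
BUILD_BUG_ON(GRO_HASH_BUCKETS > BITS_PER_LONG);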
+
 /*
  * Structure for NAPI scheduling similar to tasklet but with weighting
  */
@@ -316,13 +327,13 @@ struct napi_struct {
 
        unsigned long           state;
        int                     weight;
-       unsigned int            gro_count;
+       unsigned long           gro_bitmask;
        int                     (*poll)(struct napi_struct *, int);
 #ifdef CONFIG_NETPOLL
        int                     poll_owner;
 #endif
        struct net_device       *dev;
-       struct sk_buff          *gro_list;
+       struct gro_list         gro_hash[GRO_HASH_BUCKETS];
        struct sk_buff          *skb;
        struct hrtimer          timer;
        struct list_head        dev_list;
@@ -569,6 +580,9 @@ struct netdev_queue {
         * (/sys/class/net/DEV/Q/trans_timeout)
         */
        unsigned long           trans_timeout;
+
+       /* Subordinate device that the queue has been assigned to */
+       struct net_device       *sb_dev;
 /*
  * write-mostly part
  */
@@ -730,10 +744,15 @@ struct xps_map {
  */
 struct xps_dev_maps {
        struct rcu_head rcu;
-       struct xps_map __rcu *cpu_map[0];
+       struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */
 };
-#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +         \
+
+#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +     \
        (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
+
+#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
+       (_rxqs * (_tcs) * sizeof(struct xps_map *)))
+
 #endif /* CONFIG_XPS */
 
 #define TC_MAX_QUEUE   16
@@ -779,7 +798,8 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
 }
 
 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
-                                      struct sk_buff *skb);
+                                      struct sk_buff *skb,
+                                      struct net_device *sb_dev);
 
 enum tc_setup_type {
        TC_SETUP_QDISC_MQPRIO,
@@ -792,6 +812,7 @@ enum tc_setup_type {
        TC_SETUP_QDISC_RED,
        TC_SETUP_QDISC_PRIO,
        TC_SETUP_QDISC_MQ,
+       TC_SETUP_QDISC_ETF,
 };
 
 /* These structures hold the attributes of bpf state that are being passed
@@ -807,11 +828,8 @@ enum bpf_netdev_command {
         */
        XDP_SETUP_PROG,
        XDP_SETUP_PROG_HW,
-       /* Check if a bpf program is set on the device.  The callee should
-        * set @prog_attached to one of XDP_ATTACHED_* values, note that "true"
-        * is equivalent to XDP_ATTACHED_DRV.
-        */
        XDP_QUERY_PROG,
+       XDP_QUERY_PROG_HW,
        /* BPF program for offload callbacks, invoked at program load time. */
        BPF_OFFLOAD_VERIFIER_PREP,
        BPF_OFFLOAD_TRANSLATE,
@@ -835,9 +853,8 @@ struct netdev_bpf {
                        struct bpf_prog *prog;
                        struct netlink_ext_ack *extack;
                };
-               /* XDP_QUERY_PROG */
+               /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */
                struct {
-                       u8 prog_attached;
                        u32 prog_id;
                        /* flags with which program was installed */
                        u32 prog_flags;
@@ -891,6 +908,8 @@ struct tlsdev_ops {
        void (*tls_dev_del)(struct net_device *netdev,
                            struct tls_context *ctx,
                            enum tls_offload_ctx_dir direction);
+       void (*tls_dev_resync_rx)(struct net_device *netdev,
+                                 struct sock *sk, u32 seq, u64 rcd_sn);
 };
 #endif
 
@@ -942,7 +961,8 @@ struct dev_ifalias {
  *     those the driver believes to be appropriate.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- *                         void *accel_priv, select_queue_fallback_t fallback);
+ *                         struct net_device *sb_dev,
+ *                         select_queue_fallback_t fallback);
  *     Called to decide which queue to use when device supports multiple
  *     transmit queues.
  *
@@ -1214,7 +1234,7 @@ struct net_device_ops {
                                                      netdev_features_t features);
        u16                     (*ndo_select_queue)(struct net_device *dev,
                                                    struct sk_buff *skb,
-                                                   void *accel_priv,
+                                                   struct net_device *sb_dev,
                                                    select_queue_fallback_t fallback);
        void                    (*ndo_change_rx_flags)(struct net_device *dev,
                                                       int flags);
@@ -1909,7 +1929,8 @@ struct net_device {
        int                     watchdog_timeo;
 
 #ifdef CONFIG_XPS
-       struct xps_dev_maps __rcu *xps_maps;
+       struct xps_dev_maps __rcu *xps_cpus_map;
+       struct xps_dev_maps __rcu *xps_rxqs_map;
 #endif
 #ifdef CONFIG_NET_CLS_ACT
        struct mini_Qdisc __rcu *miniq_egress;
@@ -1978,7 +1999,7 @@ struct net_device {
 #ifdef CONFIG_DCB
        const struct dcbnl_rtnl_ops *dcbnl_ops;
 #endif
-       u8                      num_tc;
+       s16                     num_tc;
        struct netdev_tc_txq    tc_to_txq[TC_MAX_QUEUE];
        u8                      prio_tc_map[TC_BITMASK + 1];
 
@@ -2032,6 +2053,17 @@ int netdev_get_num_tc(struct net_device *dev)
        return dev->num_tc;
 }
 
+void netdev_unbind_sb_channel(struct net_device *dev,
+                             struct net_device *sb_dev);
+int netdev_bind_sb_channel_queue(struct net_device *dev,
+                                struct net_device *sb_dev,
+                                u8 tc, u16 count, u16 offset);
+int netdev_set_sb_channel(struct net_device *dev, u16 channel);
+static inline int netdev_get_sb_channel(struct net_device *dev)
+{
+       return max_t(int, -dev->num_tc, 0);
+}
+
 static inline
 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
                                         unsigned int index)
@@ -2076,7 +2108,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                                    struct sk_buff *skb,
-                                   void *accel_priv);
+                                   struct net_device *sb_dev);
 
 /* returns the headroom that the master device needs to take in account
  * when forwarding to this dev
@@ -2255,10 +2287,10 @@ static inline int gro_recursion_inc_test(struct sk_buff *skb)
        return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
 }
 
-typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
-static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
-                                               struct sk_buff **head,
-                                               struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
+static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
+                                              struct list_head *head,
+                                              struct sk_buff *skb)
 {
        if (unlikely(gro_recursion_inc_test(skb))) {
                NAPI_GRO_CB(skb)->flush |= 1;
@@ -2268,12 +2300,12 @@ static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
        return cb(head, skb);
 }
 
-typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
-                                            struct sk_buff *);
-static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
-                                                  struct sock *sk,
-                                                  struct sk_buff **head,
-                                                  struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
+                                           struct sk_buff *);
+static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
+                                                 struct sock *sk,
+                                                 struct list_head *head,
+                                                 struct sk_buff *skb)
 {
        if (unlikely(gro_recursion_inc_test(skb))) {
                NAPI_GRO_CB(skb)->flush |= 1;
@@ -2290,6 +2322,9 @@ struct packet_type {
                                         struct net_device *,
                                         struct packet_type *,
                                         struct net_device *);
+       void                    (*list_func) (struct list_head *,
+                                             struct packet_type *,
+                                             struct net_device *);
        bool                    (*id_match)(struct packet_type *ptype,
                                            struct sock *sk);
        void                    *af_packet_priv;
@@ -2299,8 +2334,8 @@ struct packet_type {
 struct offload_callbacks {
        struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
                                                netdev_features_t features);
-       struct sk_buff          **(*gro_receive)(struct sk_buff **head,
-                                                struct sk_buff *skb);
+       struct sk_buff          *(*gro_receive)(struct list_head *head,
+                                               struct sk_buff *skb);
        int                     (*gro_complete)(struct sk_buff *skb, int nhoff);
 };
 
@@ -2537,8 +2572,14 @@ void dev_close(struct net_device *dev);
 void dev_close_many(struct list_head *head, bool unlink);
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
+u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+                    struct net_device *sb_dev,
+                    select_queue_fallback_t fallback);
+u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+                      struct net_device *sb_dev,
+                      select_queue_fallback_t fallback);
 int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
+int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
@@ -2568,7 +2609,7 @@ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 struct net_device *dev_get_by_napi_id(unsigned int napi_id);
 int netdev_get_name(struct net *net, char *name, int ifindex);
 int dev_restart(struct net_device *dev);
-int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 {
@@ -2784,16 +2825,36 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
 }
 
 #ifdef CONFIG_XFRM_OFFLOAD
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
 {
        if (PTR_ERR(pp) != -EINPROGRESS)
                NAPI_GRO_CB(skb)->flush |= flush;
 }
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+                                              struct sk_buff *pp,
+                                              int flush,
+                                              struct gro_remcsum *grc)
+{
+       if (PTR_ERR(pp) != -EINPROGRESS) {
+               NAPI_GRO_CB(skb)->flush |= flush;
+               skb_gro_remcsum_cleanup(skb, grc);
+               skb->remcsum_offload = 0;
+       }
+}
 #else
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
 {
        NAPI_GRO_CB(skb)->flush |= flush;
 }
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+                                              struct sk_buff *pp,
+                                              int flush,
+                                              struct gro_remcsum *grc)
+{
+       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_remcsum_cleanup(skb, grc);
+       skb->remcsum_offload = 0;
+}
 #endif
 
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -3258,6 +3319,92 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 #ifdef CONFIG_XPS
 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
                        u16 index);
+int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
+                         u16 index, bool is_rxqs_map);
+
+/**
+ *     netif_attr_test_mask - Test a CPU or Rx queue set in a mask
+ *     @j: CPU/Rx queue index
+ *     @mask: bitmask of all cpus/rx queues
+ *     @nr_bits: number of bits in the bitmask
+ *
+ * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
+ */
+static inline bool netif_attr_test_mask(unsigned long j,
+                                       const unsigned long *mask,
+                                       unsigned int nr_bits)
+{
+       cpu_max_bits_warn(j, nr_bits);
+       return test_bit(j, mask);
+}
+
+/**
+ *     netif_attr_test_online - Test for online CPU/Rx queue
+ *     @j: CPU/Rx queue index
+ *     @online_mask: bitmask for CPUs/Rx queues that are online
+ *     @nr_bits: number of bits in the bitmask
+ *
+ * Returns true if a CPU/Rx queue is online.
+ */
+static inline bool netif_attr_test_online(unsigned long j,
+                                         const unsigned long *online_mask,
+                                         unsigned int nr_bits)
+{
+       cpu_max_bits_warn(j, nr_bits);
+
+       if (online_mask)
+               return test_bit(j, online_mask);
+
+       return (j < nr_bits);
+}
+
+/**
+ *     netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
+ *     @n: CPU/Rx queue index
+ *     @srcp: the cpumask/Rx queue mask pointer
+ *     @nr_bits: number of bits in the bitmask
+ *
+ * Returns >= nr_bits if no further CPUs/Rx queues are set.
+ */
+static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
+                                              unsigned int nr_bits)
+{
+       /* -1 is a legal arg here. */
+       if (n != -1)
+               cpu_max_bits_warn(n, nr_bits);
+
+       if (srcp)
+               return find_next_bit(srcp, nr_bits, n + 1);
+
+       return n + 1;
+}
+
+/**
+ *     netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
+ *     @n: CPU/Rx queue index
+ *     @src1p: the first CPUs/Rx queues mask pointer
+ *     @src2p: the second CPUs/Rx queues mask pointer
+ *     @nr_bits: number of bits in the bitmask
+ *
+ * Returns >= nr_bits if no further CPUs/Rx queues are set in both.
+ */
+static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
+                                         const unsigned long *src2p,
+                                         unsigned int nr_bits)
+{
+       /* -1 is a legal arg here. */
+       if (n != -1)
+               cpu_max_bits_warn(n, nr_bits);
+
+       if (src1p && src2p)
+               return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
+       else if (src1p)
+               return find_next_bit(src1p, nr_bits, n + 1);
+       else if (src2p)
+               return find_next_bit(src2p, nr_bits, n + 1);
+
+       return n + 1;
+}
 #else
 static inline int netif_set_xps_queue(struct net_device *dev,
                                      const struct cpumask *mask,
@@ -3364,6 +3511,7 @@ int netif_rx(struct sk_buff *skb);
 int netif_rx_ni(struct sk_buff *skb);
 int netif_receive_skb(struct sk_buff *skb);
 int netif_receive_skb_core(struct sk_buff *skb);
+void netif_receive_skb_list(struct list_head *head);
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
 struct sk_buff *napi_get_frags(struct napi_struct *napi);
@@ -3415,8 +3563,8 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
                      int fd, u32 flags);
-void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
-                    struct netdev_bpf *xdp);
+u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
+                   enum bpf_netdev_command cmd);
 
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
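
A sketch of the reworked ndo_select_queue() contract (my_select_queue is
illustrative): the opaque accel_priv pointer is replaced by the subordinate
device, which must be forwarded to the fallback so traffic steered to a
subordinate channel lands in its reserved queue range.

  static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
                             struct net_device *sb_dev,
                             select_queue_fallback_t fallback)
  {
          /* Pin control traffic to queue 0; let the core place the rest. */
          if (skb->priority == TC_PRIO_CONTROL)
                  return 0;
          return fallback(dev, skb, sb_dev);
  }
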
index dd2052f0efb7742f881cdcf334edc570c7d4d790..07efffd0c759d0b509dec19acb6b718cbab06031 100644 (file)
@@ -288,6 +288,24 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct
        return ret;
 }
 
+static inline void
+NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+            struct list_head *head, struct net_device *in, struct net_device *out,
+            int (*okfn)(struct net *, struct sock *, struct sk_buff *))
+{
+       struct sk_buff *skb, *next;
+       struct list_head sublist;
+
+       INIT_LIST_HEAD(&sublist);
+       list_for_each_entry_safe(skb, next, head, list) {
+               list_del(&skb->list);
+               if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1)
+                       list_add_tail(&skb->list, &sublist);
+       }
+       /* Put passed packets back on the main list */
+       list_splice(&sublist, head);
+}
+
 /* Call setsockopt() */
 int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
                  unsigned int len);
@@ -369,6 +387,14 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
        return okfn(net, sk, skb);
 }
 
+static inline void
+NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+            struct list_head *head, struct net_device *in, struct net_device *out,
+            int (*okfn)(struct net *, struct sock *, struct sk_buff *))
+{
+       /* nothing to do */
+}
+
 static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                          struct sock *sk, struct sk_buff *skb,
                          struct net_device *indev, struct net_device *outdev,
@@ -388,8 +414,17 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 
 extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
 void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+struct nf_conntrack_tuple;
+bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+                        const struct sk_buff *skb);
 #else
 static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+struct nf_conntrack_tuple;
+static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+                                      const struct sk_buff *skb)
+{
+       return false;
+}
 #endif
 
 struct nf_conn;
@@ -398,6 +433,8 @@ enum ip_conntrack_info;
 struct nf_ct_hook {
        int (*update)(struct net *net, struct sk_buff *skb);
        void (*destroy)(struct nf_conntrack *);
+       bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
+                             const struct sk_buff *);
 };
 extern struct nf_ct_hook __rcu *nf_ct_hook;
 
index 9dee3c23895d82fae05025961fe83d15b23d45b7..712eed156d0912f1aecc97de222597f1d7cc5dc9 100644 (file)
@@ -1438,6 +1438,8 @@ enum {
        NFS_IOHDR_EOF,
        NFS_IOHDR_REDO,
        NFS_IOHDR_STAT,
+       NFS_IOHDR_RESEND_PNFS,
+       NFS_IOHDR_RESEND_MDS,
 };
 
 struct nfs_io_completion;
index e6b240b6196cae3a7dad22693af3857849fb0c8f..379affc63e24c64937c503f6e87afed63bbb8db9 100644 (file)
@@ -21,4 +21,9 @@
 
 #include <uapi/linux/openvswitch.h>
 
+#define OVS_CLONE_ATTR_EXEC      0   /* Specify a u32 value. When nonzero,
+                                     * actions in the clone will not change
+                                     * flow keys; otherwise they may.
+                                     */
+
 #endif /* _LINUX_OPENVSWITCH_H */
index 6cd09098427c5bcd93d58f1d842165c2b9ffe7c0..075c2f770d3ef098b39dd8d0746416382d1d36ce 100644 (file)
@@ -942,6 +942,8 @@ void phy_start(struct phy_device *phydev);
 void phy_stop(struct phy_device *phydev);
 int phy_start_aneg(struct phy_device *phydev);
 int phy_aneg_done(struct phy_device *phydev);
+int phy_speed_down(struct phy_device *phydev, bool sync);
+int phy_speed_up(struct phy_device *phydev);
 
 int phy_stop_interrupts(struct phy_device *phydev);
 int phy_restart_aneg(struct phy_device *phydev);
index 9206a4fef9ac151905a825700c6ae7477d7cbd88..cb8d84090cfb7adb478d156727279aa48686d816 100644 (file)
@@ -234,7 +234,7 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
 int of_genpd_parse_idle_states(struct device_node *dn,
                               struct genpd_power_state **states, int *n);
 unsigned int of_genpd_opp_to_performance_state(struct device *dev,
-                               struct device_node *opp_node);
+                               struct device_node *np);
 
 int genpd_dev_pm_attach(struct device *dev);
 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
@@ -274,9 +274,9 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
 
 static inline unsigned int
 of_genpd_opp_to_performance_state(struct device *dev,
-                                 struct device_node *opp_node)
+                                 struct device_node *np)
 {
-       return -ENODEV;
+       return 0;
 }
 
 static inline int genpd_dev_pm_attach(struct device *dev)
index fdf86b4cbc71bacca2795107532fb75e3855c0c9..7e0fdcf905d2e77b355c94a7381446927452723c 100644 (file)
@@ -74,18 +74,18 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
        pt->_key   = ~(__poll_t)0; /* all events enabled */
 }
 
-static inline bool file_has_poll_mask(struct file *file)
+static inline bool file_can_poll(struct file *file)
 {
-       return file->f_op->get_poll_head && file->f_op->poll_mask;
+       return file->f_op->poll;
 }
 
-static inline bool file_can_poll(struct file *file)
+static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
 {
-       return file->f_op->poll || file_has_poll_mask(file);
+       if (unlikely(!file->f_op->poll))
+               return DEFAULT_POLLMASK;
+       return file->f_op->poll(file, pt);
 }
 
-__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt);
-
 struct poll_table_entry {
        struct file *filp;
        __poll_t key;
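
With ->get_poll_head/->poll_mask gone, vfs_poll() is again a thin wrapper
around ->poll. A caller sketch (my_wait_readable is illustrative; the
file_can_poll() check duplicates the one inside vfs_poll() purely to show
both helpers):

  static __poll_t my_wait_readable(struct file *file, poll_table *pt)
  {
          if (!file_can_poll(file))
                  return DEFAULT_POLLMASK;        /* always "ready" */
          return vfs_poll(file, pt) & (EPOLLIN | EPOLLRDNORM);
  }
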
index e031e9f2f9d85f1330a9f38b1a82c7f322f00f3e..585ce89c0f336d0d5ab6895be6ccf9fe1b0a1c02 100644 (file)
@@ -25,6 +25,9 @@ struct reciprocal_value {
        u8 sh1, sh2;
 };
 
+/* "reciprocal_value" and "reciprocal_divide" together implement the basic
+ * version of the algorithm described in Figure 4.1 of the paper.
+ */
 struct reciprocal_value reciprocal_value(u32 d);
 
 static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
@@ -33,4 +36,69 @@ static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
        return (t + ((a - t) >> R.sh1)) >> R.sh2;
 }
 
+struct reciprocal_value_adv {
+       u32 m;
+       u8 sh, exp;
+       bool is_wide_m;
+};
+
+/* "reciprocal_value_adv" implements the advanced version of the algorithm
+ * described in Figure 4.2 of the paper except when "divisor > (1U << 31)" whose
+ * ceil(log2(d)) result will be 32 which then requires u128 divide on host. The
+ * exception case could be easily handled before calling "reciprocal_value_adv".
+ *
+ * The advanced version requires more complex calculation to get the reciprocal
+ * multiplier and other control variables, but then could reduce the required
+ * emulation operations.
+ *
+ * It makes no sense to use this advanced version for host divide emulation,
+ * those extra complexities for calculating multiplier etc could completely
+ * waive our saving on emulation operations.
+ *
+ * However, it makes sense to use it for JIT divide code generation for which
+ * we are willing to trade performance of JITed code with that of host. As shown
+ * by the following pseudo code, the required emulation operations could go down
+ * from 6 (the basic version) to 3 or 4.
+ *
+ * To use the result of "reciprocal_value_adv", suppose we want to calculate
+ * n/d, the pseudo C code will be:
+ *
+ *   struct reciprocal_value_adv rvalue;
+ *   u8 pre_shift, exp;
+ *
+ *   // handle exception case.
+ *   if (d >= (1U << 31)) {
+ *     result = n >= d;
+ *     return;
+ *   }
+ *
+ *   rvalue = reciprocal_value_adv(d, 32);
+ *   exp = rvalue.exp;
+ *   if (rvalue.is_wide_m && !(d & 1)) {
+ *     // floor(log2(d & (2^32 - d)))
+ *     pre_shift = fls(d & -d) - 1;
+ *     rvalue = reciprocal_value_adv(d >> pre_shift, 32 - pre_shift);
+ *   } else {
+ *     pre_shift = 0;
+ *   }
+ *
+ *   // code generation starts.
+ *   if (d == 1U << exp) {
+ *     result = n >> exp;
+ *   } else if (rvalue.is_wide_m) {
+ *     // pre_shift must be zero when reached here.
+ *     t = (n * rvalue.m) >> 32;
+ *     result = n - t;
+ *     result >>= 1;
+ *     result += t;
+ *     result >>= rvalue.sh - 1;
+ *   } else {
+ *     if (pre_shift)
+ *       result = n >> pre_shift;
+ *     result = ((u64)result * rvalue.m) >> 32;
+ *     result >>= rvalue.sh;
+ *   }
+ */
+struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec);
+
 #endif /* _LINUX_RECIPROCAL_DIV_H */
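
For contrast, a worked sketch of the basic version (my_div10 is
illustrative): the multiplier is derived once for a runtime-invariant
divisor, after which each division costs one multiply plus shifts.

  static u32 my_div10(u32 n)
  {
          static struct reciprocal_value R;
          static bool ready;

          if (!ready) {                   /* one-time setup, not thread safe */
                  R = reciprocal_value(10);
                  ready = true;
          }
          return reciprocal_divide(n, R); /* == n / 10 */
  }
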
index 4193c41e383a897273605aac39f331b46512691a..a685da2c4522b5583ec405d0c0ff49da6aa718c9 100644 (file)
@@ -98,5 +98,7 @@ extern __must_check bool refcount_dec_if_one(refcount_t *r);
 extern __must_check bool refcount_dec_not_one(refcount_t *r);
 extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
 extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
-
+extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
+                                                      spinlock_t *lock,
+                                                      unsigned long *flags);
 #endif /* _LINUX_REFCOUNT_H */
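
The intended pattern mirrors refcount_dec_and_lock() but also works where
the lock must disable interrupts; my_obj and my_put are illustrative only.

  struct my_obj {
          refcount_t              refs;
          spinlock_t              lock;
          struct list_head        node;
  };

  static void my_put(struct my_obj *obj)
  {
          unsigned long flags;

          if (!refcount_dec_and_lock_irqsave(&obj->refs, &obj->lock, &flags))
                  return;                 /* not the last reference */

          list_del(&obj->node);           /* teardown under the lock */
          spin_unlock_irqrestore(&obj->lock, flags);
          kfree(obj);
  }
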
index e6a0031d1b1fdc1793200d1fc93f48bf1973ecfc..8ad2487a86d5b261126e0a3c05357356cb039078 100644 (file)
@@ -66,7 +66,7 @@ struct rfkill_ops {
 
 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
 /**
- * rfkill_alloc - allocate rfkill structure
+ * rfkill_alloc - Allocate rfkill structure
  * @name: name of the struct -- the string is not copied internally
  * @parent: device that has rf switch on it
  * @type: type of the switch (RFKILL_TYPE_*)
@@ -112,7 +112,7 @@ void rfkill_pause_polling(struct rfkill *rfkill);
 /**
  * rfkill_resume_polling(struct rfkill *rfkill)
  *
- * Pause polling -- say transmitter is off for other reasons.
+ * Resume polling
  * NOTE: not necessary for suspend/resume -- in that case the
  * core stops polling anyway
  */
@@ -130,7 +130,7 @@ void rfkill_resume_polling(struct rfkill *rfkill);
 void rfkill_unregister(struct rfkill *rfkill);
 
 /**
- * rfkill_destroy - free rfkill structure
+ * rfkill_destroy - Free rfkill structure
  * @rfkill: rfkill structure to be destroyed
  *
  * Destroys the rfkill structure.
@@ -140,7 +140,7 @@ void rfkill_destroy(struct rfkill *rfkill);
 /**
  * rfkill_set_hw_state - Set the internal rfkill hardware block state
  * @rfkill: pointer to the rfkill class to modify.
- * @state: the current hardware block state to set
+ * @blocked: the current hardware block state to set
  *
  * rfkill drivers that get events when the hard-blocked state changes
  * use this function to notify the rfkill core (and through that also
@@ -161,7 +161,7 @@ bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
 /**
  * rfkill_set_sw_state - Set the internal rfkill software block state
  * @rfkill: pointer to the rfkill class to modify.
- * @state: the current software block state to set
+ * @blocked: the current software block state to set
  *
  * rfkill drivers that get events when the soft-blocked state changes
  * (yes, some platforms directly act on input but allow changing again)
@@ -183,7 +183,7 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked);
 /**
  * rfkill_init_sw_state - Initialize persistent software block state
  * @rfkill: pointer to the rfkill class to modify.
- * @state: the current software block state to set
+ * @blocked: the current software block state to set
  *
  * rfkill drivers that preserve their software block state over power off
  * use this function to notify the rfkill core (and through that also
@@ -208,17 +208,17 @@ void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked);
 void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw);
 
 /**
- * rfkill_blocked - query rfkill block
+ * rfkill_blocked - Query rfkill block state
  *
  * @rfkill: rfkill struct to query
  */
 bool rfkill_blocked(struct rfkill *rfkill);
 
 /**
- * rfkill_find_type - Helpper for finding rfkill type by name
+ * rfkill_find_type - Helper for finding rfkill type by name
  * @name: the name of the type
  *
- * Returns enum rfkill_type that conrresponds the name.
+ * Returns enum rfkill_type that corresponds to the name.
  */
 enum rfkill_type rfkill_find_type(const char *name);
 
@@ -296,7 +296,7 @@ static inline enum rfkill_type rfkill_find_type(const char *name)
 const char *rfkill_get_led_trigger_name(struct rfkill *rfkill);
 
 /**
- * rfkill_set_led_trigger_name -- set the LED trigger name
+ * rfkill_set_led_trigger_name - Set the LED trigger name
  * @rfkill: rfkill struct
  * @name: LED trigger name
  *
diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h
new file mode 100644 (file)
index 0000000..763d613
--- /dev/null
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Simple structures that might be needed in include
+ * files.
+ */
+
+#ifndef _LINUX_RHASHTABLE_TYPES_H
+#define _LINUX_RHASHTABLE_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+struct rhash_head {
+       struct rhash_head __rcu         *next;
+};
+
+struct rhlist_head {
+       struct rhash_head               rhead;
+       struct rhlist_head __rcu        *next;
+};
+
+struct bucket_table;
+
+/**
+ * struct rhashtable_compare_arg - Key for the function rhashtable_compare
+ * @ht: Hash table
+ * @key: Key to compare against
+ */
+struct rhashtable_compare_arg {
+       struct rhashtable *ht;
+       const void *key;
+};
+
+typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
+                              const void *obj);
+
+/**
+ * struct rhashtable_params - Hash table construction parameters
+ * @nelem_hint: Hint on number of elements, should be 75% of desired size
+ * @key_len: Length of key
+ * @key_offset: Offset of key in struct to be hashed
+ * @head_offset: Offset of rhash_head in struct to be hashed
+ * @max_size: Maximum size while expanding
+ * @min_size: Minimum size while shrinking
+ * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
+ * @automatic_shrinking: Enable automatic shrinking of tables
+ * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
+ * @obj_hashfn: Function to hash object
+ * @obj_cmpfn: Function to compare key with object
+ */
+struct rhashtable_params {
+       u16                     nelem_hint;
+       u16                     key_len;
+       u16                     key_offset;
+       u16                     head_offset;
+       unsigned int            max_size;
+       u16                     min_size;
+       bool                    automatic_shrinking;
+       u8                      locks_mul;
+       rht_hashfn_t            hashfn;
+       rht_obj_hashfn_t        obj_hashfn;
+       rht_obj_cmpfn_t         obj_cmpfn;
+};
+
+/**
+ * struct rhashtable - Hash table handle
+ * @tbl: Bucket table
+ * @key_len: Key length for hashfn
+ * @max_elems: Maximum number of elements in table
+ * @p: Configuration parameters
+ * @rhlist: True if this is an rhltable
+ * @run_work: Deferred worker to expand/shrink asynchronously
+ * @mutex: Mutex to protect current/future table swapping
+ * @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
+ */
+struct rhashtable {
+       struct bucket_table __rcu       *tbl;
+       unsigned int                    key_len;
+       unsigned int                    max_elems;
+       struct rhashtable_params        p;
+       bool                            rhlist;
+       struct work_struct              run_work;
+       struct mutex                    mutex;
+       spinlock_t                      lock;
+       atomic_t                        nelems;
+};
+
+/**
+ * struct rhltable - Hash table with duplicate objects in a list
+ * @ht: Underlying rhtable
+ */
+struct rhltable {
+       struct rhashtable ht;
+};
+
+/**
+ * struct rhashtable_walker - Hash table walker
+ * @list: List entry on list of walkers
+ * @tbl: The table that we were walking over
+ */
+struct rhashtable_walker {
+       struct list_head list;
+       struct bucket_table *tbl;
+};
+
+/**
+ * struct rhashtable_iter - Hash table iterator
+ * @ht: Table to iterate through
+ * @p: Current pointer
+ * @list: Current hash list pointer
+ * @walker: Associated rhashtable walker
+ * @slot: Current slot
+ * @skip: Number of entries to skip in slot
+ */
+struct rhashtable_iter {
+       struct rhashtable *ht;
+       struct rhash_head *p;
+       struct rhlist_head *list;
+       struct rhashtable_walker walker;
+       unsigned int slot;
+       unsigned int skip;
+       bool end_of_table;
+};
+
+int rhashtable_init(struct rhashtable *ht,
+                   const struct rhashtable_params *params);
+int rhltable_init(struct rhltable *hlt,
+                 const struct rhashtable_params *params);
+
+#endif /* _LINUX_RHASHTABLE_TYPES_H */
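
A sketch of what the split enables (my_entry and my_params are
illustrative): a header that only embeds a table or declares parameters can
include this lightweight types header, leaving the heavy inline machinery of
rhashtable.h to the .c file.

  struct my_entry {
          u32                     key;
          struct rhash_head       node;
  };

  static const struct rhashtable_params my_params = {
          .key_len                = sizeof(u32),
          .key_offset             = offsetof(struct my_entry, key),
          .head_offset            = offsetof(struct my_entry, node),
          .automatic_shrinking    = true,
  };

  /* In a .c file that includes <linux/rhashtable.h>:
   *      err = rhashtable_init(&my_table, &my_params);
   */
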
index 4e1f535c2034e8d292f3d5b5bd569423878bab58..eb71110392479784db1d4a0b9d86d2eee6631789 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Resizable, Scalable, Concurrent Hash Table
  *
 #ifndef _LINUX_RHASHTABLE_H
 #define _LINUX_RHASHTABLE_H
 
-#include <linux/atomic.h>
-#include <linux/compiler.h>
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/jhash.h>
 #include <linux/list_nulls.h>
 #include <linux/workqueue.h>
-#include <linux/mutex.h>
 #include <linux/rculist.h>
 
+#include <linux/rhashtable-types.h>
 /*
  * The end of the chain is marked with a special nulls marker which has
- * the following format:
- *
- * +-------+-----------------------------------------------------+-+
- * | Base  |                      Hash                           |1|
- * +-------+-----------------------------------------------------+-+
- *
- * Base (4 bits) : Reserved to distinguish between multiple tables.
- *                 Specified via &struct rhashtable_params.nulls_base.
- * Hash (27 bits): Full hash (unmasked) of first element added to bucket
- * 1 (1 bit)     : Nulls marker (always set)
- *
- * The remaining bits of the next pointer remain unused for now.
+ * the least significant bit set.
  */
-#define RHT_BASE_BITS          4
-#define RHT_HASH_BITS          27
-#define RHT_BASE_SHIFT         RHT_HASH_BITS
-
-/* Base bits plus 1 bit for nulls marker */
-#define RHT_HASH_RESERVED_SPACE        (RHT_BASE_BITS + 1)
 
 /* Maximum chain length before rehash
  *
  */
 #define RHT_ELASTICITY 16u
 
-struct rhash_head {
-       struct rhash_head __rcu         *next;
-};
-
-struct rhlist_head {
-       struct rhash_head               rhead;
-       struct rhlist_head __rcu        *next;
-};
-
 /**
  * struct bucket_table - Table of hash buckets
  * @size: Number of hash buckets
@@ -102,132 +75,14 @@ struct bucket_table {
        struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };
 
-/**
- * struct rhashtable_compare_arg - Key for the function rhashtable_compare
- * @ht: Hash table
- * @key: Key to compare against
- */
-struct rhashtable_compare_arg {
-       struct rhashtable *ht;
-       const void *key;
-};
-
-typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
-typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
-typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
-                              const void *obj);
-
-struct rhashtable;
-
-/**
- * struct rhashtable_params - Hash table construction parameters
- * @nelem_hint: Hint on number of elements, should be 75% of desired size
- * @key_len: Length of key
- * @key_offset: Offset of key in struct to be hashed
- * @head_offset: Offset of rhash_head in struct to be hashed
- * @max_size: Maximum size while expanding
- * @min_size: Minimum size while shrinking
- * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
- * @automatic_shrinking: Enable automatic shrinking of tables
- * @nulls_base: Base value to generate nulls marker
- * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
- * @obj_hashfn: Function to hash object
- * @obj_cmpfn: Function to compare key with object
- */
-struct rhashtable_params {
-       u16                     nelem_hint;
-       u16                     key_len;
-       u16                     key_offset;
-       u16                     head_offset;
-       unsigned int            max_size;
-       u16                     min_size;
-       bool                    automatic_shrinking;
-       u8                      locks_mul;
-       u32                     nulls_base;
-       rht_hashfn_t            hashfn;
-       rht_obj_hashfn_t        obj_hashfn;
-       rht_obj_cmpfn_t         obj_cmpfn;
-};
-
-/**
- * struct rhashtable - Hash table handle
- * @tbl: Bucket table
- * @key_len: Key length for hashfn
- * @max_elems: Maximum number of elements in table
- * @p: Configuration parameters
- * @rhlist: True if this is an rhltable
- * @run_work: Deferred worker to expand/shrink asynchronously
- * @mutex: Mutex to protect current/future table swapping
- * @lock: Spin lock to protect walker list
- * @nelems: Number of elements in table
- */
-struct rhashtable {
-       struct bucket_table __rcu       *tbl;
-       unsigned int                    key_len;
-       unsigned int                    max_elems;
-       struct rhashtable_params        p;
-       bool                            rhlist;
-       struct work_struct              run_work;
-       struct mutex                    mutex;
-       spinlock_t                      lock;
-       atomic_t                        nelems;
-};
-
-/**
- * struct rhltable - Hash table with duplicate objects in a list
- * @ht: Underlying rhtable
- */
-struct rhltable {
-       struct rhashtable ht;
-};
-
-/**
- * struct rhashtable_walker - Hash table walker
- * @list: List entry on list of walkers
- * @tbl: The table that we were walking over
- */
-struct rhashtable_walker {
-       struct list_head list;
-       struct bucket_table *tbl;
-};
-
-/**
- * struct rhashtable_iter - Hash table iterator
- * @ht: Table to iterate through
- * @p: Current pointer
- * @list: Current hash list pointer
- * @walker: Associated rhashtable walker
- * @slot: Current slot
- * @skip: Number of entries to skip in slot
- */
-struct rhashtable_iter {
-       struct rhashtable *ht;
-       struct rhash_head *p;
-       struct rhlist_head *list;
-       struct rhashtable_walker walker;
-       unsigned int slot;
-       unsigned int skip;
-       bool end_of_table;
-};
-
-static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
-{
-       return NULLS_MARKER(ht->p.nulls_base + hash);
-}
-
-#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
-       ((ptr) = (typeof(ptr)) rht_marker(ht, hash))
+#define INIT_RHT_NULLS_HEAD(ptr)       \
+       ((ptr) = (typeof(ptr)) NULLS_MARKER(0))
 
 static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
 {
        return ((unsigned long) ptr & 1);
 }
 
-static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
-{
-       return ((unsigned long) ptr) >> 1;
-}
-
 static inline void *rht_obj(const struct rhashtable *ht,
                            const struct rhash_head *he)
 {
@@ -237,7 +92,7 @@ static inline void *rht_obj(const struct rhashtable *ht,
 static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
                                            unsigned int hash)
 {
-       return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
+       return hash & (tbl->size - 1);
 }
 
 static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
@@ -376,11 +231,6 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
 }
 #endif /* CONFIG_PROVE_LOCKING */
 
-int rhashtable_init(struct rhashtable *ht,
-                   const struct rhashtable_params *params);
-int rhltable_init(struct rhltable *hlt,
-                 const struct rhashtable_params *params);
-
 void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
                             struct rhash_head *obj);
 
@@ -745,7 +595,7 @@ static inline void *__rhashtable_insert_fast(
        lock = rht_bucket_lock(tbl, hash);
        spin_lock_bh(lock);
 
-       if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
+       if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
 slow_path:
                spin_unlock_bh(lock);
                rcu_read_unlock();
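
With the per-table nulls base removed, end-of-chain detection reduces to the
low bit of the pointer. A bucket-walk sketch using the existing iteration
helpers (my_bucket_contains is illustrative; the caller must hold the RCU
read lock or the bucket lock):

  static bool my_bucket_contains(struct rhashtable *ht,
                                 struct bucket_table *tbl,
                                 unsigned int hash, const void *obj)
  {
          struct rhash_head *he;

          rht_for_each(he, tbl, hash)     /* stops at the nulls marker */
                  if (rht_obj(ht, he) == obj)
                          return true;
          return false;
  }
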
index 64125443f8a638e787adfbaebab4755f5d63852a..5ef5c7c412a75b5f24e276563d278be8ced3b212 100644 (file)
@@ -354,6 +354,8 @@ struct rmi_driver_data {
        struct mutex irq_mutex;
        struct input_dev *input;
 
+       struct irq_domain *irqdomain;
+
        u8 pdt_props;
 
        u8 num_rx_electrodes;
index 51f52020ad5fdd44ab4fdfa6ad2e0063c4780947..093aa57120b0cf1f40c2a75f28612331c6e6f6e0 100644 (file)
@@ -9,9 +9,6 @@
 #include <asm/io.h>
 
 struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-       unsigned long   sg_magic;
-#endif
        unsigned long   page_link;
        unsigned int    offset;
        unsigned int    length;
@@ -64,7 +61,6 @@ struct sg_table {
  *
  */
 
-#define SG_MAGIC       0x87654321
 #define SG_CHAIN       0x01UL
 #define SG_END         0x02UL
 
@@ -98,7 +94,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
         */
        BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
 #ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
 #endif
        sg->page_link = page_link | (unsigned long) page;
@@ -129,7 +124,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
 static inline struct page *sg_page(struct scatterlist *sg)
 {
 #ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
 #endif
        return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
@@ -195,9 +189,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
  **/
 static inline void sg_mark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        /*
         * Set termination bit, clear potential chain bit
         */
@@ -215,9 +206,6 @@ static inline void sg_mark_end(struct scatterlist *sg)
  **/
 static inline void sg_unmark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        sg->page_link &= ~SG_END;
 }
 
@@ -260,12 +248,6 @@ static inline void *sg_virt(struct scatterlist *sg)
 static inline void sg_init_marker(struct scatterlist *sgl,
                                  unsigned int nents)
 {
-#ifdef CONFIG_DEBUG_SG
-       unsigned int i;
-
-       for (i = 0; i < nents; i++)
-               sgl[i].sg_magic = SG_MAGIC;
-#endif
        sg_mark_end(&sgl[nents - 1]);
 }
 
index 87bf02d93a279a9b98df452c7ad78a0b54adc1db..9256118bd40c2bc74bd7bbcbcda8225214c8ab74 100644 (file)
@@ -1799,20 +1799,22 @@ static inline void rseq_set_notify_resume(struct task_struct *t)
                set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
 }
 
-void __rseq_handle_notify_resume(struct pt_regs *regs);
+void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
 
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+                                            struct pt_regs *regs)
 {
        if (current->rseq)
-               __rseq_handle_notify_resume(regs);
+               __rseq_handle_notify_resume(ksig, regs);
 }
 
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+                                      struct pt_regs *regs)
 {
        preempt_disable();
        __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
        preempt_enable();
-       rseq_handle_notify_resume(regs);
+       rseq_handle_notify_resume(ksig, regs);
 }
 
 /* rseq_preempt() requires preemption to be disabled. */
@@ -1831,9 +1833,7 @@ static inline void rseq_migrate(struct task_struct *t)
 
 /*
  * If parent process has a registered restartable sequences area, the
- * child inherits. Only applies when forking a process, not a thread. In
- * case a parent fork() in the middle of a restartable sequence, set the
- * resume notifier to force the child to retry.
+ * child inherits. Only applies when forking a process, not a thread.
  */
 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
 {
@@ -1847,7 +1847,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
                t->rseq_len = current->rseq_len;
                t->rseq_sig = current->rseq_sig;
                t->rseq_event_mask = current->rseq_event_mask;
-               rseq_preempt(t);
        }
 }
 
@@ -1864,10 +1863,12 @@ static inline void rseq_execve(struct task_struct *t)
 static inline void rseq_set_notify_resume(struct task_struct *t)
 {
 }
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+                                            struct pt_regs *regs)
 {
 }
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+                                      struct pt_regs *regs)
 {
 }
 static inline void rseq_preempt(struct task_struct *t)
index b36c76635f182007584076ff5fb4d644cb134a0f..83d94341e0032795035b9545483f3cc32a6d9500 100644 (file)
@@ -801,4 +801,11 @@ struct sctp_strreset_resptsn {
        __be32 receivers_next_tsn;
 };
 
+enum {
+       SCTP_DSCP_SET_MASK = 0x1,
+       SCTP_DSCP_VAL_MASK = 0xfc,
+       SCTP_FLOWLABEL_SET_MASK = 0x100000,
+       SCTP_FLOWLABEL_VAL_MASK = 0xfffff
+};
+
 #endif /* __LINUX_SCTP_H__ */
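
A decoding sketch for the new masks (both helpers are illustrative): each
field packs a value together with a "user has set this" flag, which the
VAL/SET masks pull apart.

  static bool my_flowlabel_is_set(u32 flowlabel_field, u32 *label)
  {
          if (!(flowlabel_field & SCTP_FLOWLABEL_SET_MASK))
                  return false;
          *label = flowlabel_field & SCTP_FLOWLABEL_VAL_MASK; /* 20 bits */
          return true;
  }

  static bool my_dscp_is_set(u16 dscp_field, u8 *dscp)
  {
          if (!(dscp_field & SCTP_DSCP_SET_MASK))
                  return false;
          *dscp = dscp_field & SCTP_DSCP_VAL_MASK;    /* TOS-positioned */
          return true;
  }
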
index ebce9e24906a7121c6c24c806be252dbd7887334..d37518e89db2ddea762bb483cc74c13040a0f803 100644 (file)
@@ -231,6 +231,50 @@ struct sfp_eeprom_id {
        struct sfp_eeprom_ext ext;
 } __packed;
 
+struct sfp_diag {
+       __be16 temp_high_alarm;
+       __be16 temp_low_alarm;
+       __be16 temp_high_warn;
+       __be16 temp_low_warn;
+       __be16 volt_high_alarm;
+       __be16 volt_low_alarm;
+       __be16 volt_high_warn;
+       __be16 volt_low_warn;
+       __be16 bias_high_alarm;
+       __be16 bias_low_alarm;
+       __be16 bias_high_warn;
+       __be16 bias_low_warn;
+       __be16 txpwr_high_alarm;
+       __be16 txpwr_low_alarm;
+       __be16 txpwr_high_warn;
+       __be16 txpwr_low_warn;
+       __be16 rxpwr_high_alarm;
+       __be16 rxpwr_low_alarm;
+       __be16 rxpwr_high_warn;
+       __be16 rxpwr_low_warn;
+       __be16 laser_temp_high_alarm;
+       __be16 laser_temp_low_alarm;
+       __be16 laser_temp_high_warn;
+       __be16 laser_temp_low_warn;
+       __be16 tec_cur_high_alarm;
+       __be16 tec_cur_low_alarm;
+       __be16 tec_cur_high_warn;
+       __be16 tec_cur_low_warn;
+       __be32 cal_rxpwr4;
+       __be32 cal_rxpwr3;
+       __be32 cal_rxpwr2;
+       __be32 cal_rxpwr1;
+       __be32 cal_rxpwr0;
+       __be16 cal_txi_slope;
+       __be16 cal_txi_offset;
+       __be16 cal_txpwr_slope;
+       __be16 cal_txpwr_offset;
+       __be16 cal_t_slope;
+       __be16 cal_t_offset;
+       __be16 cal_v_slope;
+       __be16 cal_v_offset;
+} __packed;
+
 /* SFP EEPROM registers */
 enum {
        SFP_PHYS_ID                     = 0x00,
@@ -384,7 +428,33 @@ enum {
        SFP_TEC_CUR                     = 0x6c,
 
        SFP_STATUS                      = 0x6e,
-       SFP_ALARM                       = 0x70,
+       SFP_ALARM0                      = 0x70,
+       SFP_ALARM0_TEMP_HIGH            = BIT(7),
+       SFP_ALARM0_TEMP_LOW             = BIT(6),
+       SFP_ALARM0_VCC_HIGH             = BIT(5),
+       SFP_ALARM0_VCC_LOW              = BIT(4),
+       SFP_ALARM0_TX_BIAS_HIGH         = BIT(3),
+       SFP_ALARM0_TX_BIAS_LOW          = BIT(2),
+       SFP_ALARM0_TXPWR_HIGH           = BIT(1),
+       SFP_ALARM0_TXPWR_LOW            = BIT(0),
+
+       SFP_ALARM1                      = 0x71,
+       SFP_ALARM1_RXPWR_HIGH           = BIT(7),
+       SFP_ALARM1_RXPWR_LOW            = BIT(6),
+
+       SFP_WARN0                       = 0x74,
+       SFP_WARN0_TEMP_HIGH             = BIT(7),
+       SFP_WARN0_TEMP_LOW              = BIT(6),
+       SFP_WARN0_VCC_HIGH              = BIT(5),
+       SFP_WARN0_VCC_LOW               = BIT(4),
+       SFP_WARN0_TX_BIAS_HIGH          = BIT(3),
+       SFP_WARN0_TX_BIAS_LOW           = BIT(2),
+       SFP_WARN0_TXPWR_HIGH            = BIT(1),
+       SFP_WARN0_TXPWR_LOW             = BIT(0),
+
+       SFP_WARN1                       = 0x75,
+       SFP_WARN1_RXPWR_HIGH            = BIT(7),
+       SFP_WARN1_RXPWR_LOW             = BIT(6),
 
        SFP_EXT_STATUS                  = 0x76,
        SFP_VSL                         = 0x78,
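
A consuming sketch for the split alarm/warning registers (my_temp_alarm is
illustrative, assuming d points at a copy of the module's A2h diagnostic
page):

  static bool my_temp_alarm(const u8 *d)
  {
          return d[SFP_ALARM0] & (SFP_ALARM0_TEMP_HIGH | SFP_ALARM0_TEMP_LOW);
  }
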
index c8688595499421d9f051366d4a85e5553751768e..3ceb8dcc54da45bb305b4fcadbb31c766308d95f 100644 (file)
@@ -630,6 +630,7 @@ typedef unsigned char *sk_buff_data_t;
  *     @hash: the packet hash
  *     @queue_mapping: Queue mapping for multiqueue devices
  *     @xmit_more: More SKBs are pending for this queue
+ *     @decrypted: Decrypted SKB
  *     @ndisc_nodetype: router type (from link layer)
  *     @ooo_okay: allow the mapping of a socket to a queue to be changed
  *     @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -677,7 +678,8 @@ struct sk_buff {
                                int                     ip_defrag_offset;
                        };
                };
-               struct rb_node  rbnode; /* used in netem & tcp stack */
+               struct rb_node          rbnode; /* used in netem & tcp stack */
+               struct list_head        list;
        };
        struct sock             *sk;
 
@@ -735,7 +737,11 @@ struct sk_buff {
                                peeked:1,
                                head_frag:1,
                                xmit_more:1,
-                               __unused:1; /* one bit hole */
+#ifdef CONFIG_TLS_DEVICE
+                               decrypted:1;
+#else
+                               __unused:1;
+#endif
 
        /* fields enclosed in headers_start/headers_end are copied
         * using a single memcpy() in __copy_skb_header()
@@ -3252,7 +3258,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
                                    int *peeked, int *off, int *err);
 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
                                  int *err);
-__poll_t datagram_poll_mask(struct socket *sock, __poll_t events);
+__poll_t datagram_poll(struct file *file, struct socket *sock,
+                          struct poll_table_struct *wait);
 int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
                           struct iov_iter *to, int size);
 static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
index 09fa2c6f0e68e69567b8b918cdda3f13f5ddaefa..3a1a1dbc6f49479f61f4c1a6588e6a672f0b0663 100644 (file)
@@ -155,8 +155,12 @@ struct kmem_cache {
 
 #ifdef CONFIG_SYSFS
 #define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_unlink(struct kmem_cache *);
 void sysfs_slab_release(struct kmem_cache *);
 #else
+static inline void sysfs_slab_unlink(struct kmem_cache *s)
+{
+}
 static inline void sysfs_slab_release(struct kmem_cache *s)
 {
 }
index 1e8a46435838456ae57af232664ce9686a4ac5c0..fd57888d4942e10166440da41d449a52fc8e1730 100644 (file)
@@ -427,6 +427,11 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
+extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+                                       unsigned long *flags);
+#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
+               __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+
 int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
                           size_t max_size, unsigned int cpu_mult,
                           gfp_t gfp);
index 73810808cdf266e5cdcfc1e0c6b3af126a0bf4b1..a368a68cb667f848c8a505aecb3b8b24df51b63a 100644 (file)
@@ -231,6 +231,9 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
  */
 #ifndef __SYSCALL_DEFINEx
 #define __SYSCALL_DEFINEx(x, name, ...)                                        \
+       __diag_push();                                                  \
+       __diag_ignore(GCC, 8, "-Wattribute-alias",                      \
+                     "Type aliasing is used to sanitize syscall arguments");\
        asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))       \
                __attribute__((alias(__stringify(__se_sys##name))));    \
        ALLOW_ERROR_INJECTION(sys##name, ERRNO);                        \
@@ -243,6 +246,7 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
                __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__));       \
                return ret;                                             \
        }                                                               \
+       __diag_pop();                                                   \
        static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 #endif /* __SYSCALL_DEFINEx */
 
index 72705eaf4b84060a45bf04d5170f389a18010eac..58a8d7d713549bee88113a0a3373dd8cc055d72d 100644 (file)
@@ -89,7 +89,7 @@ struct tcp_sack_block {
 
 struct tcp_options_received {
 /*     PAWS/RTTM data  */
-       long    ts_recent_stamp;/* Time we stored ts_recent (for aging) */
+       int     ts_recent_stamp;/* Time we stored ts_recent (for aging) */
        u32     ts_recent;      /* Time stamp to echo next              */
        u32     rcv_tsval;      /* Time stamp value                     */
        u32     rcv_tsecr;      /* Time stamp echo reply                */
@@ -350,6 +350,7 @@ struct tcp_sock {
 #endif
 
 /* Receiver side RTT estimation */
+       u32 rcv_rtt_last_tsecr;
        struct {
                u32     rtt_us;
                u32     seq;
@@ -425,7 +426,7 @@ struct tcp_timewait_sock {
        /* The time we sent the last out-of-window ACK: */
        u32                       tw_last_oow_ack_time;
 
-       long                      tw_ts_recent_stamp;
+       int                       tw_ts_recent_stamp;
 #ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key     *tw_md5_key;
 #endif
index ca840345571bf6cf9253647c11112252fa7b6241..320d49d85484d4ac2d0fdbbe365ad339bf949c0b 100644 (file)
@@ -74,8 +74,8 @@ struct udp_sock {
        void (*encap_destroy)(struct sock *sk);
 
        /* GRO functions for UDP socket */
-       struct sk_buff **       (*gro_receive)(struct sock *sk,
-                                              struct sk_buff **head,
+       struct sk_buff *        (*gro_receive)(struct sock *sk,
+                                              struct list_head *head,
                                               struct sk_buff *skb);
        int                     (*gro_complete)(struct sock *sk,
                                                struct sk_buff *skb,
index 9e59ebfded625426c2c3764225d58081b704cf0a..683ce41053d9359487a4523812b5442631affe73 100644 (file)
@@ -6,6 +6,7 @@
  * Public action API for classifiers/qdiscs
 */
 
+#include <linux/refcount.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 #include <net/net_namespace.h>
@@ -26,8 +27,8 @@ struct tc_action {
        struct tcf_idrinfo              *idrinfo;
 
        u32                             tcfa_index;
-       int                             tcfa_refcnt;
-       int                             tcfa_bindcnt;
+       refcount_t                      tcfa_refcnt;
+       atomic_t                        tcfa_bindcnt;
        u32                             tcfa_capab;
        int                             tcfa_action;
        struct tcf_t                    tcfa_tm;
@@ -37,7 +38,7 @@ struct tc_action {
        spinlock_t                      tcfa_lock;
        struct gnet_stats_basic_cpu __percpu *cpu_bstats;
        struct gnet_stats_queue __percpu *cpu_qstats;
-       struct tc_cookie        *act_cookie;
+       struct tc_cookie        __rcu *act_cookie;
        struct tcf_chain        *goto_chain;
 };
 #define tcf_index      common.tcfa_index
@@ -91,7 +92,8 @@ struct tc_action_ops {
                          struct netlink_ext_ack *extack);
        int     (*init)(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **act, int ovr,
-                       int bind, struct netlink_ext_ack *extack);
+                       int bind, bool rtnl_held,
+                       struct netlink_ext_ack *extack);
        int     (*walk)(struct net *, struct sk_buff *,
                        struct netlink_callback *, int,
                        const struct tc_action_ops *,
@@ -99,6 +101,7 @@ struct tc_action_ops {
        void    (*stats_update)(struct tc_action *, u64, u32, u64);
        size_t  (*get_fill_size)(const struct tc_action *act);
        struct net_device *(*get_dev)(const struct tc_action *a);
+       int     (*delete)(struct net *net, u32 index);
 };
 
 struct tc_action_net {
@@ -151,6 +154,10 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
                   int bind, bool cpustats);
 void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
 
+void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
+int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+                       struct tc_action **a, int bind);
+int tcf_idr_delete_index(struct tc_action_net *tn, u32 index);
 int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
 
 static inline int tcf_idr_release(struct tc_action *a, bool bind)
@@ -161,18 +168,20 @@ static inline int tcf_idr_release(struct tc_action *a, bool bind)
 int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
 int tcf_unregister_action(struct tc_action_ops *a,
                          struct pernet_operations *ops);
-int tcf_action_destroy(struct list_head *actions, int bind);
+int tcf_action_destroy(struct tc_action *actions[], int bind);
 int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
                    int nr_actions, struct tcf_result *res);
 int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
                    struct nlattr *est, char *name, int ovr, int bind,
-                   struct list_head *actions, size_t *attr_size,
-                   struct netlink_ext_ack *extack);
+                   struct tc_action *actions[], size_t *attr_size,
+                   bool rtnl_held, struct netlink_ext_ack *extack);
 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                                    struct nlattr *nla, struct nlattr *est,
                                    char *name, int ovr, int bind,
+                                   bool rtnl_held,
                                    struct netlink_ext_ack *extack);
-int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int);
+int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
+                   int ref);
 int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
 int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
 int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
@@ -190,9 +199,6 @@ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
 #endif
 }
 
-typedef int tc_setup_cb_t(enum tc_setup_type type,
-                         void *type_data, void *cb_priv);
-
 #ifdef CONFIG_NET_CLS_ACT
 int tc_setup_cb_egdev_register(const struct net_device *dev,
                               tc_setup_cb_t *cb, void *cb_priv);
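
Together with the new ->delete() hook, tcf_idr_check_alloc() lets an action's
->init() atomically look up an existing index or reserve a new one. A hedged
sketch of the expected calling pattern in an action module (act_example_ops
and the surrounding context are illustrative; the 0-means-reserved /
positive-means-exists return convention is an assumption from the API shape):

    err = tcf_idr_check_alloc(tn, &index, a, bind);
    if (err < 0)
            return err;
    if (!err) {                     /* index reserved: create the action */
            err = tcf_idr_create(tn, index, est, a,
                                 &act_example_ops, bind, false);
            if (err) {
                    tcf_idr_cleanup(tn, index);
                    return err;
            }
    }                               /* else: found an existing action */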
index 53ce8176c31306deaf9c2be5743546abe4d27b53..ec9d6bc658559c55b64ac3c1d23b4e1166cc4b04 100644 (file)
@@ -271,7 +271,7 @@ int  bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                     int flags);
 int  bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
                            size_t len, int flags);
-__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events);
+__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
 int  bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int  bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
 int  bt_sock_wait_ready(struct sock *sk, unsigned long flags);
index 808f1d1673494d3e09b1c42f229308371f4b2ad9..a2d058170ea3c38739263570bcf14f2a0935e16f 100644 (file)
@@ -411,6 +411,19 @@ static inline bool bond_slave_can_tx(struct slave *slave)
               bond_is_active_slave(slave);
 }
 
+static inline bool bond_is_active_slave_dev(const struct net_device *slave_dev)
+{
+       struct slave *slave;
+       bool active;
+
+       rcu_read_lock();
+       slave = bond_slave_get_rcu(slave_dev);
+       active = bond_is_active_slave(slave);
+       rcu_read_unlock();
+
+       return active;
+}
+
 static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len)
 {
        if (len == ETH_ALEN) {
index c5187438af38fe4b4c309ea327757b316288c4c9..9e36fda652b7e277edb006d981a311ea1cfe6dba 100644 (file)
@@ -151,6 +151,7 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
 #ifdef CONFIG_NET_RX_BUSY_POLL
        sk->sk_napi_id = skb->napi_id;
 #endif
+       sk_rx_queue_set(sk, skb);
 }
 
 /* variant used for unconnected sockets */
index 5fbfe61f41c67f19713bf0e307ae0612428d68a6..9ba1f289c43914c74b0dbe50bd9dadb1946728e3 100644 (file)
@@ -285,6 +285,41 @@ struct ieee80211_sta_vht_cap {
        struct ieee80211_vht_mcs_info vht_mcs;
 };
 
+#define IEEE80211_HE_PPE_THRES_MAX_LEN         25
+
+/**
+ * struct ieee80211_sta_he_cap - STA's HE capabilities
+ *
+ * This structure describes the most essential parameters needed
+ * to express 802.11ax HE capabilities for a STA.
+ *
+ * @has_he: true iff HE data is valid.
+ * @he_cap_elem: Fixed portion of the HE capabilities element.
+ * @he_mcs_nss_supp: The supported NSS/MCS combinations.
+ * @ppe_thres: Holds the PPE Thresholds data.
+ */
+struct ieee80211_sta_he_cap {
+       bool has_he;
+       struct ieee80211_he_cap_elem he_cap_elem;
+       struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp;
+       u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN];
+};
+
+/**
+ * struct ieee80211_sband_iftype_data
+ *
+ * This structure encapsulates sband data that is relevant for the
+ * interface types defined in @types_mask.  Each type in the
+ * @types_mask must be unique across all instances of iftype_data.
+ *
+ * @types_mask: interface types mask
+ * @he_cap: holds the HE capabilities
+ */
+struct ieee80211_sband_iftype_data {
+       u16 types_mask;
+       struct ieee80211_sta_he_cap he_cap;
+};
+
 /**
  * struct ieee80211_supported_band - frequency band definition
  *
@@ -301,6 +336,11 @@ struct ieee80211_sta_vht_cap {
  * @n_bitrates: Number of bitrates in @bitrates
  * @ht_cap: HT capabilities in this band
  * @vht_cap: VHT capabilities in this band
+ * @n_iftype_data: number of iftype data entries
+ * @iftype_data: interface type data entries.  Note that the bits in
+ *     @types_mask inside this structure cannot overlap (i.e. only
+ *     one occurrence of each type is allowed across all instances of
+ *     iftype_data).
  */
 struct ieee80211_supported_band {
        struct ieee80211_channel *channels;
@@ -310,8 +350,55 @@ struct ieee80211_supported_band {
        int n_bitrates;
        struct ieee80211_sta_ht_cap ht_cap;
        struct ieee80211_sta_vht_cap vht_cap;
+       u16 n_iftype_data;
+       const struct ieee80211_sband_iftype_data *iftype_data;
 };
 
+/**
+ * ieee80211_get_sband_iftype_data - return sband data for a given iftype
+ * @sband: the sband to search for the STA on
+ * @iftype: enum nl80211_iftype
+ *
+ * Return: pointer to struct ieee80211_sband_iftype_data, or NULL if none found
+ */
+static inline const struct ieee80211_sband_iftype_data *
+ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
+                               u8 iftype)
+{
+       int i;
+
+       if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
+               return NULL;
+
+       for (i = 0; i < sband->n_iftype_data; i++)  {
+               const struct ieee80211_sband_iftype_data *data =
+                       &sband->iftype_data[i];
+
+               if (data->types_mask & BIT(iftype))
+                       return data;
+       }
+
+       return NULL;
+}
+
+/**
+ * ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA
+ * @sband: the sband to search for the STA on
+ *
+ * Return: pointer to the struct ieee80211_sta_he_cap, or NULL if none found
+ */
+static inline const struct ieee80211_sta_he_cap *
+ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband)
+{
+       const struct ieee80211_sband_iftype_data *data =
+               ieee80211_get_sband_iftype_data(sband, NL80211_IFTYPE_STATION);
+
+       if (data && data->he_cap.has_he)
+               return &data->he_cap;
+
+       return NULL;
+}
+
 /**
  * wiphy_read_of_freq_limits - read frequency limits from device tree
  *
@@ -899,6 +986,8 @@ enum station_parameters_apply_mask {
  * @opmode_notif: operating mode field from Operating Mode Notification
  * @opmode_notif_used: information if operating mode field is used
  * @support_p2p_ps: information if station supports P2P PS mechanism
+ * @he_capa: HE capabilities of station
+ * @he_capa_len: the length of the HE capabilities
  */
 struct station_parameters {
        const u8 *supported_rates;
@@ -926,6 +1015,8 @@ struct station_parameters {
        u8 opmode_notif;
        bool opmode_notif_used;
        int support_p2p_ps;
+       const struct ieee80211_he_cap_elem *he_capa;
+       u8 he_capa_len;
 };
 
 /**
@@ -1000,12 +1091,14 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
  * @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS
  * @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval
  * @RATE_INFO_FLAGS_60G: 60GHz MCS
+ * @RATE_INFO_FLAGS_HE_MCS: HE MCS information
  */
 enum rate_info_flags {
        RATE_INFO_FLAGS_MCS                     = BIT(0),
        RATE_INFO_FLAGS_VHT_MCS                 = BIT(1),
        RATE_INFO_FLAGS_SHORT_GI                = BIT(2),
        RATE_INFO_FLAGS_60G                     = BIT(3),
+       RATE_INFO_FLAGS_HE_MCS                  = BIT(4),
 };
 
 /**
@@ -1019,6 +1112,7 @@ enum rate_info_flags {
  * @RATE_INFO_BW_40: 40 MHz bandwidth
  * @RATE_INFO_BW_80: 80 MHz bandwidth
  * @RATE_INFO_BW_160: 160 MHz bandwidth
+ * @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation
  */
 enum rate_info_bw {
        RATE_INFO_BW_20 = 0,
@@ -1027,6 +1121,7 @@ enum rate_info_bw {
        RATE_INFO_BW_40,
        RATE_INFO_BW_80,
        RATE_INFO_BW_160,
+       RATE_INFO_BW_HE_RU,
 };
 
 /**
@@ -1035,10 +1130,14 @@ enum rate_info_bw {
  * Information about a receiving or transmitting bitrate
  *
  * @flags: bitflag of flags from &enum rate_info_flags
- * @mcs: mcs index if struct describes a 802.11n bitrate
+ * @mcs: mcs index if struct describes an HT/VHT/HE rate
  * @legacy: bitrate in 100kbit/s for 802.11abg
- * @nss: number of streams (VHT only)
+ * @nss: number of streams (VHT & HE only)
  * @bw: bandwidth (from &enum rate_info_bw)
+ * @he_gi: HE guard interval (from &enum nl80211_he_gi)
+ * @he_dcm: HE DCM value
+ * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc,
+ *     only valid if bw is %RATE_INFO_BW_HE_RU)
  */
 struct rate_info {
        u8 flags;
@@ -1046,6 +1145,9 @@ struct rate_info {
        u16 legacy;
        u8 nss;
        u8 bw;
+       u8 he_gi;
+       u8 he_dcm;
+       u8 he_ru_alloc;
 };
 
 /**
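
A driver advertising HE would hang an array of iftype data off each supported
band and let the two helpers above resolve it. A minimal sketch, assuming a
single station-only entry (all values illustrative):

    static const struct ieee80211_sband_iftype_data example_iftype_data[] = {
            {
                    .types_mask = BIT(NL80211_IFTYPE_STATION),
                    .he_cap = {
                            .has_he = true,
                            /* he_cap_elem/he_mcs_nss_supp set per hardware */
                    },
            },
    };

    /* at band setup time: */
    sband->iftype_data = example_iftype_data;
    sband->n_iftype_data = ARRAY_SIZE(example_iftype_data);

    /* at association time: */
    const struct ieee80211_sta_he_cap *he_cap =
            ieee80211_get_he_sta_cap(sband);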
index e336ea9c73df31df038f5905e5524e57d1652894..b9b89d6604d402eb7ab83f1273526eba7fec42d8 100644 (file)
@@ -27,6 +27,9 @@ struct devlink {
        struct list_head sb_list;
        struct list_head dpipe_table_list;
        struct list_head resource_list;
+       struct list_head param_list;
+       struct list_head region_list;
+       u32 snapshot_id;
        struct devlink_dpipe_headers *dpipe_headers;
        const struct devlink_ops *ops;
        struct device *dev;
@@ -295,6 +298,115 @@ struct devlink_resource {
 
 #define DEVLINK_RESOURCE_ID_PARENT_TOP 0
 
+#define DEVLINK_PARAM_MAX_STRING_VALUE 32
+enum devlink_param_type {
+       DEVLINK_PARAM_TYPE_U8,
+       DEVLINK_PARAM_TYPE_U16,
+       DEVLINK_PARAM_TYPE_U32,
+       DEVLINK_PARAM_TYPE_STRING,
+       DEVLINK_PARAM_TYPE_BOOL,
+};
+
+union devlink_param_value {
+       u8 vu8;
+       u16 vu16;
+       u32 vu32;
+       const char *vstr;
+       bool vbool;
+};
+
+struct devlink_param_gset_ctx {
+       union devlink_param_value val;
+       enum devlink_param_cmode cmode;
+};
+
+/**
+ * struct devlink_param - devlink configuration parameter data
+ * @id: ID of the parameter
+ * @name: name of the parameter
+ * @generic: indicates if the parameter is generic or driver specific
+ * @type: parameter type
+ * @supported_cmodes: bitmap of supported configuration modes
+ * @get: get parameter value, used for runtime and permanent
+ *       configuration modes
+ * @set: set parameter value, used for runtime and permanent
+ *       configuration modes
+ * @validate: validate input value is applicable (within value range, etc.)
+ *
+ * This struct should be used by the driver to fill the data for
+ * a parameter it registers.
+ */
+struct devlink_param {
+       u32 id;
+       const char *name;
+       bool generic;
+       enum devlink_param_type type;
+       unsigned long supported_cmodes;
+       int (*get)(struct devlink *devlink, u32 id,
+                  struct devlink_param_gset_ctx *ctx);
+       int (*set)(struct devlink *devlink, u32 id,
+                  struct devlink_param_gset_ctx *ctx);
+       int (*validate)(struct devlink *devlink, u32 id,
+                       union devlink_param_value val,
+                       struct netlink_ext_ack *extack);
+};
+
+struct devlink_param_item {
+       struct list_head list;
+       const struct devlink_param *param;
+       union devlink_param_value driverinit_value;
+       bool driverinit_value_valid;
+};
+
+enum devlink_param_generic_id {
+       DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+       DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+       DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
+       DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+
+       /* add new param generic ids above here */
+       __DEVLINK_PARAM_GENERIC_ID_MAX,
+       DEVLINK_PARAM_GENERIC_ID_MAX = __DEVLINK_PARAM_GENERIC_ID_MAX - 1,
+};
+
+#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME "internal_error_reset"
+#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_MAX_MACS_NAME "max_macs"
+#define DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME "enable_sriov"
+#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME "region_snapshot_enable"
+#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate)     \
+{                                                                      \
+       .id = DEVLINK_PARAM_GENERIC_ID_##_id,                           \
+       .name = DEVLINK_PARAM_GENERIC_##_id##_NAME,                     \
+       .type = DEVLINK_PARAM_GENERIC_##_id##_TYPE,                     \
+       .generic = true,                                                \
+       .supported_cmodes = _cmodes,                                    \
+       .get = _get,                                                    \
+       .set = _set,                                                    \
+       .validate = _validate,                                          \
+}
+
+#define DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes, _get, _set, _validate)        \
+{                                                                      \
+       .id = _id,                                                      \
+       .name = _name,                                                  \
+       .type = _type,                                                  \
+       .supported_cmodes = _cmodes,                                    \
+       .get = _get,                                                    \
+       .set = _set,                                                    \
+       .validate = _validate,                                          \
+}
+
+struct devlink_region;
+
+typedef void devlink_snapshot_data_dest_t(const void *data);
+
 struct devlink_ops {
        int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack);
        int (*port_type_set)(struct devlink_port *devlink_port,
@@ -430,6 +542,26 @@ void devlink_resource_occ_get_register(struct devlink *devlink,
                                       void *occ_get_priv);
 void devlink_resource_occ_get_unregister(struct devlink *devlink,
                                         u64 resource_id);
+int devlink_params_register(struct devlink *devlink,
+                           const struct devlink_param *params,
+                           size_t params_count);
+void devlink_params_unregister(struct devlink *devlink,
+                              const struct devlink_param *params,
+                              size_t params_count);
+int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+                                      union devlink_param_value *init_val);
+int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+                                      union devlink_param_value init_val);
+void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
+struct devlink_region *devlink_region_create(struct devlink *devlink,
+                                            const char *region_name,
+                                            u32 region_max_snapshots,
+                                            u64 region_size);
+void devlink_region_destroy(struct devlink_region *region);
+u32 devlink_region_shapshot_id_get(struct devlink *devlink);
+int devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
+                                  u8 *data, u32 snapshot_id,
+                                  devlink_snapshot_data_dest_t *data_destructor);
 
 #else
 
@@ -622,6 +754,69 @@ devlink_resource_occ_get_unregister(struct devlink *devlink,
 {
 }
 
+static inline int
+devlink_params_register(struct devlink *devlink,
+                       const struct devlink_param *params,
+                       size_t params_count)
+{
+       return 0;
+}
+
+static inline void
+devlink_params_unregister(struct devlink *devlink,
+                         const struct devlink_param *params,
+                         size_t params_count)
+{
+}
+
+static inline int
+devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+                                  union devlink_param_value *init_val)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+                                  union devlink_param_value init_val)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline void
+devlink_param_value_changed(struct devlink *devlink, u32 param_id)
+{
+}
+
+static inline struct devlink_region *
+devlink_region_create(struct devlink *devlink,
+                     const char *region_name,
+                     u32 region_max_snapshots,
+                     u64 region_size)
+{
+       return NULL;
+}
+
+static inline void
+devlink_region_destroy(struct devlink_region *region)
+{
+}
+
+static inline u32
+devlink_region_shapshot_id_get(struct devlink *devlink)
+{
+       return 0;
+}
+
+static inline int
+devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
+                              u8 *data, u32 snapshot_id,
+                              devlink_snapshot_data_dest_t *data_destructor)
+{
+       return 0;
+}
+
 #endif
 
 #endif /* _NET_DEVLINK_H_ */
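
On the driver side the intended flow is: declare the parameters (generic ones
via DEVLINK_PARAM_GENERIC()), register the array, and read back any driverinit
value when (re)initializing. A hedged sketch; the DEVLINK_PARAM_CMODE_DRIVERINIT
value is assumed from the uAPI side of this series, and mydrv_apply_max_macs()
is hypothetical:

    static const struct devlink_param mydrv_params[] = {
            DEVLINK_PARAM_GENERIC(MAX_MACS,
                                  BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
                                  NULL, NULL, NULL),
    };

    err = devlink_params_register(devlink, mydrv_params,
                                  ARRAY_SIZE(mydrv_params));
    if (err)
            return err;

    /* during driver (re)init: */
    union devlink_param_value val;
    if (!devlink_param_driverinit_value_get(devlink,
                                            DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
                                            &val))
            mydrv_apply_max_macs(priv, val.vu32);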
index adc24df56b907d4598a1b08d4ab5da01eddc0c85..c64406717eee657db0b995964f8d713445f36719 100644 (file)
@@ -47,7 +47,7 @@ struct flow_dissector_key_tags {
 struct flow_dissector_key_vlan {
        u16     vlan_id:12,
                vlan_priority:3;
-       u16     padding;
+       __be16  vlan_tpid;
 };
 
 struct flow_dissector_key_mpls {
@@ -206,6 +206,7 @@ enum flow_dissector_key_id {
        FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */
        FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */
        FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */
+       FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_flow_vlan */
 
        FLOW_DISSECTOR_KEY_MAX,
 };
@@ -237,6 +238,7 @@ struct flow_keys {
        struct flow_dissector_key_basic basic;
        struct flow_dissector_key_tags tags;
        struct flow_dissector_key_vlan vlan;
+       struct flow_dissector_key_vlan cvlan;
        struct flow_dissector_key_keyid keyid;
        struct flow_dissector_key_ports ports;
        struct flow_dissector_key_addrs addrs;
index 960236fb168184325dff962c116d305ff7541469..feef706e1158256569b08aa0fb1b24534ceebaac 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2017          Intel Deutschland GmbH
+ * Copyright (c) 2018          Intel Corporation
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -72,6 +73,8 @@ enum ieee80211_radiotap_presence {
        IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
        IEEE80211_RADIOTAP_VHT = 21,
        IEEE80211_RADIOTAP_TIMESTAMP = 22,
+       IEEE80211_RADIOTAP_HE = 23,
+       IEEE80211_RADIOTAP_HE_MU = 24,
 
        /* valid in every it_present bitmap, even vendor namespaces */
        IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -202,6 +205,126 @@ enum ieee80211_radiotap_timestamp_flags {
        IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY = 0x02,
 };
 
+struct ieee80211_radiotap_he {
+       __le16 data1, data2, data3, data4, data5, data6;
+};
+
+enum ieee80211_radiotap_he_bits {
+       IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MASK         = 3,
+       IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU           = 0,
+       IEEE80211_RADIOTAP_HE_DATA1_FORMAT_EXT_SU       = 1,
+       IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU           = 2,
+       IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG         = 3,
+
+       IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN     = 0x0004,
+       IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN   = 0x0008,
+       IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN         = 0x0010,
+       IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN      = 0x0020,
+       IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN      = 0x0040,
+       IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN        = 0x0080,
+       IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN  = 0x0100,
+       IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN          = 0x0200,
+       IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN    = 0x0400,
+       IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN   = 0x0800,
+       IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN   = 0x1000,
+       IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN   = 0x2000,
+       IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN   = 0x4000,
+       IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN       = 0x8000,
+
+       IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN     = 0x0001,
+       IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN            = 0x0002,
+       IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN  = 0x0004,
+       IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN   = 0x0008,
+       IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN          = 0x0010,
+       IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN   = 0x0020,
+       IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN          = 0x0040,
+       IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN      = 0x0080,
+       IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET           = 0x3f00,
+       IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN     = 0x4000,
+       IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC       = 0x8000,
+
+       IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR           = 0x003f,
+       IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE         = 0x0040,
+       IEEE80211_RADIOTAP_HE_DATA3_UL_DL               = 0x0080,
+       IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS            = 0x0f00,
+       IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM            = 0x1000,
+       IEEE80211_RADIOTAP_HE_DATA3_CODING              = 0x2000,
+       IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG        = 0x4000,
+       IEEE80211_RADIOTAP_HE_DATA3_STBC                = 0x8000,
+
+       IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE    = 0x000f,
+       IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID           = 0x7ff0,
+       IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1      = 0x000f,
+       IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2      = 0x00f0,
+       IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3      = 0x0f00,
+       IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4      = 0xf000,
+
+       IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC    = 0x000f,
+               IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ      = 0,
+               IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ      = 1,
+               IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ      = 2,
+               IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ     = 3,
+               IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_26T        = 4,
+               IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_52T        = 5,
+               IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_106T       = 6,
+               IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_242T       = 7,
+               IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_484T       = 8,
+               IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_996T       = 9,
+               IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_2x996T     = 10,
+
+       IEEE80211_RADIOTAP_HE_DATA5_GI                  = 0x0030,
+               IEEE80211_RADIOTAP_HE_DATA5_GI_0_8                      = 0,
+               IEEE80211_RADIOTAP_HE_DATA5_GI_1_6                      = 1,
+               IEEE80211_RADIOTAP_HE_DATA5_GI_3_2                      = 2,
+
+       IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE            = 0x00c0,
+               IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN            = 0,
+               IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X                 = 1,
+               IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X                 = 2,
+               IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X                 = 3,
+       IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS        = 0x0700,
+       IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD         = 0x3000,
+       IEEE80211_RADIOTAP_HE_DATA5_TXBF                = 0x4000,
+       IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG         = 0x8000,
+
+       IEEE80211_RADIOTAP_HE_DATA6_NSTS                = 0x000f,
+       IEEE80211_RADIOTAP_HE_DATA6_DOPPLER             = 0x0010,
+       IEEE80211_RADIOTAP_HE_DATA6_TXOP                = 0x7f00,
+       IEEE80211_RADIOTAP_HE_DATA6_MIDAMBLE_PDCTY      = 0x8000,
+};
+
+struct ieee80211_radiotap_he_mu {
+       __le16 flags1, flags2;
+       u8 ru_ch1[4];
+       u8 ru_ch2[4];
+};
+
+enum ieee80211_radiotap_he_mu_bits {
+       IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS               = 0x000f,
+       IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN         = 0x0010,
+       IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM               = 0x0020,
+       IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN         = 0x0040,
+       IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN    = 0x0080,
+       IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN            = 0x0100,
+       IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN            = 0x0200,
+       IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN    = 0x1000,
+       IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU          = 0x2000,
+       IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN        = 0x4000,
+       IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN  = 0x8000,
+
+       IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW        = 0x0003,
+               IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_20MHZ  = 0x0000,
+               IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_40MHZ  = 0x0001,
+               IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_80MHZ  = 0x0002,
+               IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_160MHZ = 0x0003,
+       IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN  = 0x0004,
+       IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP              = 0x0008,
+       IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS        = 0x00f0,
+       IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW      = 0x0300,
+       IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN = 0x0400,
+       IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU          = 0x0800,
+};
+
 /**
  * ieee80211_get_radiotap_len - get radiotap header length
  */
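
A receiving driver builds one of these records and pushes it in front of the
frame data; per the RX_FLAG_RADIOTAP_HE documentation later in this merge,
mac80211 fills the rate-related fields itself. A minimal sketch (field values
illustrative):

    struct ieee80211_radiotap_he he = {
            .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU |
                                 IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN),
            .data3 = cpu_to_le16(42),       /* example BSS color */
    };

    memcpy(skb_push(skb, sizeof(he)), &he, sizeof(he));
    status->flag |= RX_FLAG_RADIOTAP_HE;    /* see mac80211.h below */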
index 384b90c62c0bce9b60d74383a2c68287a3f30d95..3ca969cbd16117fe15b1521333c5d7a28c8709f7 100644 (file)
@@ -43,7 +43,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
 int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
                    int *addr_len);
 
-struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb);
 int inet_gro_complete(struct sk_buff *skb, int nhoff);
 struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                                 netdev_features_t features);
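
The head parameter changes from a &struct sk_buff ** chain to a list_head, so
flow matching in a gro_receive handler becomes a plain list walk. A hedged
sketch of the new shape (assuming the NAPI_GRO_CB layout is unchanged):

    static struct sk_buff *example_gro_receive(struct list_head *head,
                                               struct sk_buff *skb)
    {
            struct sk_buff *p;

            list_for_each_entry(p, head, list) {
                    if (!NAPI_GRO_CB(p)->same_flow)
                            continue;
                    /* compare protocol headers of p and skb here */
            }

            return NULL;    /* NULL: keep for merging; non-NULL: flush */
    }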
index ed07e3786d98614898bd5ec9804afb425ba82cec..f4272a29dc445ec9745b943578f8288bf92a2e8e 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef __NET_FRAG_H__
 #define __NET_FRAG_H__
 
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
 
 struct netns_frags {
        /* sysctls */
index 83d5b3c2ac421ca29e8ed654dcbf054379e3001b..314be484c69608cf282fa4ad3146bd9bdf40d20b 100644 (file)
@@ -148,6 +148,7 @@ struct inet_cork {
        __s16                   tos;
        char                    priority;
        __u16                   gso_size;
+       u64                     transmit_time;
 };
 
 struct inet_cork_full {
index 0d2281b4b27ac0804176c063de830663762ab980..e44b1a44f67ad447528f1c59f05915157e016154 100644 (file)
@@ -72,13 +72,27 @@ struct ipcm_cookie {
        __be32                  addr;
        int                     oif;
        struct ip_options_rcu   *opt;
-       __u8                    tx_flags;
        __u8                    ttl;
        __s16                   tos;
        char                    priority;
        __u16                   gso_size;
 };
 
+static inline void ipcm_init(struct ipcm_cookie *ipcm)
+{
+       *ipcm = (struct ipcm_cookie) { .tos = -1 };
+}
+
+static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
+                               const struct inet_sock *inet)
+{
+       ipcm_init(ipcm);
+
+       ipcm->sockc.tsflags = inet->sk.sk_tsflags;
+       ipcm->oif = inet->sk.sk_bound_dev_if;
+       ipcm->addr = inet->inet_saddr;
+}
+
 #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
 #define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
 
@@ -138,6 +152,8 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
                          struct ip_options_rcu *opt);
 int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
           struct net_device *orig_dev);
+void ip_list_rcv(struct list_head *head, struct packet_type *pt,
+                struct net_device *orig_dev);
 int ip_local_deliver(struct sk_buff *skb);
 int ip_mr_input(struct sk_buff *skb);
 int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
@@ -148,7 +164,8 @@ void ip_send_check(struct iphdr *ip);
 int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 
-int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
+int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
+                   __u8 tos);
 void ip_init(void);
 int ip_append_data(struct sock *sk, struct flowi4 *fl4,
                   int getfrag(void *from, char *to, int offset, int len,
@@ -174,6 +191,12 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
                            struct ipcm_cookie *ipc, struct rtable **rtp,
                            struct inet_cork *cork, unsigned int flags);
 
+static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
+                               struct flowi *fl)
+{
+       return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
+}
+
 static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
 {
        return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
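
The two initializers centralize cookie setup that each IPv4 sendmsg path
previously open-coded. A minimal sketch of the intended call pattern
(surrounding transmit logic elided):

    struct ipcm_cookie ipc;
    int err;

    ipcm_init_sk(&ipc, inet_sk(sk));  /* tsflags, oif, saddr from the socket */
    if (msg->msg_controllen) {
            err = ip_cmsg_send(sk, msg, &ipc, false);
            if (unlikely(err))
                    return err;
    }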
index 5cba71d2dc44b9ea2366725ff68c9f668f639345..71b9043aa0e7995c7e61f17b4493acd99410d1bc 100644 (file)
@@ -170,6 +170,7 @@ struct fib6_info {
                                        unused:3;
 
        struct fib6_nh                  fib6_nh;
+       struct rcu_head                 rcu;
 };
 
 struct rt6_info {
@@ -273,7 +274,7 @@ static inline void ip6_rt_put(struct rt6_info *rt)
 }
 
 struct fib6_info *fib6_info_alloc(gfp_t gfp_flags);
-void fib6_info_destroy(struct fib6_info *f6i);
+void fib6_info_destroy_rcu(struct rcu_head *head);
 
 static inline void fib6_info_hold(struct fib6_info *f6i)
 {
@@ -283,7 +284,7 @@ static inline void fib6_info_hold(struct fib6_info *f6i)
 static inline void fib6_info_release(struct fib6_info *f6i)
 {
        if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
-               fib6_info_destroy(f6i);
+               call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
 }
 
 enum fib6_walk_state {
index 90ff430f5e9d04b1899ccadbc888f6f1376921b4..b0d022ff6ea1702037b84a038f3c81ce56540aa4 100644 (file)
@@ -466,10 +466,12 @@ static inline void ip_tunnel_info_opts_get(void *to,
 }
 
 static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
-                                          const void *from, int len)
+                                          const void *from, int len,
+                                          __be16 flags)
 {
        memcpy(ip_tunnel_info_opts(info), from, len);
        info->options_len = len;
+       info->key.tun_flags |= flags;
 }
 
 static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
@@ -511,9 +513,11 @@ static inline void ip_tunnel_info_opts_get(void *to,
 }
 
 static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
-                                          const void *from, int len)
+                                          const void *from, int len,
+                                          __be16 flags)
 {
        info->options_len = 0;
+       info->key.tun_flags |= flags;
 }
 
 #endif /* CONFIG_INET */
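
Folding the flag update into the helper keeps options_len and key.tun_flags in
sync. Callers now pass the flag matching the metadata they copied, e.g.
(sketch; TUNNEL_GENEVE_OPT is the existing tunnel option flag):

    ip_tunnel_info_opts_set(info, opts, opts_len, TUNNEL_GENEVE_OPT);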
index 16475c269749a72f3c487e102e50cabff797317e..aa6fd11a887ca6f31e330bf013d757ed7fc1d5f5 100644 (file)
@@ -294,6 +294,7 @@ struct ipv6_fl_socklist {
 };
 
 struct ipcm6_cookie {
+       struct sockcm_cookie sockc;
        __s16 hlimit;
        __s16 tclass;
        __s8  dontfrag;
@@ -301,6 +302,25 @@ struct ipcm6_cookie {
        __u16 gso_size;
 };
 
+static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
+{
+       *ipc6 = (struct ipcm6_cookie) {
+               .hlimit = -1,
+               .tclass = -1,
+               .dontfrag = -1,
+       };
+}
+
+static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
+                                const struct ipv6_pinfo *np)
+{
+       *ipc6 = (struct ipcm6_cookie) {
+               .hlimit = -1,
+               .tclass = np->tclass,
+               .dontfrag = np->dontfrag,
+       };
+}
+
 static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
 {
        struct ipv6_txoptions *opt;
@@ -922,6 +942,8 @@ static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6)
 
 int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
             struct packet_type *pt, struct net_device *orig_dev);
+void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
+                  struct net_device *orig_dev);
 
 int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
 
@@ -938,8 +960,7 @@ int ip6_append_data(struct sock *sk,
                                int odd, struct sk_buff *skb),
                    void *from, int length, int transhdrlen,
                    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
-                   struct rt6_info *rt, unsigned int flags,
-                   const struct sockcm_cookie *sockc);
+                   struct rt6_info *rt, unsigned int flags);
 
 int ip6_push_pending_frames(struct sock *sk);
 
@@ -956,8 +977,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
                             void *from, int length, int transhdrlen,
                             struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
                             struct rt6_info *rt, unsigned int flags,
-                            struct inet_cork_full *cork,
-                            const struct sockcm_cookie *sockc);
+                            struct inet_cork_full *cork);
 
 static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
 {
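
As on the IPv4 side, the IPv6 sendmsg paths switch to these initializers, with
the sockcm cookie now embedded in the ipcm6 cookie. A minimal sketch (the
udp_sock pointer up is illustrative context):

    struct ipcm6_cookie ipc6;

    ipcm6_init(&ipc6);              /* hlimit/tclass/dontfrag left "unset" */
    ipc6.gso_size = up->gso_size;   /* e.g. carry a UDP GSO setting */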
index b0eaeb02d46d14ceb87f6e62d4765959c8383a66..f4c21b5a1242baac0415b3dde8fbc30524690ee7 100644 (file)
@@ -153,6 +153,8 @@ struct iucv_sock_list {
        atomic_t          autobind_name;
 };
 
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
+                           poll_table *wait);
 void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
 void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
diff --git a/include/net/lag.h b/include/net/lag.h
new file mode 100644 (file)
index 0000000..95b880e
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IF_LAG_H
+#define _LINUX_IF_LAG_H
+
+#include <linux/netdevice.h>
+#include <linux/if_team.h>
+#include <net/bonding.h>
+
+static inline bool net_lag_port_dev_txable(const struct net_device *port_dev)
+{
+       if (netif_is_team_port(port_dev))
+               return team_port_dev_txable(port_dev);
+       else
+               return bond_is_active_slave_dev(port_dev);
+}
+
+#endif /* _LINUX_IF_LAG_H */
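
The helper gives drivers a single call that works for both team and bonding
ports. A hedged sketch of a driver skipping transmit-inactive LAG members
(context illustrative):

    if (netif_is_lag_port(port_dev) &&
        !net_lag_port_dev_txable(port_dev))
            return;         /* inactive LAG member: do not transmit */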
index 851a5e19ae320e02044c40cb6c810d6cd0454d4d..5790f55c241df838384145428cc7305cb83904f7 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/ieee80211.h>
 #include <net/cfg80211.h>
 #include <net/codel.h>
+#include <net/ieee80211_radiotap.h>
 #include <asm/unaligned.h>
 
 /**
@@ -162,6 +163,8 @@ enum ieee80211_ac_numbers {
  * @txop: maximum burst time in units of 32 usecs, 0 meaning disabled
  * @acm: is mandatory admission control required for the access category
  * @uapsd: is U-APSD mode enabled for the queue
+ * @mu_edca: is the MU EDCA configured
+ * @mu_edca_param_rec: MU EDCA Parameter Record for HE
  */
 struct ieee80211_tx_queue_params {
        u16 txop;
@@ -170,6 +173,8 @@ struct ieee80211_tx_queue_params {
        u8 aifs;
        bool acm;
        bool uapsd;
+       bool mu_edca;
+       struct ieee80211_he_mu_edca_param_ac_rec mu_edca_param_rec;
 };
 
 struct ieee80211_low_level_stats {
@@ -463,6 +468,15 @@ struct ieee80211_mu_group_data {
  * This structure keeps information about a BSS (and an association
  * to that BSS) that can change during the lifetime of the BSS.
  *
+ * @bss_color: 6-bit value to mark inter-BSS frame, if BSS supports HE
+ * @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE
+ * @multi_sta_back_32bit: supports BA bitmap of 32-bits in Multi-STA BACK
+ * @uora_exists: is the UORA element advertised by AP
+ * @ack_enabled: indicates support for receiving a multi-TID frame that
+ *     solicits either ACK, BACK or both
+ * @uora_ocw_range: UORA element's OCW Range field
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @he_support: does this BSS support HE
  * @assoc: association status
  * @ibss_joined: indicates whether this station is part of an IBSS
  *     or not
@@ -550,6 +564,14 @@ struct ieee80211_mu_group_data {
  */
 struct ieee80211_bss_conf {
        const u8 *bssid;
+       u8 bss_color;
+       u8 htc_trig_based_pkt_ext;
+       bool multi_sta_back_32bit;
+       bool uora_exists;
+       bool ack_enabled;
+       u8 uora_ocw_range;
+       u16 frame_time_rts_th;
+       bool he_support;
        /* association related data */
        bool assoc, ibss_joined;
        bool ibss_creator;
@@ -1106,6 +1128,18 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_AMPDU_EOF_BIT: Value of the EOF bit in the A-MPDU delimiter for this
  *     frame
  * @RX_FLAG_AMPDU_EOF_BIT_KNOWN: The EOF value is known
+ * @RX_FLAG_RADIOTAP_HE: HE radiotap data is present
+ *     (&struct ieee80211_radiotap_he, mac80211 will fill in
+ *      - DATA3_DATA_MCS
+ *      - DATA3_DATA_DCM
+ *      - DATA3_CODING
+ *      - DATA5_GI
+ *      - DATA5_DATA_BW_RU_ALLOC
+ *      - DATA6_NSTS
+ *      - DATA3_STBC
+ *     from the RX info data, so leave those zeroed when building this data)
+ * @RX_FLAG_RADIOTAP_HE_MU: HE MU radiotap data is present
+ *     (&struct ieee80211_radiotap_he_mu)
  */
 enum mac80211_rx_flags {
        RX_FLAG_MMIC_ERROR              = BIT(0),
@@ -1134,6 +1168,8 @@ enum mac80211_rx_flags {
        RX_FLAG_ICV_STRIPPED            = BIT(23),
        RX_FLAG_AMPDU_EOF_BIT           = BIT(24),
        RX_FLAG_AMPDU_EOF_BIT_KNOWN     = BIT(25),
+       RX_FLAG_RADIOTAP_HE             = BIT(26),
+       RX_FLAG_RADIOTAP_HE_MU          = BIT(27),
 };
 
 /**
@@ -1164,6 +1200,7 @@ enum mac80211_rx_encoding {
        RX_ENC_LEGACY = 0,
        RX_ENC_HT,
        RX_ENC_VHT,
+       RX_ENC_HE,
 };
 
 /**
@@ -1198,6 +1235,9 @@ enum mac80211_rx_encoding {
  * @encoding: &enum mac80211_rx_encoding
  * @bw: &enum rate_info_bw
  * @enc_flags: uses bits from &enum mac80211_rx_encoding_flags
+ * @he_ru: HE RU, from &enum nl80211_he_ru_alloc
+ * @he_gi: HE GI, from &enum nl80211_he_gi
+ * @he_dcm: HE DCM value
  * @rx_flags: internal RX flags for mac80211
  * @ampdu_reference: A-MPDU reference number, must be a different value for
  *     each A-MPDU but the same for each subframe within one A-MPDU
@@ -1211,7 +1251,8 @@ struct ieee80211_rx_status {
        u32 flag;
        u16 freq;
        u8 enc_flags;
-       u8 encoding:2, bw:3;
+       u8 encoding:2, bw:3, he_ru:3;
+       u8 he_gi:2, he_dcm:1;
        u8 rate_idx;
        u8 nss;
        u8 rx_flags;
@@ -1770,6 +1811,7 @@ struct ieee80211_sta_rates {
  * @supp_rates: Bitmap of supported rates (per band)
  * @ht_cap: HT capabilities of this STA; restricted to our own capabilities
  * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
+ * @he_cap: HE capabilities of this STA
  * @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU
  *     that this station is allowed to transmit to us.
  *     Can be modified by driver.
@@ -1805,7 +1847,8 @@ struct ieee80211_sta {
        u16 aid;
        struct ieee80211_sta_ht_cap ht_cap;
        struct ieee80211_sta_vht_cap vht_cap;
-       u8 max_rx_aggregation_subframes;
+       struct ieee80211_sta_he_cap he_cap;
+       u16 max_rx_aggregation_subframes;
        bool wme;
        u8 uapsd_queues;
        u8 max_sp;
@@ -2196,10 +2239,11 @@ enum ieee80211_hw_flags {
  *     it shouldn't be set.
  *
  * @max_tx_aggregation_subframes: maximum number of subframes in an
- *     aggregate an HT driver will transmit. Though ADDBA will advertise
- *     a constant value of 64 as some older APs can crash if the window
- *     size is smaller (an example is LinkSys WRT120N with FW v1.0.07
- *     build 002 Jun 18 2012).
+ *     aggregate an HT/HE device will transmit. In HT AddBA we'll
+ *     advertise a constant value of 64 as some older APs crash if
+ *     the window size is smaller (an example is LinkSys WRT120N
+ *     with FW v1.0.07 build 002 Jun 18 2012).
+ *     For AddBA to HE capable peers this value will be used.
  *
  * @max_tx_fragments: maximum number of tx buffers per (A)-MSDU, sum
  *     of 1 + skb_shinfo(skb)->nr_frags for each skb in the frag_list.
@@ -2216,6 +2260,8 @@ enum ieee80211_hw_flags {
  *     the default is _GI | _BANDWIDTH.
  *     Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values.
  *
+ * @radiotap_he: HE radiotap validity flags
+ *
  * @radiotap_timestamp: Information for the radiotap timestamp field; if the
  *     'units_pos' member is set to a non-negative value it must be set to
  *     a combination of a IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and a
@@ -2263,8 +2309,8 @@ struct ieee80211_hw {
        u8 max_rates;
        u8 max_report_rates;
        u8 max_rate_tries;
-       u8 max_rx_aggregation_subframes;
-       u8 max_tx_aggregation_subframes;
+       u16 max_rx_aggregation_subframes;
+       u16 max_tx_aggregation_subframes;
        u8 max_tx_fragments;
        u8 offchannel_tx_hw_queue;
        u8 radiotap_mcs_details;
@@ -2904,7 +2950,7 @@ struct ieee80211_ampdu_params {
        struct ieee80211_sta *sta;
        u16 tid;
        u16 ssn;
-       u8 buf_size;
+       u16 buf_size;
        bool amsdu;
        u16 timeout;
 };
index 47e35cce3b648d696b127ed7bd643036128795f6..a71264d75d7f98d28f92dfd861ffe6e0d39c0198 100644 (file)
@@ -128,6 +128,7 @@ struct net {
 #endif
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
        struct netns_nf_frag    nf_frag;
+       struct ctl_table_header *nf_frag_frags_hdr;
 #endif
        struct sock             *nfnl;
        struct sock             *nfnl_stash;
index ba9fa4592f2b238fb642cbefce130d7a100bb0ef..0e355f4a3d76365e2cfab25b0febfd586b245493 100644 (file)
@@ -4,7 +4,7 @@
 #include <linux/in.h>
 #include <linux/in6.h>
 #include <linux/netdevice.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
 #include <linux/rcupdate.h>
 #include <linux/netfilter/nf_conntrack_tuple_common.h>
 #include <net/dst.h>
index e811ac07ea94a2c32dd1d36fe59028ec94405571..0d3920896d5023647b9182e8c477b4073950e806 100644 (file)
@@ -106,7 +106,8 @@ int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
 int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
                           u8 proto, int fragment, unsigned int offset,
                           unsigned int logflags);
-void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk);
+void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
+                           struct sock *sk);
 void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
                               unsigned int hooknum, const struct sk_buff *skb,
                               const struct net_device *in,
index 24c78183a4c262e086c29cde1e61b7f397d8bab0..16a842456189f2fc1a3363685b5dd4310a32b2b8 100644 (file)
@@ -9,12 +9,7 @@ struct net;
 static inline u32 net_hash_mix(const struct net *net)
 {
 #ifdef CONFIG_NET_NS
-       /*
-        * shift this right to eliminate bits, that are
-        * always zeroed
-        */
-
-       return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
+       return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
 #else
        return 0;
 #endif
index c978a31b0f846210b4c2a369af960d5349b5395a..762ac9931b6251152b6ee0e5780df0f7b073f3e6 100644 (file)
@@ -109,7 +109,6 @@ struct netns_ipv6 {
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 struct netns_nf_frag {
-       struct netns_sysctl_ipv6 sysctl;
        struct netns_frags      frags;
 };
 #endif
index a3c1a2c47cd4bfd868004548cdf1ef7a361fa4c6..e4252a176eecaeb112612623b2b968e34e1996f6 100644 (file)
@@ -13,6 +13,7 @@ struct tcf_walker {
        int     stop;
        int     skip;
        int     count;
+       unsigned long cookie;
        int     (*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
 };
 
@@ -73,11 +74,13 @@ void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
 unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
 struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
                                             tc_setup_cb_t *cb, void *cb_ident,
-                                            void *cb_priv);
+                                            void *cb_priv,
+                                            struct netlink_ext_ack *extack);
 int tcf_block_cb_register(struct tcf_block *block,
                          tc_setup_cb_t *cb, void *cb_ident,
-                         void *cb_priv);
-void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb);
+                         void *cb_priv, struct netlink_ext_ack *extack);
+void __tcf_block_cb_unregister(struct tcf_block *block,
+                              struct tcf_block_cb *block_cb);
 void tcf_block_cb_unregister(struct tcf_block *block,
                             tc_setup_cb_t *cb, void *cb_ident);
 
@@ -111,6 +114,11 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 {
 }
 
+static inline bool tcf_block_shared(struct tcf_block *block)
+{
+       return false;
+}
+
 static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
 {
        return NULL;
@@ -161,7 +169,8 @@ unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
 static inline
 struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
                                             tc_setup_cb_t *cb, void *cb_ident,
-                                            void *cb_priv)
+                                            void *cb_priv,
+                                            struct netlink_ext_ack *extack)
 {
        return NULL;
 }
@@ -169,13 +178,14 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
 static inline
 int tcf_block_cb_register(struct tcf_block *block,
                          tc_setup_cb_t *cb, void *cb_ident,
-                         void *cb_priv)
+                         void *cb_priv, struct netlink_ext_ack *extack)
 {
        return 0;
 }
 
 static inline
-void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
+void __tcf_block_cb_unregister(struct tcf_block *block,
+                              struct tcf_block_cb *block_cb)
 {
 }
 
@@ -596,6 +606,7 @@ struct tc_block_offload {
        enum tc_block_command command;
        enum tcf_block_binder_type binder_type;
        struct tcf_block *block;
+       struct netlink_ext_ack *extack;
 };
 
 struct tc_cls_common_offload {
@@ -771,6 +782,7 @@ struct tc_mqprio_qopt_offload {
 struct tc_cookie {
        u8  *data;
        u32 len;
+       struct rcu_head rcu;
 };
 
 struct tc_qopt_offload_stats {
index 815b92a23936f8f8ce49432b1e5c4c7827580918..7dc769e5452ba6e5eaa92f10808bf2cbfb1d152a 100644 (file)
@@ -72,6 +72,8 @@ struct qdisc_watchdog {
        struct Qdisc    *qdisc;
 };
 
+void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
+                                clockid_t clockid);
 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
 void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
 
@@ -153,4 +155,9 @@ struct tc_cbs_qopt_offload {
        s32 sendslope;
 };
 
+struct tc_etf_qopt_offload {
+       u8 enable;
+       s32 queue;
+};
+
 #endif
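
The clockid-aware initializer lets a time-based qdisc arm its watchdog hrtimer
on the clock its schedule actually uses instead of CLOCK_MONOTONIC. A minimal
sketch from a hypothetical init path of an ETF-style qdisc:

    qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_TAI);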
index 6488daa32f829563c3d26ffe075022f3c9ededf5..7432100027b7f14d4bbd4109901bd8b144a39415 100644 (file)
@@ -20,6 +20,9 @@ struct qdisc_walker;
 struct tcf_walker;
 struct module;
 
+typedef int tc_setup_cb_t(enum tc_setup_type type,
+                         void *type_data, void *cb_priv);
+
 struct qdisc_rate_table {
        struct tc_ratespec rate;
        u32             data[256];
@@ -256,6 +259,9 @@ struct tcf_proto_ops {
                                          bool *last,
                                          struct netlink_ext_ack *);
        void                    (*walk)(struct tcf_proto*, struct tcf_walker *arg);
+       int                     (*reoffload)(struct tcf_proto *tp, bool add,
+                                            tc_setup_cb_t *cb, void *cb_priv,
+                                            struct netlink_ext_ack *extack);
        void                    (*bind_class)(void *, u32, unsigned long);
 
        /* rtnetlink specific */
@@ -330,6 +336,21 @@ static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
        block->offloadcnt--;
 }
 
+static inline void
+tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
+                         u32 *flags, bool add)
+{
+       if (add) {
+               if (!*cnt)
+                       tcf_block_offload_inc(block, flags);
+               (*cnt)++;
+       } else {
+               (*cnt)--;
+               if (!*cnt)
+                       tcf_block_offload_dec(block, flags);
+       }
+}
+
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
        struct qdisc_skb_cb *qcb;
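
The counter helper keeps a filter's per-block offload flag consistent as
individual hardware callbacks come and go, which the new ->reoffload() hook
relies on. A hedged sketch (f is a hypothetical filter carrying an in_hw_count
and flags word):

    /* after a callback accepts the filter in hardware: */
    tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags, true);

    /* and when that hardware instance is removed: */
    tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags, false);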
index 30b3e2fe240a88e3396a8b3664fd879c93fd30bf..8c2caa370e0f683ea764bc0d72da6dfa93699673 100644 (file)
@@ -109,7 +109,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 int sctp_inet_listen(struct socket *sock, int backlog);
 void sctp_write_space(struct sock *sk);
 void sctp_data_ready(struct sock *sk);
-__poll_t sctp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t sctp_poll(struct file *file, struct socket *sock,
+               poll_table *wait);
 void sctp_sock_rfree(struct sk_buff *skb);
 void sctp_copy_sock(struct sock *newsk, struct sock *sk,
                    struct sctp_association *asoc);
index dbe1b911a24d31e920f4c31d3c945857b760424b..ab869e0d83267b730d7b0758dbf90f233962c714 100644 (file)
@@ -48,7 +48,7 @@
 #define __sctp_structs_h__
 
 #include <linux/ktime.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
 #include <linux/socket.h>      /* linux/in.h needs this!!    */
 #include <linux/in.h>          /* We get struct sockaddr_in. */
 #include <linux/in6.h>         /* We get struct in6_addr     */
@@ -193,6 +193,9 @@ struct sctp_sock {
        /* This is the max_retrans value for new associations. */
        __u16 pathmaxrxt;
 
+       __u32 flowlabel;
+       __u8  dscp;
+
        /* The initial Path MTU to use for new associations. */
        __u32 pathmtu;
 
@@ -220,6 +223,7 @@ struct sctp_sock {
        __u32 adaptation_ind;
        __u32 pd_point;
        __u16   nodelay:1,
+               reuse:1,
                disable_fragments:1,
                v4mapped:1,
                frag_interleave:1,
@@ -894,6 +898,9 @@ struct sctp_transport {
         */
        __u16 pathmaxrxt;
 
+       __u32 flowlabel;
+       __u8  dscp;
+
        /* This is the partially failed retrans value for the transport
         * and will be initialized from the assocs value.  This can be changed
         * using the SCTP_PEER_ADDR_THLDS socket option
@@ -1771,6 +1778,9 @@ struct sctp_association {
         */
        __u16 pathmaxrxt;
 
+       __u32 flowlabel;
+       __u8  dscp;
+
        /* Flag that path mtu update is pending */
        __u8   pmtu_pending;
 
index e029e301faa51f45d3fc3c6b87daf79243d6ad44..2567941a2f32ff4896380296463462e43c5d760f 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/ipv6.h>
 #include <net/lwtunnel.h>
 #include <linux/seg6.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
 
 static inline void update_csum_diff4(struct sk_buff *skb, __be32 from,
                                     __be32 to)
index 69c3a106056b4dbe9db91a5295431190aa31c4d5..7fda469e27583a5bf9adfb48230cff385fc9d065 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/route.h>
 #include <net/seg6.h>
 #include <linux/seg6_hmac.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
 
 #define SEG6_HMAC_MAX_DIGESTSIZE       160
 #define SEG6_HMAC_RING_SIZE            256
index 8381d163fefad5b2dd299d5bbc1fd67900bd40e4..9ef49f8b1002a42044e3c70fb86666aacb46a074 100644 (file)
@@ -11,6 +11,8 @@
 #ifndef _SMC_H
 #define _SMC_H
 
+#define SMC_MAX_PNETID_LEN     16      /* Max. length of PNET id */
+
 struct smc_hashinfo {
        rwlock_t lock;
        struct hlist_head ht;
@@ -18,4 +20,67 @@ struct smc_hashinfo {
 
 int smc_hash_sk(struct sock *sk);
 void smc_unhash_sk(struct sock *sk);
+
+/* SMCD/ISM device driver interface */
+struct smcd_dmb {
+       u64 dmb_tok;
+       u64 rgid;
+       u32 dmb_len;
+       u32 sba_idx;
+       u32 vlan_valid;
+       u32 vlan_id;
+       void *cpu_addr;
+       dma_addr_t dma_addr;
+};
+
+#define ISM_EVENT_DMB  0
+#define ISM_EVENT_GID  1
+#define ISM_EVENT_SWR  2
+
+struct smcd_event {
+       u32 type;
+       u32 code;
+       u64 tok;
+       u64 time;
+       u64 info;
+};
+
+struct smcd_dev;
+
+struct smcd_ops {
+       int (*query_remote_gid)(struct smcd_dev *dev, u64 rgid, u32 vid_valid,
+                               u32 vid);
+       int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
+       int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
+       int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
+       int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
+       int (*set_vlan_required)(struct smcd_dev *dev);
+       int (*reset_vlan_required)(struct smcd_dev *dev);
+       int (*signal_event)(struct smcd_dev *dev, u64 rgid, u32 trigger_irq,
+                           u32 event_code, u64 info);
+       int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx,
+                        bool sf, unsigned int offset, void *data,
+                        unsigned int size);
+};
+
+struct smcd_dev {
+       const struct smcd_ops *ops;
+       struct device dev;
+       void *priv;
+       u64 local_gid;
+       struct list_head list;
+       spinlock_t lock;
+       struct smc_connection **conn;
+       struct list_head vlan;
+       struct workqueue_struct *event_wq;
+       u8 pnetid[SMC_MAX_PNETID_LEN];
+};
+
+struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
+                               const struct smcd_ops *ops, int max_dmbs);
+int smcd_register_dev(struct smcd_dev *smcd);
+void smcd_unregister_dev(struct smcd_dev *smcd);
+void smcd_free_dev(struct smcd_dev *smcd);
+void smcd_handle_event(struct smcd_dev *dev, struct smcd_event *event);
+void smcd_handle_irq(struct smcd_dev *dev, unsigned int bit);
 #endif /* _SMC_H */
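
The smcd_* declarations above form the interface an ISM device driver implements. A hedged sketch of the expected registration flow, using only functions declared in this header; the my_ism_* callbacks and MY_ISM_MAX_DMBS are hypothetical placeholders:

	static const struct smcd_ops my_ism_ops = {
		.register_dmb   = my_ism_register_dmb,   /* hypothetical */
		.unregister_dmb = my_ism_unregister_dmb, /* hypothetical */
		/* ... remaining callbacks from struct smcd_ops ... */
	};

	static int my_ism_probe(struct device *parent)
	{
		struct smcd_dev *smcd;
		int rc;

		smcd = smcd_alloc_dev(parent, "ism", &my_ism_ops, MY_ISM_MAX_DMBS);
		if (!smcd)
			return -ENOMEM;
		rc = smcd_register_dev(smcd);
		if (rc)
			smcd_free_dev(smcd);
		return rc;
	}

Events raised by the device would then be funneled to smcd_handle_event() or smcd_handle_irq() from the driver's interrupt path.
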
index b3b75419eafecb5cf170830059f0dc667141646d..83b747538bd0d5e0d57ffe97fd73afe5122ec17a 100644 (file)
@@ -139,6 +139,7 @@ typedef __u64 __bitwise __addrpair;
  *     @skc_node: main hash linkage for various protocol lookup tables
  *     @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
  *     @skc_tx_queue_mapping: tx queue number for this connection
+ *     @skc_rx_queue_mapping: rx queue number for this connection
  *     @skc_flags: place holder for sk_flags
  *             %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
  *             %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -214,7 +215,10 @@ struct sock_common {
                struct hlist_node       skc_node;
                struct hlist_nulls_node skc_nulls_node;
        };
-       int                     skc_tx_queue_mapping;
+       unsigned short          skc_tx_queue_mapping;
+#ifdef CONFIG_XPS
+       unsigned short          skc_rx_queue_mapping;
+#endif
        union {
                int             skc_incoming_cpu;
                u32             skc_rcv_wnd;
@@ -315,6 +319,9 @@ struct sock_common {
   *    @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
   *    @sk_reuseport_cb: reuseport group container
   *    @sk_rcu: used during RCU grace period
+  *    @sk_clockid: clockid used by time-based scheduling (SO_TXTIME)
+  *    @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
+  *    @sk_txtime_unused: unused txtime flags
   */
 struct sock {
        /*
@@ -326,6 +333,9 @@ struct sock {
 #define sk_nulls_node          __sk_common.skc_nulls_node
 #define sk_refcnt              __sk_common.skc_refcnt
 #define sk_tx_queue_mapping    __sk_common.skc_tx_queue_mapping
+#ifdef CONFIG_XPS
+#define sk_rx_queue_mapping    __sk_common.skc_rx_queue_mapping
+#endif
 
 #define sk_dontcopy_begin      __sk_common.skc_dontcopy_begin
 #define sk_dontcopy_end                __sk_common.skc_dontcopy_end
@@ -468,6 +478,12 @@ struct sock {
        u8                      sk_shutdown;
        u32                     sk_tskey;
        atomic_t                sk_zckey;
+
+       u8                      sk_clockid;
+       u8                      sk_txtime_deadline_mode : 1,
+                               sk_txtime_report_errors : 1,
+                               sk_txtime_unused : 6;
+
        struct socket           *sk_socket;
        void                    *sk_user_data;
 #ifdef CONFIG_SECURITY
@@ -783,6 +799,7 @@ enum sock_flags {
        SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
        SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
        SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
+       SOCK_TXTIME,
 };
 
 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
@@ -1578,10 +1595,17 @@ void sock_kzfree_s(struct sock *sk, void *mem, int size);
 void sk_send_sigurg(struct sock *sk);
 
 struct sockcm_cookie {
+       u64 transmit_time;
        u32 mark;
        u16 tsflags;
 };
 
+static inline void sockcm_init(struct sockcm_cookie *sockc,
+                              const struct sock *sk)
+{
+       *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
+}
+
 int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
                     struct sockcm_cookie *sockc);
 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
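
sockcm_init() gives every protocol one place to default-initialize the cookie, so new fields such as transmit_time start zeroed while tsflags is still seeded from the socket. A sketch of the intended call pattern in a sendmsg path, mirroring how the IPv4/IPv6 callers use it (not a verbatim excerpt):

	struct sockcm_cookie sockc;
	int err;

	sockcm_init(&sockc, sk);	/* tsflags from sk, everything else zero */
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto out_err;
	}
	/* sockc.transmit_time now carries any SCM_TXTIME request */
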
@@ -1681,19 +1705,58 @@ static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
 
 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
 {
+       /* sk_tx_queue_mapping accepts only up to a 16-bit value */
+       if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
+               return;
        sk->sk_tx_queue_mapping = tx_queue;
 }
 
+#define NO_QUEUE_MAPPING       USHRT_MAX
+
 static inline void sk_tx_queue_clear(struct sock *sk)
 {
-       sk->sk_tx_queue_mapping = -1;
+       sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
 }
 
 static inline int sk_tx_queue_get(const struct sock *sk)
 {
-       return sk ? sk->sk_tx_queue_mapping : -1;
+       if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
+               return sk->sk_tx_queue_mapping;
+
+       return -1;
+}
+
+static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+       if (skb_rx_queue_recorded(skb)) {
+               u16 rx_queue = skb_get_rx_queue(skb);
+
+               if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
+                       return;
+
+               sk->sk_rx_queue_mapping = rx_queue;
+       }
+#endif
 }
 
+static inline void sk_rx_queue_clear(struct sock *sk)
+{
+#ifdef CONFIG_XPS
+       sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
+#endif
+}
+
+#ifdef CONFIG_XPS
+static inline int sk_rx_queue_get(const struct sock *sk)
+{
+       if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
+               return sk->sk_rx_queue_mapping;
+
+       return -1;
+}
+#endif
+
 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
 {
        sk_tx_queue_clear(sk);
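
Narrowing the mapping from int to unsigned short loses the -1 "unset" encoding, so USHRT_MAX becomes the in-struct sentinel and the accessors above translate it back to -1 for callers. The same pattern as a standalone, runnable sketch:

	#include <limits.h>
	#include <stdio.h>

	#define NO_QUEUE_MAPPING USHRT_MAX

	static unsigned short queue_mapping = NO_QUEUE_MAPPING;

	static int queue_get(void)
	{
		/* translate the sentinel back to the historical -1 */
		return queue_mapping != NO_QUEUE_MAPPING ? queue_mapping : -1;
	}

	int main(void)
	{
		printf("%d\n", queue_get());	/* -1: nothing recorded */
		queue_mapping = 3;
		printf("%d\n", queue_get());	/* 3 */
		return 0;
	}
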
index 227a6f1d02f4c4a2c401648071aecd733aa1a5a7..fac3ad4a86de96ed1acb1b2747f043471b5f4fd3 100644 (file)
@@ -17,6 +17,7 @@ struct tcf_pedit {
        struct tc_pedit_key     *tcfp_keys;
        struct tcf_pedit_key_ex *tcfp_keys_ex;
 };
+
 #define to_pedit(a) ((struct tcf_pedit *)a)
 
 static inline bool is_tcf_pedit(const struct tc_action *a)
index 19cd3d3458049a2fe6d317bc48ff9e5c0524587b..911bbac838a271b5d3690fc890338f19201c521b 100644 (file)
 #include <net/act_api.h>
 #include <linux/tc_act/tc_skbedit.h>
 
+struct tcf_skbedit_params {
+       u32 flags;
+       u32 priority;
+       u32 mark;
+       u32 mask;
+       u16 queue_mapping;
+       u16 ptype;
+       struct rcu_head rcu;
+};
+
 struct tcf_skbedit {
-       struct tc_action        common;
-       u32             flags;
-       u32             priority;
-       u32             mark;
-       u32             mask;
-       u16             queue_mapping;
-       u16             ptype;
+       struct tc_action common;
+       struct tcf_skbedit_params __rcu *params;
 };
 #define to_skbedit(a) ((struct tcf_skbedit *)a)
 
@@ -37,15 +42,27 @@ struct tcf_skbedit {
 static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
 {
 #ifdef CONFIG_NET_CLS_ACT
-       if (a->ops && a->ops->type == TCA_ACT_SKBEDIT)
-               return to_skbedit(a)->flags == SKBEDIT_F_MARK;
+       u32 flags;
+
+       if (a->ops && a->ops->type == TCA_ACT_SKBEDIT) {
+               rcu_read_lock();
+               flags = rcu_dereference(to_skbedit(a)->params)->flags;
+               rcu_read_unlock();
+               return flags == SKBEDIT_F_MARK;
+       }
 #endif
        return false;
 }
 
 static inline u32 tcf_skbedit_mark(const struct tc_action *a)
 {
-       return to_skbedit(a)->mark;
+       u32 mark;
+
+       rcu_read_lock();
+       mark = rcu_dereference(to_skbedit(a)->params)->mark;
+       rcu_read_unlock();
+
+       return mark;
 }
 
 #endif /* __NET_TC_SKBEDIT_H */
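
Splitting the skbedit fields into an RCU-managed tcf_skbedit_params lets the fast path read them locklessly, as the rcu_dereference() readers above show. The matching writer side would swap in a fresh copy roughly like this (a sketch; the real init path also handles netlink parsing and locking):

	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params_new, *params_old;

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (!params_new)
		return -ENOMEM;
	params_new->flags = flags;
	params_new->mark = mark;
	/* ... fill the remaining fields ... */

	params_old = rtnl_dereference(d->params);
	rcu_assign_pointer(d->params, params_new);
	if (params_old)
		kfree_rcu(params_old, rcu);	/* freed after a grace period */
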
index 0448e7c5d2b4062f8ceecb5b38882385a1be7ead..582304955087f37cf36e48771f699e1421e2869a 100644 (file)
@@ -388,7 +388,8 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
 void tcp_close(struct sock *sk, long timeout);
 void tcp_init_sock(struct sock *sk);
 void tcp_init_transfer(struct sock *sk, int bpf_op);
-__poll_t tcp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t tcp_poll(struct file *file, struct socket *sock,
+                     struct poll_table_struct *wait);
 int tcp_getsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, int __user *optlen);
 int tcp_setsockopt(struct sock *sk, int level, int optname,
@@ -471,19 +472,20 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
  */
 static inline void tcp_synq_overflow(const struct sock *sk)
 {
-       unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
-       unsigned long now = jiffies;
+       unsigned int last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+       unsigned int now = jiffies;
 
-       if (time_after(now, last_overflow + HZ))
+       if (time_after32(now, last_overflow + HZ))
                tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
 }
 
 /* syncookies: no recent synqueue overflow on this listening socket? */
 static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 {
-       unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+       unsigned int last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+       unsigned int now = jiffies;
 
-       return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
+       return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID);
 }
 
 static inline u32 tcp_cookie_time(void)
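
time_after32() keeps the comparison safe when the timestamp lives in a 32-bit field: the subtraction is done in 32 bits and the result interpreted as signed, so wraparound cancels out. A standalone sketch of the idiom, assuming the usual kernel definition (roughly (s32)(a - b) > 0):

	#include <stdint.h>
	#include <stdio.h>

	/* wrap-safe "a is after b" for 32-bit timestamps */
	static int after32(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) > 0;
	}

	int main(void)
	{
		uint32_t before_wrap = 0xfffffff0u;
		uint32_t after_wrap  = 0x00000010u;	/* counter wrapped past zero */

		printf("%d\n", after32(after_wrap, before_wrap));	/* 1: still ordered */
		return 0;
	}
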
@@ -953,6 +955,8 @@ struct rate_sample {
        u32  prior_delivered;   /* tp->delivered at "prior_mstamp" */
        s32  delivered;         /* number of packets delivered over interval */
        long interval_us;       /* time for tp->delivered to incr "delivered" */
+       u32 snd_interval_us;    /* snd interval for delivered packets */
+       u32 rcv_interval_us;    /* rcv interval for delivered packets */
        long rtt_us;            /* RTT of last (S)ACKed packet (or -1) */
        int  losses;            /* number of packets marked lost upon ACK */
        u32  acked_sacked;      /* number of packets newly (S)ACKed upon ACK */
@@ -1184,6 +1188,17 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
        return tp->is_cwnd_limited;
 }
 
+/* BBR congestion control needs pacing.
+ * The same applies to SO_MAX_PACING_RATE.
+ * The sch_fq packet scheduler handles pacing efficiently,
+ * but is not always installed/used.
+ * Return true if the TCP stack should pace packets itself.
+ */
+static inline bool tcp_needs_internal_pacing(const struct sock *sk)
+{
+       return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
+}
+
 /* Something is really bad, we could not queue an additional packet,
  * because qdisc is full or receiver sent a 0 window.
  * We do not want to add fuel to the fire, or abort too early,
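
A hedged sketch of how the transmit path can consult this helper; compute_pacing_delay_ns() is a hypothetical stand-in for the rate calculation, while pacing_timer is the hrtimer struct tcp_sock already carries for internal pacing:

	if (tcp_needs_internal_pacing(sk)) {
		/* no fq qdisc pacing for us: arm our own hrtimer and defer */
		u64 delay_ns = compute_pacing_delay_ns(sk, skb); /* hypothetical */

		hrtimer_start(&tcp_sk(sk)->pacing_timer,
			      ktime_add_ns(ktime_get(), delay_ns),
			      HRTIMER_MODE_ABS_PINNED);
		return;
	}
	/* otherwise transmit immediately */
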
@@ -1361,7 +1376,8 @@ static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
 {
        if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
                return true;
-       if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
+       if (unlikely(!time_before32(ktime_get_seconds(),
+                                   rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
                return true;
        /*
         * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
@@ -1391,7 +1407,8 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
 
           However, we can relax time bounds for RST segments to MSL.
         */
-       if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
+       if (rst && !time_before32(ktime_get_seconds(),
+                                 rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
                return false;
        return true;
 }
@@ -1777,7 +1794,7 @@ void tcp_v4_destroy_sock(struct sock *sk);
 
 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                                netdev_features_t features);
-struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
 int tcp_gro_complete(struct sk_buff *skb);
 
 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
index 7f84ea3e217cf5e3f78698ee63bc9dced179caed..d8b3b6578c010e46972af88b52d9bee0bc109140 100644 (file)
@@ -83,6 +83,16 @@ struct tls_device {
        void (*unhash)(struct tls_device *device, struct sock *sk);
 };
 
+enum {
+       TLS_BASE,
+       TLS_SW,
+#ifdef CONFIG_TLS_DEVICE
+       TLS_HW,
+#endif
+       TLS_HW_RECORD,
+       TLS_NUM_CONFIG,
+};
+
 struct tls_sw_context_tx {
        struct crypto_aead *aead_send;
        struct crypto_wait async_wait;
@@ -109,7 +119,8 @@ struct tls_sw_context_rx {
 
        struct strparser strp;
        void (*saved_data_ready)(struct sock *sk);
-       __poll_t (*sk_poll_mask)(struct socket *sock, __poll_t events);
+       unsigned int (*sk_poll)(struct file *file, struct socket *sock,
+                               struct poll_table_struct *wait);
        struct sk_buff *recv_pkt;
        u8 control;
        bool decrypted;
@@ -127,7 +138,7 @@ struct tls_record_info {
        skb_frag_t frags[MAX_SKB_FRAGS];
 };
 
-struct tls_offload_context {
+struct tls_offload_context_tx {
        struct crypto_aead *aead_send;
        spinlock_t lock;        /* protects records list */
        struct list_head records_list;
@@ -146,8 +157,8 @@ struct tls_offload_context {
 #define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
 };
 
-#define TLS_OFFLOAD_CONTEXT_SIZE                                               \
-       (ALIGN(sizeof(struct tls_offload_context), sizeof(void *)) +           \
+#define TLS_OFFLOAD_CONTEXT_SIZE_TX                                            \
+       (ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) +        \
         TLS_DRIVER_STATE_SIZE)
 
 enum {
@@ -196,6 +207,7 @@ struct tls_context {
        int (*push_pending_record)(struct sock *sk, int flags);
 
        void (*sk_write_space)(struct sock *sk);
+       void (*sk_destruct)(struct sock *sk);
        void (*sk_proto_close)(struct sock *sk, long timeout);
 
        int  (*setsockopt)(struct sock *sk, int level,
@@ -208,13 +220,27 @@ struct tls_context {
        void (*unhash)(struct sock *sk);
 };
 
+struct tls_offload_context_rx {
+       /* sw must be the first member of tls_offload_context_rx */
+       struct tls_sw_context_rx sw;
+       atomic64_t resync_req;
+       u8 driver_state[];
+       /* The TLS layer reserves room for driver-specific state.
+        * Currently the belief is that there is not enough
+        * driver-specific state to justify another layer of indirection.
+        */
+};
+
+#define TLS_OFFLOAD_CONTEXT_SIZE_RX                                    \
+       (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
+        TLS_DRIVER_STATE_SIZE)
+
 int wait_on_pending_writer(struct sock *sk, long *timeo);
 int tls_sk_query(struct sock *sk, int optname, char __user *optval,
                int __user *optlen);
 int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
                  unsigned int optlen);
 
-
 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tls_sw_sendpage(struct sock *sk, struct page *page,
@@ -222,9 +248,11 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
 void tls_sw_close(struct sock *sk, long timeout);
 void tls_sw_free_resources_tx(struct sock *sk);
 void tls_sw_free_resources_rx(struct sock *sk);
+void tls_sw_release_resources_rx(struct sock *sk);
 int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                   int nonblock, int flags, int *addr_len);
-__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events);
+unsigned int tls_sw_poll(struct file *file, struct socket *sock,
+                        struct poll_table_struct *wait);
 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
                           struct pipe_inode_info *pipe,
                           size_t len, unsigned int flags);
@@ -237,7 +265,7 @@ void tls_device_sk_destruct(struct sock *sk);
 void tls_device_init(void);
 void tls_device_cleanup(void);
 
-struct tls_record_info *tls_get_record(struct tls_offload_context *context,
+struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
                                       u32 seq, u64 *p_record_sn);
 
 static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
@@ -287,11 +315,19 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
        return tls_ctx->pending_open_record_frags;
 }
 
+struct sk_buff *
+tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
+                     struct sk_buff *skb);
+
 static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 {
-       return sk_fullsock(sk) &&
-              /* matches smp_store_release in tls_set_device_offload */
-              smp_load_acquire(&sk->sk_destruct) == &tls_device_sk_destruct;
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+       return sk_fullsock(sk) &&
+              (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
+               &tls_validate_xmit_skb);
+#else
+       return false;
+#endif
 }
 
 static inline void tls_err_abort(struct sock *sk, int err)
@@ -378,23 +414,47 @@ static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
        return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
 }
 
-static inline struct tls_offload_context *tls_offload_ctx(
-               const struct tls_context *tls_ctx)
+static inline struct tls_offload_context_tx *
+tls_offload_ctx_tx(const struct tls_context *tls_ctx)
 {
-       return (struct tls_offload_context *)tls_ctx->priv_ctx_tx;
+       return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
 }
 
+static inline struct tls_offload_context_rx *
+tls_offload_ctx_rx(const struct tls_context *tls_ctx)
+{
+       return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
+}
+
+/* The TLS context is valid until sk_destruct is called */
+static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
+{
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+       struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+
+       atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1));
+}
+
+
 int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
                      unsigned char *record_type);
 void tls_register_device(struct tls_device *device);
 void tls_unregister_device(struct tls_device *device);
+int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
+int decrypt_skb(struct sock *sk, struct sk_buff *skb,
+               struct scatterlist *sgout);
 
 struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
                                      struct net_device *dev,
                                      struct sk_buff *skb);
 
 int tls_sw_fallback_init(struct sock *sk,
-                        struct tls_offload_context *offload_ctx,
+                        struct tls_offload_context_tx *offload_ctx,
                         struct tls_crypto_info *crypto_info);
 
+int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
+
+void tls_device_offload_cleanup_rx(struct sock *sk);
+void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);
+
 #endif /* _TLS_OFFLOAD_H */
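
tls_offload_rx_resync_request() packs the TCP sequence number into the upper 32 bits of resync_req and sets bit 0 as a "request pending" flag, so one atomic read hands a driver both pieces. A sketch of the consuming side under that assumption (rx_ctx obtained via tls_offload_ctx_rx(); the cmpxchg keeps a racing new request from being lost):

	u64 req = atomic64_read(&rx_ctx->resync_req);

	if (req & 1) {				/* request flag set */
		u32 seq = req >> 32;		/* recover the sequence number */

		/* resync the device's record tracking at 'seq', then clear
		 * the request only if it has not been re-armed meanwhile:
		 */
		atomic64_cmpxchg(&rx_ctx->resync_req, req, 0);
	}
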
index f6a3543e52477d0b3ec6c883fff554c4e924e0d6..a8f6020f1196edc9940cbb6c605a06279db4fd36 100644 (file)
@@ -42,8 +42,7 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
                                    struct sk_buff *skb);
 
 int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
-                         struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
-                         struct sockcm_cookie *sockc);
+                         struct flowi6 *fl6, struct ipcm6_cookie *ipc6);
 
 void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
                               __u16 srcp, __u16 destp, int rqueue, int bucket);
index b1ea8b0f5e6a8ce82602e593acd583170b4a6e73..8482a990b0bb8e781883d3e09ea84a6f345863ed 100644 (file)
@@ -170,8 +170,8 @@ static inline void udp_csum_pull_header(struct sk_buff *skb)
 typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
                                     __be16 dport);
 
-struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
-                                struct udphdr *uh, udp_lookup_t lookup);
+struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+                               struct udphdr *uh, udp_lookup_t lookup);
 int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
 
 struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
@@ -285,7 +285,7 @@ int udp_init_sock(struct sock *sk);
 int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 int __udp_disconnect(struct sock *sk, int flags);
 int udp_disconnect(struct sock *sk, int flags);
-__poll_t udp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
 struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
                                       netdev_features_t features,
                                       bool is_ipv6);
index b95a6927c7185eae2481d21235fa106e8a7ddb94..fe680ab6b15a18aba378d161d57461176b75d8cb 100644 (file)
@@ -65,9 +65,9 @@ static inline int udp_sock_create(struct net *net,
 
 typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
 typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
-typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk,
-                                                    struct sk_buff **head,
-                                                    struct sk_buff *skb);
+typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
+                                                   struct list_head *head,
+                                                   struct sk_buff *skb);
 typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
                                         int nhoff);
 
index 2deea7166a3486f66a2fd954acc3fcfe2b9d6440..fcb033f51d8c3d00945e3ad6e42d9128fb8bf9d2 100644 (file)
@@ -144,4 +144,17 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp)
        return unlikely(xdp->data_meta > xdp->data);
 }
 
+struct xdp_attachment_info {
+       struct bpf_prog *prog;
+       u32 flags;
+};
+
+struct netdev_bpf;
+int xdp_attachment_query(struct xdp_attachment_info *info,
+                        struct netdev_bpf *bpf);
+bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
+                            struct netdev_bpf *bpf);
+void xdp_attachment_setup(struct xdp_attachment_info *info,
+                         struct netdev_bpf *bpf);
+
 #endif /* __LINUX_NET_XDP_H__ */
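
The xdp_attachment_* helpers factor out bookkeeping that every driver's ndo_bpf handler repeats: answer a query from the stored info, check flags on re-attach, and record the new program. A hedged sketch of a driver using them (my_drv_priv and the hardware setup step are hypothetical; XDP_SETUP_PROG/XDP_QUERY_PROG are the standard netdev_bpf commands):

	static int my_drv_xdp(struct net_device *dev, struct netdev_bpf *bpf)
	{
		struct my_drv_priv *priv = netdev_priv(dev);	/* hypothetical */

		switch (bpf->command) {
		case XDP_SETUP_PROG:
			if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
				return -EBUSY;
			/* ... repoint the RX path at bpf->prog ... */
			xdp_attachment_setup(&priv->xdp, bpf);
			return 0;
		case XDP_QUERY_PROG:
			return xdp_attachment_query(&priv->xdp, bpf);
		default:
			return -EINVAL;
		}
	}
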
index 4c6241bc203931dcc6b74de5be72349e741cb6be..6c003995347a3904cda6e57814c50bcf6c0733a7 100644 (file)
@@ -3391,11 +3391,14 @@ int ib_process_cq_direct(struct ib_cq *cq, int budget);
  *
  * Users can examine the cq structure to determine the actual CQ size.
  */
-struct ib_cq *ib_create_cq(struct ib_device *device,
-                          ib_comp_handler comp_handler,
-                          void (*event_handler)(struct ib_event *, void *),
-                          void *cq_context,
-                          const struct ib_cq_init_attr *cq_attr);
+struct ib_cq *__ib_create_cq(struct ib_device *device,
+                            ib_comp_handler comp_handler,
+                            void (*event_handler)(struct ib_event *, void *),
+                            void *cq_context,
+                            const struct ib_cq_init_attr *cq_attr,
+                            const char *caller);
+#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
+       __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
 
 /**
  * ib_resize_cq - Modifies the capacity of the CQ.
index 9c886739246ae4e91d8369f4c690bccd56f27912..00aa72ce0e7c79b18b4f597cd1287df1e2bd4aff 100644 (file)
@@ -223,6 +223,13 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry,
        TP_ARGS(skb)
 );
 
+DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_list_entry,
+
+       TP_PROTO(const struct sk_buff *skb),
+
+       TP_ARGS(skb)
+);
+
 DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry,
 
        TP_PROTO(const struct sk_buff *skb),
index 3176a393110726c4ccea512ab572d4f827a302f7..a0c4b8a3096604a9817a0f78f58409123a300352 100644 (file)
                EM(TCP_CLOSING)                 \
                EMe(TCP_NEW_SYN_RECV)
 
+#define skmem_kind_names                       \
+               EM(SK_MEM_SEND)                 \
+               EMe(SK_MEM_RECV)
+
 /* enums need to be exported to user space */
 #undef EM
 #undef EMe
@@ -44,6 +48,7 @@
 family_names
 inet_protocol_names
 tcp_state_names
+skmem_kind_names
 
 #undef EM
 #undef EMe
@@ -59,6 +64,9 @@ tcp_state_names
 #define show_tcp_state_name(val)        \
        __print_symbolic(val, tcp_state_names)
 
+#define show_skmem_kind_names(val)     \
+       __print_symbolic(val, skmem_kind_names)
+
 TRACE_EVENT(sock_rcvqueue_full,
 
        TP_PROTO(struct sock *sk, struct sk_buff *skb),
@@ -83,9 +91,9 @@ TRACE_EVENT(sock_rcvqueue_full,
 
 TRACE_EVENT(sock_exceed_buf_limit,
 
-       TP_PROTO(struct sock *sk, struct proto *prot, long allocated),
+       TP_PROTO(struct sock *sk, struct proto *prot, long allocated, int kind),
 
-       TP_ARGS(sk, prot, allocated),
+       TP_ARGS(sk, prot, allocated, kind),
 
        TP_STRUCT__entry(
                __array(char, name, 32)
@@ -93,6 +101,10 @@ TRACE_EVENT(sock_exceed_buf_limit,
                __field(long, allocated)
                __field(int, sysctl_rmem)
                __field(int, rmem_alloc)
+               __field(int, sysctl_wmem)
+               __field(int, wmem_alloc)
+               __field(int, wmem_queued)
+               __field(int, kind)
        ),
 
        TP_fast_assign(
@@ -101,17 +113,25 @@ TRACE_EVENT(sock_exceed_buf_limit,
                __entry->allocated = allocated;
                __entry->sysctl_rmem = sk_get_rmem0(sk, prot);
                __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
+               __entry->sysctl_wmem = sk_get_wmem0(sk, prot);
+               __entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc);
+               __entry->wmem_queued = sk->sk_wmem_queued;
+               __entry->kind = kind;
        ),
 
-       TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld "
-               "sysctl_rmem=%d rmem_alloc=%d",
+       TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld sysctl_rmem=%d rmem_alloc=%d sysctl_wmem=%d wmem_alloc=%d wmem_queued=%d kind=%s",
                __entry->name,
                __entry->sysctl_mem[0],
                __entry->sysctl_mem[1],
                __entry->sysctl_mem[2],
                __entry->allocated,
                __entry->sysctl_rmem,
-               __entry->rmem_alloc)
+               __entry->rmem_alloc,
+               __entry->sysctl_wmem,
+               __entry->wmem_alloc,
+               __entry->wmem_queued,
+               show_skmem_kind_names(__entry->kind)
+       )
 );
 
 TRACE_EVENT(inet_sock_set_state,
index 0ae758c90e546b84ff503f8aa1a84809454eccd1..a12692e5f7a8462b22791a09fcb0ef41d0450938 100644 (file)
 
 #define SO_ZEROCOPY            60
 
+#define SO_TXTIME              61
+#define SCM_TXTIME             SO_TXTIME
+
 #endif /* __ASM_GENERIC_SOCKET_H */
index d00221345c1988ff59de79f47401903d560c55e0..3c5038b587ba0b3ab6064d5dd8a90e2e8c6bfeae 100644 (file)
@@ -39,8 +39,10 @@ enum {
        IOCB_CMD_PWRITE = 1,
        IOCB_CMD_FSYNC = 2,
        IOCB_CMD_FDSYNC = 3,
-       /* 4 was the experimental IOCB_CMD_PREADX */
-       IOCB_CMD_POLL = 5,
+       /* These two are experimental.
+        * IOCB_CMD_PREADX = 4,
+        * IOCB_CMD_POLL = 5,
+        */
        IOCB_CMD_NOOP = 6,
        IOCB_CMD_PREADV = 7,
        IOCB_CMD_PWRITEV = 8,
index 59b19b6a40d73ea6575f8810a6f4345a931c5a01..870113916caca5ef3acbad43c821d5b5111d0ffc 100644 (file)
@@ -1826,7 +1826,7 @@ union bpf_attr {
  *             A non-negative value equal to or less than *size* on success,
  *             or a negative error in case of failure.
  *
- * int skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
  *     Description
  *             This helper is similar to **bpf_skb_load_bytes**\ () in that
  *             it provides an easy way to load *len* bytes from *offset*
@@ -1857,7 +1857,8 @@ union bpf_attr {
  *             is resolved), the nexthop address is returned in ipv4_dst
  *             or ipv6_dst based on family, smac is set to mac address of
  *             egress device, dmac is set to nexthop mac address, rt_metric
- *             is set to metric from route (IPv4/IPv6 only).
+ *             is set to metric from route (IPv4/IPv6 only), and ifindex
+ *             is set to the device index of the nexthop from the FIB lookup.
  *
  *             *plen* argument is the size of the passed in struct.
  *             *flags* argument can be a combination of one or more of the
@@ -1873,9 +1874,10 @@ union bpf_attr {
  *             *ctx* is either **struct xdp_md** for XDP programs or
  *             **struct sk_buff** tc cls_act programs.
  *     Return
- *             Egress device index on success, 0 if packet needs to continue
- *             up the stack for further processing or a negative error in case
- *             of failure.
+ *             * < 0 if any input argument is invalid
+ *             *   0 on success (packet is forwarded, nexthop neighbor exists)
+ *             * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
+ *               packet is not forwarded or needs assist from full stack
  *
  * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
  *     Description
@@ -2031,7 +2033,6 @@ union bpf_attr {
 *             This helper is only available if the kernel was compiled with
  *             the **CONFIG_BPF_LIRC_MODE2** configuration option set to
  *             "**y**".
- *
  *     Return
  *             0
  *
@@ -2051,7 +2052,6 @@ union bpf_attr {
 *             This helper is only available if the kernel was compiled with
  *             the **CONFIG_BPF_LIRC_MODE2** configuration option set to
  *             "**y**".
- *
  *     Return
  *             0
  *
@@ -2555,6 +2555,9 @@ enum {
                                         * Arg1: old_state
                                         * Arg2: new_state
                                         */
+       BPF_SOCK_OPS_TCP_LISTEN_CB,     /* Called on listen(2), right after
+                                        * socket transition to LISTEN state.
+                                        */
 };
 
 /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
@@ -2612,6 +2615,18 @@ struct bpf_raw_tracepoint_args {
 #define BPF_FIB_LOOKUP_DIRECT  BIT(0)
 #define BPF_FIB_LOOKUP_OUTPUT  BIT(1)
 
+enum {
+       BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
+       BPF_FIB_LKUP_RET_BLACKHOLE,    /* dest is blackholed; can be dropped */
+       BPF_FIB_LKUP_RET_UNREACHABLE,  /* dest is unreachable; can be dropped */
+       BPF_FIB_LKUP_RET_PROHIBIT,     /* dest not allowed; can be dropped */
+       BPF_FIB_LKUP_RET_NOT_FWDED,    /* packet is not forwarded */
+       BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
+       BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation */
+       BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nh */
+       BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
+};
+
 struct bpf_fib_lookup {
        /* input:  network family for lookup (AF_INET, AF_INET6)
         * output: network family of egress nexthop
@@ -2625,7 +2640,11 @@ struct bpf_fib_lookup {
 
        /* total length of packet from network header - used for MTU check */
        __u16   tot_len;
-       __u32   ifindex;  /* L3 device index for lookup */
+
+       /* input: L3 device index for lookup
+        * output: device index from FIB lookup
+        */
+       __u32   ifindex;
 
        union {
                /* inputs to lookup */
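
With these return codes, bpf_fib_lookup() now reports "forwarded", "drop", or "punt to the stack" explicitly instead of overloading the egress ifindex. A hedged sketch of an XDP program acting on them (assumes the usual <linux/bpf.h>/bpf_helpers.h setup; header parsing and the MAC rewrite from fib_params.smac/dmac are elided):

	SEC("xdp")
	int xdp_fwd(struct xdp_md *ctx)
	{
		struct bpf_fib_lookup fib_params = {};
		int rc;

		/* ... fill fib_params from the parsed IP header ... */

		rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), 0);
		switch (rc) {
		case BPF_FIB_LKUP_RET_SUCCESS:
			/* fib_params.ifindex now holds the egress device */
			return bpf_redirect(fib_params.ifindex, 0);
		case BPF_FIB_LKUP_RET_BLACKHOLE:
		case BPF_FIB_LKUP_RET_UNREACHABLE:
		case BPF_FIB_LKUP_RET_PROHIBIT:
			return XDP_DROP;
		default:
			return XDP_PASS;	/* let the full stack handle it */
		}
	}
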
index 75cb5450c851254764b55445384857b932f012a9..79407bbd296d8d78f113b34c34d515aaa5c188a4 100644 (file)
@@ -78,6 +78,17 @@ enum devlink_command {
         */
        DEVLINK_CMD_RELOAD,
 
+       DEVLINK_CMD_PARAM_GET,          /* can dump */
+       DEVLINK_CMD_PARAM_SET,
+       DEVLINK_CMD_PARAM_NEW,
+       DEVLINK_CMD_PARAM_DEL,
+
+       DEVLINK_CMD_REGION_GET,
+       DEVLINK_CMD_REGION_SET,
+       DEVLINK_CMD_REGION_NEW,
+       DEVLINK_CMD_REGION_DEL,
+       DEVLINK_CMD_REGION_READ,
+
        /* add new commands above here */
        __DEVLINK_CMD_MAX,
        DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@@ -142,6 +153,16 @@ enum devlink_port_flavour {
                                   */
 };
 
+enum devlink_param_cmode {
+       DEVLINK_PARAM_CMODE_RUNTIME,
+       DEVLINK_PARAM_CMODE_DRIVERINIT,
+       DEVLINK_PARAM_CMODE_PERMANENT,
+
+       /* Add new configuration modes above */
+       __DEVLINK_PARAM_CMODE_MAX,
+       DEVLINK_PARAM_CMODE_MAX = __DEVLINK_PARAM_CMODE_MAX - 1
+};
+
 enum devlink_attr {
        /* don't change the order or add anything between, this is ABI! */
        DEVLINK_ATTR_UNSPEC,
@@ -238,6 +259,27 @@ enum devlink_attr {
        DEVLINK_ATTR_PORT_NUMBER,               /* u32 */
        DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER, /* u32 */
 
+       DEVLINK_ATTR_PARAM,                     /* nested */
+       DEVLINK_ATTR_PARAM_NAME,                /* string */
+       DEVLINK_ATTR_PARAM_GENERIC,             /* flag */
+       DEVLINK_ATTR_PARAM_TYPE,                /* u8 */
+       DEVLINK_ATTR_PARAM_VALUES_LIST,         /* nested */
+       DEVLINK_ATTR_PARAM_VALUE,               /* nested */
+       DEVLINK_ATTR_PARAM_VALUE_DATA,          /* dynamic */
+       DEVLINK_ATTR_PARAM_VALUE_CMODE,         /* u8 */
+
+       DEVLINK_ATTR_REGION_NAME,               /* string */
+       DEVLINK_ATTR_REGION_SIZE,               /* u64 */
+       DEVLINK_ATTR_REGION_SNAPSHOTS,          /* nested */
+       DEVLINK_ATTR_REGION_SNAPSHOT,           /* nested */
+       DEVLINK_ATTR_REGION_SNAPSHOT_ID,        /* u32 */
+
+       DEVLINK_ATTR_REGION_CHUNKS,             /* nested */
+       DEVLINK_ATTR_REGION_CHUNK,              /* nested */
+       DEVLINK_ATTR_REGION_CHUNK_DATA,         /* binary */
+       DEVLINK_ATTR_REGION_CHUNK_ADDR,         /* u64 */
+       DEVLINK_ATTR_REGION_CHUNK_LEN,          /* u64 */
+
        /* add new attributes above here, update the policy in devlink.c */
 
        __DEVLINK_ATTR_MAX,
index dc64cfaf13da08564a8271e50a4edb89d221b148..c0151200f7d1cf65e961a69e3b4fca23a9ff4785 100644 (file)
@@ -20,12 +20,16 @@ struct sock_extended_err {
 #define SO_EE_ORIGIN_ICMP6     3
 #define SO_EE_ORIGIN_TXSTATUS  4
 #define SO_EE_ORIGIN_ZEROCOPY  5
+#define SO_EE_ORIGIN_TXTIME    6
 #define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS
 
 #define SO_EE_OFFENDER(ee)     ((struct sockaddr*)((ee)+1))
 
 #define SO_EE_CODE_ZEROCOPY_COPIED     1
 
+#define SO_EE_CODE_TXTIME_INVALID_PARAM        1
+#define SO_EE_CODE_TXTIME_MISSED       2
+
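
SO_EE_ORIGIN_TXTIME errors surface on the socket error queue when a packet with a transmit time is rejected or misses its deadline, with ee_code carrying one of the values above. A hedged userspace sketch of draining them (the ancillary layout follows the usual sock_extended_err convention; details may vary by address family):

	#include <linux/errqueue.h>
	#include <stdio.h>
	#include <sys/socket.h>

	static void drain_txtime_errors(int fd)
	{
		char ctl[CMSG_SPACE(sizeof(struct sock_extended_err))];
		struct msghdr msg = { .msg_control = ctl,
				      .msg_controllen = sizeof(ctl) };
		struct cmsghdr *cmsg;

		if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
			return;
		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
			struct sock_extended_err *serr =
				(struct sock_extended_err *)CMSG_DATA(cmsg);

			if (serr->ee_origin == SO_EE_ORIGIN_TXTIME &&
			    serr->ee_code == SO_EE_CODE_TXTIME_MISSED)
				fprintf(stderr, "missed transmit time\n");
		}
	}
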
 /**
  *     struct scm_timestamping - timestamps exposed through cmsg
  *
index cf01b68242448512416c1b1aa25f0904915aad0a..8759cfb8aa2eebabfb247aace11a50a304f07eb1 100644 (file)
@@ -920,6 +920,7 @@ enum {
        XDP_ATTACHED_DRV,
        XDP_ATTACHED_SKB,
        XDP_ATTACHED_HW,
+       XDP_ATTACHED_MULTI,
 };
 
 enum {
@@ -928,6 +929,9 @@ enum {
        IFLA_XDP_ATTACHED,
        IFLA_XDP_FLAGS,
        IFLA_XDP_PROG_ID,
+       IFLA_XDP_DRV_PROG_ID,
+       IFLA_XDP_SKB_PROG_ID,
+       IFLA_XDP_HW_PROG_ID,
        __IFLA_XDP_MAX,
 };
 
index 483b77af4eb8bffe4a465337b85686374e192dc0..db45d3e49a12c73669c0063fe44e0e620fcc5218 100644 (file)
@@ -30,6 +30,7 @@ enum {
        ILA_CMD_ADD,
        ILA_CMD_DEL,
        ILA_CMD_GET,
+       ILA_CMD_FLUSH,
 
        __ILA_CMD_MAX,
 };
index 10f9ff9426a220b700ea4c50219412e6389817d1..5d37a9ccce63c444aa876d31f38e416c72f603f7 100644 (file)
@@ -120,6 +120,7 @@ enum {
        IPMRA_TABLE_MROUTE_DO_ASSERT,
        IPMRA_TABLE_MROUTE_DO_PIM,
        IPMRA_TABLE_VIFS,
+       IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE,
        __IPMRA_TABLE_MAX
 };
 #define IPMRA_TABLE_MAX (__IPMRA_TABLE_MAX - 1)
@@ -173,5 +174,6 @@ enum {
 #define IGMPMSG_NOCACHE                1               /* Kern cache fill request to mrouted */
 #define IGMPMSG_WRONGVIF       2               /* For PIM assert processing (unused) */
 #define IGMPMSG_WHOLEPKT       3               /* For PIM Register processing */
+#define IGMPMSG_WRVIFWHOLE     4               /* For PIM Register and assert processing */
 
 #endif /* _UAPI__LINUX_MROUTE_H */
index 85a3fb65e40a6f3941337c7fad17e7fdff3b33d0..20d6cc91435df90f08741c478ab29ea85efa7167 100644 (file)
@@ -53,6 +53,9 @@ enum {
 /* These are client behavior specific flags. */
 #define NBD_CFLAG_DESTROY_ON_DISCONNECT        (1 << 0) /* delete the nbd device on
                                                    disconnect. */
+#define NBD_CFLAG_DISCONNECT_ON_CLOSE (1 << 1) /* disconnect the nbd device on
+                                               *  close by last opener.
+                                               */
 
 /* userspace doesn't need the nbd_device structure */
 
index 4fe104b2411f0dcda877b8c83b1c0dcfc4e68c7f..97ff3c17ec4d2021a728c71141f1baa39f707eee 100644 (file)
@@ -141,4 +141,22 @@ struct scm_ts_pktinfo {
        __u32 reserved[2];
 };
 
+/*
+ * SO_TXTIME gets a struct sock_txtime with flags being an integer bit
+ * field comprised of these values.
+ */
+enum txtime_flags {
+       SOF_TXTIME_DEADLINE_MODE = (1 << 0),
+       SOF_TXTIME_REPORT_ERRORS = (1 << 1),
+
+       SOF_TXTIME_FLAGS_LAST = SOF_TXTIME_REPORT_ERRORS,
+       SOF_TXTIME_FLAGS_MASK = (SOF_TXTIME_FLAGS_LAST - 1) |
+                                SOF_TXTIME_FLAGS_LAST
+};
+
+struct sock_txtime {
+       clockid_t       clockid;        /* reference clockid */
+       __u32           flags;          /* as defined by enum txtime_flags */
+};
+
 #endif /* _NET_TIMESTAMPING_H */
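
Putting the pieces together: SO_TXTIME is enabled once per socket with struct sock_txtime, and each packet then carries its absolute transmit time as SCM_TXTIME ancillary data. A hedged userspace sketch (CLOCK_TAI matches the ETF qdisc examples; assumes headers new enough to define SO_TXTIME/SCM_TXTIME, and error handling is trimmed):

	#include <linux/net_tstamp.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <time.h>

	static void enable_txtime(int fd)
	{
		struct sock_txtime cfg = { .clockid = CLOCK_TAI,
					   .flags = SOF_TXTIME_REPORT_ERRORS };

		setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
	}

	static ssize_t send_at(int fd, void *buf, size_t len, __u64 txtime_ns)
	{
		char control[CMSG_SPACE(sizeof(__u64))] = {};
		struct iovec iov = { .iov_base = buf, .iov_len = len };
		struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
				      .msg_control = control,
				      .msg_controllen = sizeof(control) };
		struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

		cm->cmsg_level = SOL_SOCKET;
		cm->cmsg_type  = SCM_TXTIME;
		cm->cmsg_len   = CMSG_LEN(sizeof(__u64));
		memcpy(CMSG_DATA(cm), &txtime_ns, sizeof(txtime_ns));
		return sendmsg(fd, &msg, 0);	/* txtime_ns: absolute CLOCK_TAI */
	}
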
index 27e4e441caacdbd590a1afad65c6f5c1b3ec3edd..7acc16f349427a772f507e6a25b66b5b95b210d7 100644 (file)
@@ -2237,6 +2237,9 @@ enum nl80211_commands {
  *      enforced.
  * @NL80211_ATTR_TXQ_QUANTUM: TXQ scheduler quantum (bytes). Number of bytes
  *      a flow is assigned on each round of the DRR scheduler.
+ * @NL80211_ATTR_HE_CAPABILITY: HE Capability information element (from
+ *     association request when used with NL80211_CMD_NEW_STATION). Can be set
+ *     only if %NL80211_STA_FLAG_WME is set.
  *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2677,6 +2680,8 @@ enum nl80211_attrs {
        NL80211_ATTR_TXQ_MEMORY_LIMIT,
        NL80211_ATTR_TXQ_QUANTUM,
 
+       NL80211_ATTR_HE_CAPABILITY,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -2726,7 +2731,8 @@ enum nl80211_attrs {
 #define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY    24
 #define NL80211_HT_CAPABILITY_LEN              26
 #define NL80211_VHT_CAPABILITY_LEN             12
-
+#define NL80211_HE_MIN_CAPABILITY_LEN           16
+#define NL80211_HE_MAX_CAPABILITY_LEN           51
 #define NL80211_MAX_NR_CIPHER_SUITES           5
 #define NL80211_MAX_NR_AKM_SUITES              2
 
@@ -2853,6 +2859,38 @@ struct nl80211_sta_flag_update {
        __u32 set;
 } __attribute__((packed));
 
+/**
+ * enum nl80211_he_gi - HE guard interval
+ * @NL80211_RATE_INFO_HE_GI_0_8: 0.8 usec
+ * @NL80211_RATE_INFO_HE_GI_1_6: 1.6 usec
+ * @NL80211_RATE_INFO_HE_GI_3_2: 3.2 usec
+ */
+enum nl80211_he_gi {
+       NL80211_RATE_INFO_HE_GI_0_8,
+       NL80211_RATE_INFO_HE_GI_1_6,
+       NL80211_RATE_INFO_HE_GI_3_2,
+};
+
+/**
+ * enum nl80211_he_ru_alloc - HE RU allocation values
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_26: 26-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_52: 52-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_106: 106-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_242: 242-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_484: 484-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_996: 996-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_2x996: 2x996-tone RU allocation
+ */
+enum nl80211_he_ru_alloc {
+       NL80211_RATE_INFO_HE_RU_ALLOC_26,
+       NL80211_RATE_INFO_HE_RU_ALLOC_52,
+       NL80211_RATE_INFO_HE_RU_ALLOC_106,
+       NL80211_RATE_INFO_HE_RU_ALLOC_242,
+       NL80211_RATE_INFO_HE_RU_ALLOC_484,
+       NL80211_RATE_INFO_HE_RU_ALLOC_996,
+       NL80211_RATE_INFO_HE_RU_ALLOC_2x996,
+};
+
 /**
  * enum nl80211_rate_info - bitrate information
  *
@@ -2885,6 +2923,13 @@ struct nl80211_sta_flag_update {
  * @NL80211_RATE_INFO_5_MHZ_WIDTH: 5 MHz width - note that this is
  *     a legacy rate and will be reported as the actual bitrate, i.e.
  *     a quarter of the base (20 MHz) rate
+ * @NL80211_RATE_INFO_HE_MCS: HE MCS index (u8, 0-11)
+ * @NL80211_RATE_INFO_HE_NSS: HE NSS value (u8, 1-8)
+ * @NL80211_RATE_INFO_HE_GI: HE guard interval identifier
+ *     (u8, see &enum nl80211_he_gi)
+ * @NL80211_RATE_INFO_HE_DCM: HE DCM value (u8, 0/1)
+ * @NL80211_RATE_INFO_RU_ALLOC: HE RU allocation, if not present then
+ *     non-OFDMA was used (u8, see &enum nl80211_he_ru_alloc)
  * @__NL80211_RATE_INFO_AFTER_LAST: internal use
  */
 enum nl80211_rate_info {
@@ -2901,6 +2946,11 @@ enum nl80211_rate_info {
        NL80211_RATE_INFO_160_MHZ_WIDTH,
        NL80211_RATE_INFO_10_MHZ_WIDTH,
        NL80211_RATE_INFO_5_MHZ_WIDTH,
+       NL80211_RATE_INFO_HE_MCS,
+       NL80211_RATE_INFO_HE_NSS,
+       NL80211_RATE_INFO_HE_GI,
+       NL80211_RATE_INFO_HE_DCM,
+       NL80211_RATE_INFO_HE_RU_ALLOC,
 
        /* keep last */
        __NL80211_RATE_INFO_AFTER_LAST,
@@ -3166,6 +3216,38 @@ enum nl80211_mpath_info {
        NL80211_MPATH_INFO_MAX = __NL80211_MPATH_INFO_AFTER_LAST - 1
 };
 
+/**
+ * enum nl80211_band_iftype_attr - Interface type data attributes
+ *
+ * @__NL80211_BAND_IFTYPE_ATTR_INVALID: attribute number 0 is reserved
+ * @NL80211_BAND_IFTYPE_ATTR_IFTYPES: nested attribute containing a flag attribute
+ *     for each interface type that supports the band data
+ * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC: HE MAC capabilities as in HE
+ *     capabilities IE
+ * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY: HE PHY capabilities as in HE
+ *     capabilities IE
+ * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET: HE supported NSS/MCS as in HE
+ *     capabilities IE
+ * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE: HE PPE thresholds information as
+ *     defined in HE capabilities IE
+ * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band HE capability attribute currently
+ *     defined
+ * @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use
+ */
+enum nl80211_band_iftype_attr {
+       __NL80211_BAND_IFTYPE_ATTR_INVALID,
+
+       NL80211_BAND_IFTYPE_ATTR_IFTYPES,
+       NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC,
+       NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY,
+       NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET,
+       NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE,
+
+       /* keep last */
+       __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST,
+       NL80211_BAND_IFTYPE_ATTR_MAX = __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST - 1
+};
+
 /**
  * enum nl80211_band_attr - band attributes
  * @__NL80211_BAND_ATTR_INVALID: attribute number 0 is reserved
@@ -3181,6 +3263,8 @@ enum nl80211_mpath_info {
  * @NL80211_BAND_ATTR_VHT_MCS_SET: 32-byte attribute containing the MCS set as
  *     defined in 802.11ac
  * @NL80211_BAND_ATTR_VHT_CAPA: VHT capabilities, as in the HT information IE
+ * @NL80211_BAND_ATTR_IFTYPE_DATA: nested array attribute, with each entry using
+ *     attributes from &enum nl80211_band_iftype_attr
  * @NL80211_BAND_ATTR_MAX: highest band attribute currently defined
  * @__NL80211_BAND_ATTR_AFTER_LAST: internal use
  */
@@ -3196,6 +3280,7 @@ enum nl80211_band_attr {
 
        NL80211_BAND_ATTR_VHT_MCS_SET,
        NL80211_BAND_ATTR_VHT_CAPA,
+       NL80211_BAND_ATTR_IFTYPE_DATA,
 
        /* keep last */
        __NL80211_BAND_ATTR_AFTER_LAST,
@@ -5133,6 +5218,11 @@ enum nl80211_feature_flags {
  *     support to nl80211.
  * @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate
  *      TXQs.
+ * @NL80211_EXT_FEATURE_SCAN_RANDOM_SN: Driver/device supports randomizing the
+ *     SN in probe request frames if requested by %NL80211_SCAN_FLAG_RANDOM_SN.
+ * @NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT: Driver/device can omit all data
+ *     except for supported rates from the probe request content if requested
+ *     by the %NL80211_SCAN_FLAG_MIN_PREQ_CONTENT flag.
  *
  * @NUM_NL80211_EXT_FEATURES: number of extended features.
  * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -5167,6 +5257,8 @@ enum nl80211_ext_feature_index {
        NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211,
        NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT,
        NL80211_EXT_FEATURE_TXQS,
+       NL80211_EXT_FEATURE_SCAN_RANDOM_SN,
+       NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT,
 
        /* add new features before the definition below */
        NUM_NL80211_EXT_FEATURES,
@@ -5272,6 +5364,12 @@ enum nl80211_timeout_reason {
  *     possible scan results. This flag hints the driver to use the best
  *     possible scan configuration to improve the accuracy in scanning.
  *     Latency and power use may get impacted with this flag.
+ * @NL80211_SCAN_FLAG_RANDOM_SN: randomize the sequence number in probe
+ *     request frames from this scan to avoid correlation/tracking being
+ *     possible.
+ * @NL80211_SCAN_FLAG_MIN_PREQ_CONTENT: minimize probe request content to
+ *     only have supported rates and no additional capabilities (unless
+ *     added by userspace explicitly).
  */
 enum nl80211_scan_flags {
        NL80211_SCAN_FLAG_LOW_PRIORITY                          = 1<<0,
@@ -5285,6 +5383,8 @@ enum nl80211_scan_flags {
        NL80211_SCAN_FLAG_LOW_SPAN                              = 1<<8,
        NL80211_SCAN_FLAG_LOW_POWER                             = 1<<9,
        NL80211_SCAN_FLAG_HIGH_ACCURACY                         = 1<<10,
+       NL80211_SCAN_FLAG_RANDOM_SN                             = 1<<11,
+       NL80211_SCAN_FLAG_MIN_PREQ_CONTENT                      = 1<<12,
 };
 
 /**
index 863aabaa5cc926f82667637eab6974a3dfb0199f..dbe0cbe4f1b72b9e0c4751791614e98b0825989f 100644 (file)
@@ -840,6 +840,8 @@ struct ovs_action_push_eth {
  * @OVS_ACTION_ATTR_POP_NSH: pop the outermost NSH header off the packet.
  * @OVS_ACTION_ATTR_METER: Run packet through a meter, which may drop the
  * packet, or modify the packet (e.g., change the DSCP field).
+ * @OVS_ACTION_ATTR_CLONE: make a copy of the packet and execute a list of
+ * actions without affecting the original packet and key.
  *
  * Only a single header can be set with a single %OVS_ACTION_ATTR_SET.  Not all
  * fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -873,6 +875,7 @@ enum ovs_action_attr {
        OVS_ACTION_ATTR_PUSH_NSH,     /* Nested OVS_NSH_KEY_ATTR_*. */
        OVS_ACTION_ATTR_POP_NSH,      /* No argument. */
        OVS_ACTION_ATTR_METER,        /* u32 meter ID. */
+       OVS_ACTION_ATTR_CLONE,        /* Nested OVS_CLONE_ATTR_*.  */
 
        __OVS_ACTION_ATTR_MAX,        /* Nothing past this will be accepted
                                       * from userspace. */
index 84e4c1d0f874afec5891fcf95def286c121f71ed..c4262d91159650b7f649b1a0655bc1902e9b6af5 100644 (file)
@@ -469,6 +469,10 @@ enum {
        TCA_FLOWER_KEY_IP_TTL,          /* u8 */
        TCA_FLOWER_KEY_IP_TTL_MASK,     /* u8 */
 
+       TCA_FLOWER_KEY_CVLAN_ID,        /* be16 */
+       TCA_FLOWER_KEY_CVLAN_PRIO,      /* u8   */
+       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,  /* be16 */
+
        __TCA_FLOWER_MAX,
 };
 
index 37b5096ae97be4e6115b0941b82918e11250ee6b..d9cc9dc4f547cd501f128cc99f173f9264a7a984 100644 (file)
@@ -539,6 +539,7 @@ enum {
        TCA_NETEM_LATENCY64,
        TCA_NETEM_JITTER64,
        TCA_NETEM_SLOT,
+       TCA_NETEM_SLOT_DIST,
        __TCA_NETEM_MAX,
 };
 
@@ -581,6 +582,8 @@ struct tc_netem_slot {
        __s64   max_delay;
        __s32   max_packets;
        __s32   max_bytes;
+       __s64   dist_delay; /* nsec */
+       __s64   dist_jitter; /* nsec */
 };
 
 enum {
@@ -934,4 +937,136 @@ enum {
 
 #define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
 
+
+/* ETF */
+struct tc_etf_qopt {
+       __s32 delta;
+       __s32 clockid;
+       __u32 flags;
+#define TC_ETF_DEADLINE_MODE_ON        BIT(0)
+#define TC_ETF_OFFLOAD_ON      BIT(1)
+};
+
+enum {
+       TCA_ETF_UNSPEC,
+       TCA_ETF_PARMS,
+       __TCA_ETF_MAX,
+};
+
+#define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
+
+
+/* CAKE */
+enum {
+       TCA_CAKE_UNSPEC,
+       TCA_CAKE_PAD,
+       TCA_CAKE_BASE_RATE64,
+       TCA_CAKE_DIFFSERV_MODE,
+       TCA_CAKE_ATM,
+       TCA_CAKE_FLOW_MODE,
+       TCA_CAKE_OVERHEAD,
+       TCA_CAKE_RTT,
+       TCA_CAKE_TARGET,
+       TCA_CAKE_AUTORATE,
+       TCA_CAKE_MEMORY,
+       TCA_CAKE_NAT,
+       TCA_CAKE_RAW,
+       TCA_CAKE_WASH,
+       TCA_CAKE_MPU,
+       TCA_CAKE_INGRESS,
+       TCA_CAKE_ACK_FILTER,
+       TCA_CAKE_SPLIT_GSO,
+       __TCA_CAKE_MAX
+};
+#define TCA_CAKE_MAX   (__TCA_CAKE_MAX - 1)
+
+enum {
+       __TCA_CAKE_STATS_INVALID,
+       TCA_CAKE_STATS_PAD,
+       TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
+       TCA_CAKE_STATS_MEMORY_LIMIT,
+       TCA_CAKE_STATS_MEMORY_USED,
+       TCA_CAKE_STATS_AVG_NETOFF,
+       TCA_CAKE_STATS_MIN_NETLEN,
+       TCA_CAKE_STATS_MAX_NETLEN,
+       TCA_CAKE_STATS_MIN_ADJLEN,
+       TCA_CAKE_STATS_MAX_ADJLEN,
+       TCA_CAKE_STATS_TIN_STATS,
+       TCA_CAKE_STATS_DEFICIT,
+       TCA_CAKE_STATS_COBALT_COUNT,
+       TCA_CAKE_STATS_DROPPING,
+       TCA_CAKE_STATS_DROP_NEXT_US,
+       TCA_CAKE_STATS_P_DROP,
+       TCA_CAKE_STATS_BLUE_TIMER_US,
+       __TCA_CAKE_STATS_MAX
+};
+#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
+
+enum {
+       __TCA_CAKE_TIN_STATS_INVALID,
+       TCA_CAKE_TIN_STATS_PAD,
+       TCA_CAKE_TIN_STATS_SENT_PACKETS,
+       TCA_CAKE_TIN_STATS_SENT_BYTES64,
+       TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
+       TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
+       TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
+       TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
+       TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
+       TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
+       TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
+       TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
+       TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
+       TCA_CAKE_TIN_STATS_TARGET_US,
+       TCA_CAKE_TIN_STATS_INTERVAL_US,
+       TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
+       TCA_CAKE_TIN_STATS_WAY_MISSES,
+       TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
+       TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
+       TCA_CAKE_TIN_STATS_AVG_DELAY_US,
+       TCA_CAKE_TIN_STATS_BASE_DELAY_US,
+       TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
+       TCA_CAKE_TIN_STATS_BULK_FLOWS,
+       TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
+       TCA_CAKE_TIN_STATS_MAX_SKBLEN,
+       TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
+       __TCA_CAKE_TIN_STATS_MAX
+};
+#define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
+#define TC_CAKE_MAX_TINS (8)
+
+enum {
+       CAKE_FLOW_NONE = 0,
+       CAKE_FLOW_SRC_IP,
+       CAKE_FLOW_DST_IP,
+       CAKE_FLOW_HOSTS,    /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
+       CAKE_FLOW_FLOWS,
+       CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
+       CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
+       CAKE_FLOW_TRIPLE,   /* = CAKE_FLOW_HOSTS  | CAKE_FLOW_FLOWS */
+       CAKE_FLOW_MAX,
+};
+
+enum {
+       CAKE_DIFFSERV_DIFFSERV3 = 0,
+       CAKE_DIFFSERV_DIFFSERV4,
+       CAKE_DIFFSERV_DIFFSERV8,
+       CAKE_DIFFSERV_BESTEFFORT,
+       CAKE_DIFFSERV_PRECEDENCE,
+       CAKE_DIFFSERV_MAX
+};
+
+enum {
+       CAKE_ACK_NONE = 0,
+       CAKE_ACK_FILTER,
+       CAKE_ACK_AGGRESSIVE,
+       CAKE_ACK_MAX
+};
+
+enum {
+       CAKE_ATM_NONE = 0,
+       CAKE_ATM_ATM,
+       CAKE_ATM_PTM,
+       CAKE_ATM_MAX
+};
+
 #endif
index b64d583bf053bef4826a4571589a7c3d76632659..b479db5c71d932082741567a73d479800ee1117e 100644 (file)
@@ -100,6 +100,7 @@ typedef __s32 sctp_assoc_t;
 #define SCTP_RECVNXTINFO       33
 #define SCTP_DEFAULT_SNDINFO   34
 #define SCTP_AUTH_DEACTIVATE_KEY       35
+#define SCTP_REUSE_PORT                36
 
 /* Internal Socket Options. Some of the sctp library functions are
  * implemented using these socket options.
@@ -762,6 +763,8 @@ enum  sctp_spp_flags {
        SPP_SACKDELAY_DISABLE = 1<<6,   /*Disable SACK*/
        SPP_SACKDELAY = SPP_SACKDELAY_ENABLE | SPP_SACKDELAY_DISABLE,
        SPP_HB_TIME_IS_ZERO = 1<<7,     /* Set HB delay to 0 */
+       SPP_IPV6_FLOWLABEL = 1<<8,
+       SPP_DSCP = 1<<9,
 };
 
 struct sctp_paddrparams {
@@ -772,6 +775,8 @@ struct sctp_paddrparams {
        __u32                   spp_pathmtu;
        __u32                   spp_sackdelay;
        __u32                   spp_flags;
+       __u32                   spp_ipv6_flowlabel;
+       __u8                    spp_dscp;
 } __attribute__((packed, aligned(4)));
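
The new fields ride on the existing SCTP_PEER_ADDR_PARAMS socket option: setting SPP_DSCP or SPP_IPV6_FLOWLABEL in spp_flags marks the corresponding value as valid. A hedged userspace sketch of setting a DSCP of 46 (EF) socket-wide, leaving the address/association selectors at their zero defaults:

	struct sctp_paddrparams params = { 0 };

	params.spp_flags = SPP_DSCP;
	params.spp_dscp  = 46;	/* EF */
	setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
		   &params, sizeof(params));
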
 
 /*
index 0ae5d4685ba369d1a83df4f0feabb065f5dbdd74..92be255e534c741844ade9a727ee82309240b619 100644 (file)
@@ -35,6 +35,7 @@ enum {
        SMC_DIAG_CONNINFO,
        SMC_DIAG_LGRINFO,
        SMC_DIAG_SHUTDOWN,
+       SMC_DIAG_DMBINFO,
        __SMC_DIAG_MAX,
 };
 
@@ -83,4 +84,13 @@ struct smc_diag_lgrinfo {
        struct smc_diag_linkinfo        lnk[1];
        __u8                            role;
 };
+
+struct smcd_diag_dmbinfo {             /* SMC-D Socket internals */
+       __u32 linkid;                   /* Link identifier */
+       __u64 peer_gid;                 /* Peer GID */
+       __u64 my_gid;                   /* My GID */
+       __u64 token;                    /* Token of DMB */
+       __u64 peer_token;               /* Token of remote DMBE */
+};
+
 #endif /* _UAPI_SMC_DIAG_H_ */
index 750d89120335eb489f698191edb6c5110969fa8c..e5ebc83827abbcaaf82e1f46011540fc273c65f2 100644 (file)
@@ -279,6 +279,8 @@ enum
        LINUX_MIB_TCPDELIVERED,                 /* TCPDelivered */
        LINUX_MIB_TCPDELIVEREDCE,               /* TCPDeliveredCE */
        LINUX_MIB_TCPACKCOMPRESSED,             /* TCPAckCompressed */
+       LINUX_MIB_TCPZEROWINDOWDROP,            /* TCPZeroWindowDrop */
+       LINUX_MIB_TCPRCVQDROP,                  /* TCPRcvQDrop */
        __LINUX_MIB_MAX
 };
 
index 6e299349b15876d3302cc784576dd84cff6f1d66..b7b57967d90f09cd428d90e12b0035e3ecbcfc67 100644 (file)
@@ -44,6 +44,7 @@
 #define TCMU_MAILBOX_VERSION 2
 #define ALIGN_SIZE 64 /* Should be enough for most CPUs */
 #define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
+#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
 
 struct tcmu_mailbox {
        __u16 version;
@@ -71,6 +72,7 @@ struct tcmu_cmd_entry_hdr {
        __u16 cmd_id;
        __u8 kflags;
 #define TCMU_UFLAG_UNKNOWN_OP 0x1
+#define TCMU_UFLAG_READ_LEN   0x2
        __u8 uflags;
 
 } __packed;
@@ -119,7 +121,7 @@ struct tcmu_cmd_entry {
                        __u8 scsi_status;
                        __u8 __pad1;
                        __u16 __pad2;
-                       __u32 __pad3;
+                       __u32 read_len;
                        char sense_buffer[TCMU_SENSE_BUFFERSIZE];
                } rsp;
        };
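
The new mailbox capability bit plus TCMU_UFLAG_READ_LEN let a userspace TCMU handler report that fewer bytes than requested are valid, reusing what was previously __pad3 in the response. A hedged sketch of the handler side of that handshake; the struct layouts come from the header above, the function itself is illustrative:

#include <linux/target_core_user.h>
#include <stdint.h>

/* Complete a command whose read returned fewer bytes than requested.
 * Only advertise a short read if the kernel negotiated the capability. */
static void tcmu_complete_short_read(struct tcmu_mailbox *mb,
                                     struct tcmu_cmd_entry *entry,
                                     uint32_t valid_bytes)
{
    if (mb->flags & TCMU_MAILBOX_FLAG_CAP_READ_LEN) {
        entry->hdr.uflags |= TCMU_UFLAG_READ_LEN;
        entry->rsp.read_len = valid_bytes;   /* was __pad3 before this change */
    }
    entry->rsp.scsi_status = 0;   /* SAM_STAT_GOOD */
}
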
index 162d1094c41c09ca832c0139474f99b62e6a4aaf..24ec792dacc1828f814625a6b6e0109566e4843b 100644 (file)
@@ -17,13 +17,15 @@ enum {
        TCA_PEDIT_KEY_EX,
        __TCA_PEDIT_MAX
 };
+
 #define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1)
-                                                                                
+
 enum {
        TCA_PEDIT_KEY_EX_HTYPE = 1,
        TCA_PEDIT_KEY_EX_CMD = 2,
        __TCA_PEDIT_KEY_EX_MAX
 };
+
 #define TCA_PEDIT_KEY_EX_MAX (__TCA_PEDIT_KEY_EX_MAX - 1)
 
  /* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWROK is a special case for legacy users. It
@@ -38,6 +40,7 @@ enum pedit_header_type {
        TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5,
        __PEDIT_HDR_TYPE_MAX,
 };
+
 #define TCA_PEDIT_HDR_TYPE_MAX (__PEDIT_HDR_TYPE_MAX - 1)
 
 enum pedit_cmd {
@@ -45,6 +48,7 @@ enum pedit_cmd {
        TCA_PEDIT_KEY_EX_CMD_ADD = 1,
        __PEDIT_CMD_MAX,
 };
+
 #define TCA_PEDIT_CMD_MAX (__PEDIT_CMD_MAX - 1)
 
 struct tc_pedit_key {
@@ -55,13 +59,14 @@ struct tc_pedit_key {
        __u32           offmask;
        __u32           shift;
 };
-                                                                                
+
 struct tc_pedit_sel {
        tc_gen;
        unsigned char           nkeys;
        unsigned char           flags;
        struct tc_pedit_key     keys[0];
 };
+
 #define tc_pedit tc_pedit_sel
 
 #endif
index fbcfe27a4e6c4173553fe675655f86c2e4f51045..6de6071ebed605f3165ad49eca2ac645b4cff5a9 100644 (file)
@@ -30,6 +30,7 @@
 #define SKBEDIT_F_MARK                 0x4
 #define SKBEDIT_F_PTYPE                        0x8
 #define SKBEDIT_F_MASK                 0x10
+#define SKBEDIT_F_INHERITDSFIELD       0x20
 
 struct tc_skbedit {
        tc_gen;
@@ -45,6 +46,7 @@ enum {
        TCA_SKBEDIT_PAD,
        TCA_SKBEDIT_PTYPE,
        TCA_SKBEDIT_MASK,
+       TCA_SKBEDIT_FLAGS,
        __TCA_SKBEDIT_MAX
 };
 #define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
index 72bbefe5d1d12234fcf34f00fd32acb0a3555fcb..e284fec8c467cc7bfdb121432f63dadfce42b712 100644 (file)
@@ -36,9 +36,35 @@ enum {
        TCA_TUNNEL_KEY_PAD,
        TCA_TUNNEL_KEY_ENC_DST_PORT,    /* be16 */
        TCA_TUNNEL_KEY_NO_CSUM,         /* u8 */
+       TCA_TUNNEL_KEY_ENC_OPTS,        /* Nested TCA_TUNNEL_KEY_ENC_OPTS_
+                                        * attributes
+                                        */
        __TCA_TUNNEL_KEY_MAX,
 };
 
 #define TCA_TUNNEL_KEY_MAX (__TCA_TUNNEL_KEY_MAX - 1)
 
+enum {
+       TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC,
+       TCA_TUNNEL_KEY_ENC_OPTS_GENEVE,         /* Nested
+                                                * TCA_TUNNEL_KEY_ENC_OPTS_
+                                                * attributes
+                                                */
+       __TCA_TUNNEL_KEY_ENC_OPTS_MAX,
+};
+
+#define TCA_TUNNEL_KEY_ENC_OPTS_MAX (__TCA_TUNNEL_KEY_ENC_OPTS_MAX - 1)
+
+enum {
+       TCA_TUNNEL_KEY_ENC_OPT_GENEVE_UNSPEC,
+       TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,            /* be16 */
+       TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,             /* u8 */
+       TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,             /* 4 to 128 bytes */
+
+       __TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
+};
+
+#define TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX \
+       (__TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX - 1)
+
 #endif
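
These nested attributes let act_tunnel_key carry Geneve TLV options. A hedged sketch of how userspace might pack one option with libmnl; it assumes nlh is already positioned inside the action's attribute payload, and the class/type/data values are the caller's (class in network byte order, per the be16 annotation):

#include <libmnl/libmnl.h>
#include <linux/tc_act/tc_tunnel_key.h>
#include <stdint.h>

static void put_geneve_opt(struct nlmsghdr *nlh,
                           uint16_t cls /* be16 */, uint8_t type,
                           const void *data,
                           uint16_t len /* 4..128, multiple of 4 */)
{
    struct nlattr *opts, *opt;

    opts = mnl_attr_nest_start(nlh, TCA_TUNNEL_KEY_ENC_OPTS);
    opt = mnl_attr_nest_start(nlh, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
    mnl_attr_put_u16(nlh, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS, cls);
    mnl_attr_put_u8(nlh, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, type);
    mnl_attr_put(nlh, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, len, data);
    mnl_attr_nest_end(nlh, opt);
    mnl_attr_nest_end(nlh, opts);
}
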
index 85c11982c89b38a3995db683d08b7f89ac2e3889..0ebe02ef1a86b1eeceeac99c8354bd38516a26e2 100644 (file)
@@ -121,6 +121,7 @@ enum {
        TIPC_NLA_SOCK_TIPC_STATE,       /* u32 */
        TIPC_NLA_SOCK_COOKIE,           /* u64 */
        TIPC_NLA_SOCK_PAD,              /* flag */
+       TIPC_NLA_SOCK_GROUP,            /* nest */
 
        __TIPC_NLA_SOCK_MAX,
        TIPC_NLA_SOCK_MAX = __TIPC_NLA_SOCK_MAX - 1
@@ -233,6 +234,19 @@ enum {
        TIPC_NLA_MON_PEER_MAX = __TIPC_NLA_MON_PEER_MAX - 1
 };
 
+/* Nest, socket group info */
+enum {
+       TIPC_NLA_SOCK_GROUP_ID,                 /* u32 */
+       TIPC_NLA_SOCK_GROUP_OPEN,               /* flag */
+       TIPC_NLA_SOCK_GROUP_NODE_SCOPE,         /* flag */
+       TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE,      /* flag */
+       TIPC_NLA_SOCK_GROUP_INSTANCE,           /* u32 */
+       TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT,       /* u32 */
+
+       __TIPC_NLA_SOCK_GROUP_MAX,
+       TIPC_NLA_SOCK_GROUP_MAX = __TIPC_NLA_SOCK_GROUP_MAX - 1
+};
+
 /* Nest, connection info */
 enum {
        TIPC_NLA_CON_UNSPEC,
index 9d4340c907d17d0c2ecacdd6762e36d7c9d6def5..1e1d9bd0bd3788711d8722e7ec9e1a15661d7c3b 100644 (file)
@@ -25,12 +25,16 @@ extern bool xen_pvh;
 #define xen_hvm_domain()       (xen_domain_type == XEN_HVM_DOMAIN)
 #define xen_pvh_domain()       (xen_pvh)
 
+#include <linux/types.h>
+
+extern uint32_t xen_start_flags;
+
 #ifdef CONFIG_XEN_DOM0
 #include <xen/interface/xen.h>
 #include <asm/xen/hypervisor.h>
 
 #define xen_initial_domain()   (xen_domain() && \
-                                xen_start_info && xen_start_info->flags & SIF_INITDOMAIN)
+                                (xen_start_flags & SIF_INITDOMAIN))
 #else  /* !CONFIG_XEN_DOM0 */
 #define xen_initial_domain()   (0)
 #endif /* CONFIG_XEN_DOM0 */
index 5a52f07259a2aab4ad5993801a6d15b5dfe5d4a3..041f3a022122d559b8588c8c24c8db37756464de 100644 (file)
@@ -1051,10 +1051,9 @@ config LD_DEAD_CODE_DATA_ELIMINATION
        depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
        depends on EXPERT
        help
-         Select this if the architecture wants to do dead code and
-         data elimination with the linker by compiling with
-         -ffunction-sections -fdata-sections, and linking with
-         --gc-sections.
+         Enable this if you want to do dead code and data elimination with
+         the linker by compiling with -ffunction-sections -fdata-sections,
+         and linking with --gc-sections.
 
          This can reduce on disk and in-memory size of the kernel
          code and static data, particularly for small configs and
@@ -1719,10 +1718,6 @@ source "arch/Kconfig"
 
 endmenu                # General setup
 
-config HAVE_GENERIC_DMA_COHERENT
-       bool
-       default n
-
 config RT_MUTEXES
        bool
 
index 3b654530259809d0241baa695136a59ad169fe4c..203281198079c660c18f9cabf8b9b61c26caf7d4 100644 (file)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -38,6 +38,7 @@
 #include <linux/rwsem.h>
 #include <linux/nsproxy.h>
 #include <linux/ipc_namespace.h>
+#include <linux/rhashtable.h>
 
 #include <asm/current.h>
 #include <linux/uaccess.h>
index 5af1943ad782b415a3dd331161e9b2ecccf89210..29c0347ef11ded8311f9a519533078327e715910 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -86,6 +86,7 @@
 #include <linux/ipc_namespace.h>
 #include <linux/sched/wake_q.h>
 #include <linux/nospec.h>
+#include <linux/rhashtable.h>
 
 #include <linux/uaccess.h>
 #include "util.h"
index 051a3e1fb8df9b2bcb2073e8299d31cdfc938724..d4daf78df6da190cb56eebbf5fc033c4a5ad1f8f 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -43,6 +43,7 @@
 #include <linux/nsproxy.h>
 #include <linux/mount.h>
 #include <linux/ipc_namespace.h>
+#include <linux/rhashtable.h>
 
 #include <linux/uaccess.h>
 
index 4e81182fa0ac48cad2ed3f5afdd8141e161a920b..fdffff41f65b546f66a5ee9b69297b39c445b8a0 100644 (file)
@@ -63,6 +63,7 @@
 #include <linux/rwsem.h>
 #include <linux/memory.h>
 #include <linux/ipc_namespace.h>
+#include <linux/rhashtable.h>
 
 #include <asm/unistd.h>
 
index d2001624fe7a31b788508e5da97924173bf2e33e..04bc07c2b42a9dfef399caea56a12b072f0ad028 100644 (file)
@@ -41,6 +41,7 @@ obj-y += printk/
 obj-y += irq/
 obj-y += rcu/
 obj-y += livepatch/
+obj-y += dma/
 
 obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
 obj-$(CONFIG_FREEZER) += freezer.o
index f7c00bd6f8e49ca9cc4e6ee323b01f718aebd9ec..3d83ee7df381b1def956b5e645376451d797440e 100644 (file)
@@ -428,6 +428,60 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
        return ret;
 }
 
+int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+                          enum bpf_prog_type ptype, struct bpf_prog *prog)
+{
+       struct cgroup *cgrp;
+       int ret;
+
+       cgrp = cgroup_get_from_fd(attr->target_fd);
+       if (IS_ERR(cgrp))
+               return PTR_ERR(cgrp);
+
+       ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
+                               attr->attach_flags);
+       cgroup_put(cgrp);
+       return ret;
+}
+
+int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
+{
+       struct bpf_prog *prog;
+       struct cgroup *cgrp;
+       int ret;
+
+       cgrp = cgroup_get_from_fd(attr->target_fd);
+       if (IS_ERR(cgrp))
+               return PTR_ERR(cgrp);
+
+       prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
+       if (IS_ERR(prog))
+               prog = NULL;
+
+       ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
+       if (prog)
+               bpf_prog_put(prog);
+
+       cgroup_put(cgrp);
+       return ret;
+}
+
+int cgroup_bpf_prog_query(const union bpf_attr *attr,
+                         union bpf_attr __user *uattr)
+{
+       struct cgroup *cgrp;
+       int ret;
+
+       cgrp = cgroup_get_from_fd(attr->query.target_fd);
+       if (IS_ERR(cgrp))
+               return PTR_ERR(cgrp);
+
+       ret = cgroup_bpf_query(cgrp, attr, uattr);
+
+       cgroup_put(cgrp);
+       return ret;
+}
+
 /**
  * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
  * @sk: The socket sending or receiving traffic
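
The three helpers above factor the cgroup handling out of bpf(2)'s attach/detach/query paths so the syscall layer no longer needs CONFIG_CGROUP_BPF ifdefs. From userspace the interface is unchanged; a minimal sketch of attaching an already-loaded program to a cgroup, with both file descriptors assumed valid:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int attach_to_cgroup(int cgroup_fd, int prog_fd)
{
    union bpf_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.target_fd = cgroup_fd;          /* an opened cgroup v2 directory */
    attr.attach_bpf_fd = prog_fd;        /* from a prior BPF_PROG_LOAD */
    attr.attach_type = BPF_CGROUP_INET_INGRESS;
    attr.attach_flags = BPF_F_ALLOW_MULTI;

    return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}
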
index 9f1493705f4043066033dd44ec6deb95e7418287..1e5625d46414cc68efe372b2c6a8dab266a24dd6 100644 (file)
@@ -350,6 +350,20 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
        return prog_adj;
 }
 
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+{
+       int i;
+
+       for (i = 0; i < fp->aux->func_cnt; i++)
+               bpf_prog_kallsyms_del(fp->aux->func[i]);
+}
+
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
+{
+       bpf_prog_kallsyms_del_subprogs(fp);
+       bpf_prog_kallsyms_del(fp);
+}
+
 #ifdef CONFIG_BPF_JIT
 /* All BPF JIT sysctl knobs here. */
 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
@@ -1434,6 +1448,17 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
        return 0;
 }
 
+static void bpf_prog_select_func(struct bpf_prog *fp)
+{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+       u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+
+       fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+#else
+       fp->bpf_func = __bpf_prog_ret0_warn;
+#endif
+}
+
 /**
  *     bpf_prog_select_runtime - select exec runtime for BPF program
  *     @fp: bpf_prog populated with internal BPF program
@@ -1444,13 +1469,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
  */
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
-#ifndef CONFIG_BPF_JIT_ALWAYS_ON
-       u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+       /* In case of BPF-to-BPF calls, the verifier did all the prep
+        * work with regard to JITing, etc.
+        */
+       if (fp->bpf_func)
+               goto finalize;
 
-       fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
-#else
-       fp->bpf_func = __bpf_prog_ret0_warn;
-#endif
+       bpf_prog_select_func(fp);
 
        /* eBPF JITs can rewrite the program in case constant
         * blinding is active. However, in case of error during
@@ -1471,6 +1496,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
                if (*err)
                        return fp;
        }
+
+finalize:
        bpf_prog_lock_ro(fp);
 
        /* The tail call compatibility check can only be done at
index a7cc7b3494a90f582886485668562ccfef5f5ffd..642c97f6d1b8efeb07e0281680cd1dad26d0081b 100644 (file)
@@ -345,6 +345,20 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
        return bq_enqueue(dst, xdpf, dev_rx);
 }
 
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+                            struct bpf_prog *xdp_prog)
+{
+       int err;
+
+       err = __xdp_generic_ok_fwd_dev(skb, dst->dev);
+       if (unlikely(err))
+               return err;
+       skb->dev = dst->dev;
+       generic_xdp_tx(skb, xdp_prog);
+
+       return 0;
+}
+
 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
 {
        struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
index 52a91d816c0eb9a1f9fe96fd77b3ffefd6145149..cf7b6a6dbd1f3bc290eb3f740e46a63ab3eba116 100644 (file)
@@ -72,6 +72,7 @@ struct bpf_htab {
        u32 n_buckets;
        u32 elem_size;
        struct bpf_sock_progs progs;
+       struct rcu_head rcu;
 };
 
 struct htab_elem {
@@ -89,8 +90,8 @@ enum smap_psock_state {
 struct smap_psock_map_entry {
        struct list_head list;
        struct sock **entry;
-       struct htab_elem *hash_link;
-       struct bpf_htab *htab;
+       struct htab_elem __rcu *hash_link;
+       struct bpf_htab __rcu *htab;
 };
 
 struct smap_psock {
@@ -120,6 +121,7 @@ struct smap_psock {
        struct bpf_prog *bpf_parse;
        struct bpf_prog *bpf_verdict;
        struct list_head maps;
+       spinlock_t maps_lock;
 
        /* Back reference used when sock callback trigger sockmap operations */
        struct sock *sock;
@@ -140,6 +142,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
                            int offset, size_t size, int flags);
+static void bpf_tcp_close(struct sock *sk, long timeout);
 
 static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
 {
@@ -161,7 +164,42 @@ static bool bpf_tcp_stream_read(const struct sock *sk)
        return !empty;
 }
 
-static struct proto tcp_bpf_proto;
+enum {
+       SOCKMAP_IPV4,
+       SOCKMAP_IPV6,
+       SOCKMAP_NUM_PROTS,
+};
+
+enum {
+       SOCKMAP_BASE,
+       SOCKMAP_TX,
+       SOCKMAP_NUM_CONFIGS,
+};
+
+static struct proto *saved_tcpv6_prot __read_mostly;
+static DEFINE_SPINLOCK(tcpv6_prot_lock);
+static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS];
+static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
+                        struct proto *base)
+{
+       prot[SOCKMAP_BASE]                      = *base;
+       prot[SOCKMAP_BASE].close                = bpf_tcp_close;
+       prot[SOCKMAP_BASE].recvmsg              = bpf_tcp_recvmsg;
+       prot[SOCKMAP_BASE].stream_memory_read   = bpf_tcp_stream_read;
+
+       prot[SOCKMAP_TX]                        = prot[SOCKMAP_BASE];
+       prot[SOCKMAP_TX].sendmsg                = bpf_tcp_sendmsg;
+       prot[SOCKMAP_TX].sendpage               = bpf_tcp_sendpage;
+}
+
+static void update_sk_prot(struct sock *sk, struct smap_psock *psock)
+{
+       int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4;
+       int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE;
+
+       sk->sk_prot = &bpf_tcp_prots[family][conf];
+}
+
 static int bpf_tcp_init(struct sock *sk)
 {
        struct smap_psock *psock;
@@ -181,14 +219,17 @@ static int bpf_tcp_init(struct sock *sk)
        psock->save_close = sk->sk_prot->close;
        psock->sk_proto = sk->sk_prot;
 
-       if (psock->bpf_tx_msg) {
-               tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg;
-               tcp_bpf_proto.sendpage = bpf_tcp_sendpage;
-               tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg;
-               tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read;
+       /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */
+       if (sk->sk_family == AF_INET6 &&
+           unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
+               spin_lock_bh(&tcpv6_prot_lock);
+               if (likely(sk->sk_prot != saved_tcpv6_prot)) {
+                       build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot);
+                       smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
+               }
+               spin_unlock_bh(&tcpv6_prot_lock);
        }
-
-       sk->sk_prot = &tcp_bpf_proto;
+       update_sk_prot(sk, psock);
        rcu_read_unlock();
        return 0;
 }
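
Instead of one mutable tcp_bpf_proto, sockmap now keeps a matrix of proto templates, built once per address family (the IPv6 row lazily, whenever tcpv6_prot's address changes), and update_sk_prot() picks the right row and column per socket. A runnable userspace analog of the same "copy the base ops table, then override a few handlers" pattern; all names here are illustrative:

#include <stdio.h>

struct ops {
    void (*close)(void);
    void (*send)(void);
};

static void base_close(void) { puts("base close"); }
static void base_send(void)  { puts("base send"); }
static void bpf_close(void)  { puts("bpf close"); }
static void bpf_send(void)   { puts("bpf send"); }

enum { CFG_BASE, CFG_TX, NUM_CFGS };

static void build_protos(struct ops prot[NUM_CFGS], const struct ops *base)
{
    prot[CFG_BASE] = *base;              /* inherit everything ...        */
    prot[CFG_BASE].close = bpf_close;    /* ... then override close       */
    prot[CFG_TX] = prot[CFG_BASE];
    prot[CFG_TX].send = bpf_send;        /* TX config also overrides send */
}

int main(void)
{
    struct ops base = { base_close, base_send }, prot[NUM_CFGS];

    build_protos(prot, &base);
    prot[CFG_TX].close();   /* bpf close */
    prot[CFG_TX].send();    /* bpf send */
    return 0;
}
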
@@ -219,16 +260,54 @@ static void bpf_tcp_release(struct sock *sk)
        rcu_read_unlock();
 }
 
+static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
+                                        u32 hash, void *key, u32 key_size)
+{
+       struct htab_elem *l;
+
+       hlist_for_each_entry_rcu(l, head, hash_node) {
+               if (l->hash == hash && !memcmp(&l->key, key, key_size))
+                       return l;
+       }
+
+       return NULL;
+}
+
+static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
+{
+       return &htab->buckets[hash & (htab->n_buckets - 1)];
+}
+
+static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
+{
+       return &__select_bucket(htab, hash)->head;
+}
+
 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 {
        atomic_dec(&htab->count);
        kfree_rcu(l, rcu);
 }
 
+static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
+                                                 struct smap_psock *psock)
+{
+       struct smap_psock_map_entry *e;
+
+       spin_lock_bh(&psock->maps_lock);
+       e = list_first_entry_or_null(&psock->maps,
+                                    struct smap_psock_map_entry,
+                                    list);
+       if (e)
+               list_del(&e->list);
+       spin_unlock_bh(&psock->maps_lock);
+       return e;
+}
+
 static void bpf_tcp_close(struct sock *sk, long timeout)
 {
        void (*close_fun)(struct sock *sk, long timeout);
-       struct smap_psock_map_entry *e, *tmp;
+       struct smap_psock_map_entry *e;
        struct sk_msg_buff *md, *mtmp;
        struct smap_psock *psock;
        struct sock *osk;
@@ -247,7 +326,6 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
         */
        close_fun = psock->save_close;
 
-       write_lock_bh(&sk->sk_callback_lock);
        if (psock->cork) {
                free_start_sg(psock->sock, psock->cork);
                kfree(psock->cork);
@@ -260,20 +338,38 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
                kfree(md);
        }
 
-       list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+       e = psock_map_pop(sk, psock);
+       while (e) {
                if (e->entry) {
                        osk = cmpxchg(e->entry, sk, NULL);
                        if (osk == sk) {
-                               list_del(&e->list);
                                smap_release_sock(psock, sk);
                        }
                } else {
-                       hlist_del_rcu(&e->hash_link->hash_node);
-                       smap_release_sock(psock, e->hash_link->sk);
-                       free_htab_elem(e->htab, e->hash_link);
+                       struct htab_elem *link = rcu_dereference(e->hash_link);
+                       struct bpf_htab *htab = rcu_dereference(e->htab);
+                       struct hlist_head *head;
+                       struct htab_elem *l;
+                       struct bucket *b;
+
+                       b = __select_bucket(htab, link->hash);
+                       head = &b->head;
+                       raw_spin_lock_bh(&b->lock);
+                       l = lookup_elem_raw(head,
+                                           link->hash, link->key,
+                                           htab->map.key_size);
+                       /* If another thread deleted this object, skip deletion.
+                        * The refcnt on psock may or may not be zero.
+                        */
+                       if (l) {
+                               hlist_del_rcu(&link->hash_node);
+                               smap_release_sock(psock, link->sk);
+                               free_htab_elem(htab, link);
+                       }
+                       raw_spin_unlock_bh(&b->lock);
                }
+               e = psock_map_pop(sk, psock);
        }
-       write_unlock_bh(&sk->sk_callback_lock);
        rcu_read_unlock();
        close_fun(sk, timeout);
 }
@@ -1111,8 +1207,7 @@ static void bpf_tcp_msg_add(struct smap_psock *psock,
 
 static int bpf_tcp_ulp_register(void)
 {
-       tcp_bpf_proto = tcp_prot;
-       tcp_bpf_proto.close = bpf_tcp_close;
+       build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot);
        /* Once BPF TX ULP is registered it is never unregistered. It
         * will be in the ULP list for the lifetime of the system. Doing
         * duplicate registers is not a problem.
@@ -1357,7 +1452,9 @@ static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
 {
        if (refcount_dec_and_test(&psock->refcnt)) {
                tcp_cleanup_ulp(sock);
+               write_lock_bh(&sock->sk_callback_lock);
                smap_stop_sock(psock, sock);
+               write_unlock_bh(&sock->sk_callback_lock);
                clear_bit(SMAP_TX_RUNNING, &psock->state);
                rcu_assign_sk_user_data(sock, NULL);
                call_rcu_sched(&psock->rcu, smap_destroy_psock);
@@ -1508,6 +1605,7 @@ static struct smap_psock *smap_init_psock(struct sock *sock, int node)
        INIT_LIST_HEAD(&psock->maps);
        INIT_LIST_HEAD(&psock->ingress);
        refcount_set(&psock->refcnt, 1);
+       spin_lock_init(&psock->maps_lock);
 
        rcu_assign_sk_user_data(sock, psock);
        sock_hold(sock);
@@ -1564,18 +1662,32 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
        return ERR_PTR(err);
 }
 
-static void smap_list_remove(struct smap_psock *psock,
-                            struct sock **entry,
-                            struct htab_elem *hash_link)
+static void smap_list_map_remove(struct smap_psock *psock,
+                                struct sock **entry)
 {
        struct smap_psock_map_entry *e, *tmp;
 
+       spin_lock_bh(&psock->maps_lock);
        list_for_each_entry_safe(e, tmp, &psock->maps, list) {
-               if (e->entry == entry || e->hash_link == hash_link) {
+               if (e->entry == entry)
                        list_del(&e->list);
-                       break;
-               }
        }
+       spin_unlock_bh(&psock->maps_lock);
+}
+
+static void smap_list_hash_remove(struct smap_psock *psock,
+                                 struct htab_elem *hash_link)
+{
+       struct smap_psock_map_entry *e, *tmp;
+
+       spin_lock_bh(&psock->maps_lock);
+       list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+               struct htab_elem *c = rcu_dereference(e->hash_link);
+
+               if (c == hash_link)
+                       list_del(&e->list);
+       }
+       spin_unlock_bh(&psock->maps_lock);
 }
 
 static void sock_map_free(struct bpf_map *map)
@@ -1601,7 +1713,6 @@ static void sock_map_free(struct bpf_map *map)
                if (!sock)
                        continue;
 
-               write_lock_bh(&sock->sk_callback_lock);
                psock = smap_psock_sk(sock);
                /* This check handles a racing sock event that can get the
                 * sk_callback_lock before this case but after xchg happens
@@ -1609,10 +1720,9 @@ static void sock_map_free(struct bpf_map *map)
                 * to be null and queued for garbage collection.
                 */
                if (likely(psock)) {
-                       smap_list_remove(psock, &stab->sock_map[i], NULL);
+                       smap_list_map_remove(psock, &stab->sock_map[i]);
                        smap_release_sock(psock, sock);
                }
-               write_unlock_bh(&sock->sk_callback_lock);
        }
        rcu_read_unlock();
 
@@ -1661,17 +1771,15 @@ static int sock_map_delete_elem(struct bpf_map *map, void *key)
        if (!sock)
                return -EINVAL;
 
-       write_lock_bh(&sock->sk_callback_lock);
        psock = smap_psock_sk(sock);
        if (!psock)
                goto out;
 
        if (psock->bpf_parse)
                smap_stop_sock(psock, sock);
-       smap_list_remove(psock, &stab->sock_map[k], NULL);
+       smap_list_map_remove(psock, &stab->sock_map[k]);
        smap_release_sock(psock, sock);
 out:
-       write_unlock_bh(&sock->sk_callback_lock);
        return 0;
 }
 
@@ -1752,7 +1860,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
                }
        }
 
-       write_lock_bh(&sock->sk_callback_lock);
        psock = smap_psock_sk(sock);
 
        /* 2. Do not allow inheriting programs if psock exists and has
@@ -1809,7 +1916,9 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
                if (err)
                        goto out_free;
                smap_init_progs(psock, verdict, parse);
+               write_lock_bh(&sock->sk_callback_lock);
                smap_start_sock(psock, sock);
+               write_unlock_bh(&sock->sk_callback_lock);
        }
 
        /* 4. Place psock in sockmap for use and stop any programs on
@@ -1819,9 +1928,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
         */
        if (map_link) {
                e->entry = map_link;
+               spin_lock_bh(&psock->maps_lock);
                list_add_tail(&e->list, &psock->maps);
+               spin_unlock_bh(&psock->maps_lock);
        }
-       write_unlock_bh(&sock->sk_callback_lock);
        return err;
 out_free:
        smap_release_sock(psock, sock);
@@ -1832,7 +1942,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
        }
        if (tx_msg)
                bpf_prog_put(tx_msg);
-       write_unlock_bh(&sock->sk_callback_lock);
        kfree(e);
        return err;
 }
@@ -1869,10 +1978,8 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
        if (osock) {
                struct smap_psock *opsock = smap_psock_sk(osock);
 
-               write_lock_bh(&osock->sk_callback_lock);
-               smap_list_remove(opsock, &stab->sock_map[i], NULL);
+               smap_list_map_remove(opsock, &stab->sock_map[i]);
                smap_release_sock(opsock, osock);
-               write_unlock_bh(&osock->sk_callback_lock);
        }
 out:
        return err;
@@ -1915,6 +2022,24 @@ int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
        return 0;
 }
 
+int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+                       struct bpf_prog *prog)
+{
+       int ufd = attr->target_fd;
+       struct bpf_map *map;
+       struct fd f;
+       int err;
+
+       f = fdget(ufd);
+       map = __bpf_map_get(f);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
+
+       err = sock_map_prog(map, prog, attr->attach_type);
+       fdput(f);
+       return err;
+}
+
 static void *sock_map_lookup(struct bpf_map *map, void *key)
 {
        return NULL;
@@ -2043,14 +2168,13 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
        return ERR_PTR(err);
 }
 
-static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
+static void __bpf_htab_free(struct rcu_head *rcu)
 {
-       return &htab->buckets[hash & (htab->n_buckets - 1)];
-}
+       struct bpf_htab *htab;
 
-static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
-{
-       return &__select_bucket(htab, hash)->head;
+       htab = container_of(rcu, struct bpf_htab, rcu);
+       bpf_map_area_free(htab->buckets);
+       kfree(htab);
 }
 
 static void sock_hash_free(struct bpf_map *map)
@@ -2069,16 +2193,18 @@ static void sock_hash_free(struct bpf_map *map)
         */
        rcu_read_lock();
        for (i = 0; i < htab->n_buckets; i++) {
-               struct hlist_head *head = select_bucket(htab, i);
+               struct bucket *b = __select_bucket(htab, i);
+               struct hlist_head *head;
                struct hlist_node *n;
                struct htab_elem *l;
 
+               raw_spin_lock_bh(&b->lock);
+               head = &b->head;
                hlist_for_each_entry_safe(l, n, head, hash_node) {
                        struct sock *sock = l->sk;
                        struct smap_psock *psock;
 
                        hlist_del_rcu(&l->hash_node);
-                       write_lock_bh(&sock->sk_callback_lock);
                        psock = smap_psock_sk(sock);
                        /* This check handles a racing sock event that can get
                         * the sk_callback_lock before this case but after xchg
@@ -2086,16 +2212,15 @@ static void sock_hash_free(struct bpf_map *map)
                         * (psock) to be null and queued for garbage collection.
                         */
                        if (likely(psock)) {
-                               smap_list_remove(psock, NULL, l);
+                               smap_list_hash_remove(psock, l);
                                smap_release_sock(psock, sock);
                        }
-                       write_unlock_bh(&sock->sk_callback_lock);
-                       kfree(l);
+                       free_htab_elem(htab, l);
                }
+               raw_spin_unlock_bh(&b->lock);
        }
        rcu_read_unlock();
-       bpf_map_area_free(htab->buckets);
-       kfree(htab);
+       call_rcu(&htab->rcu, __bpf_htab_free);
 }
 
 static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
@@ -2122,19 +2247,6 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
        return l_new;
 }
 
-static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
-                                        u32 hash, void *key, u32 key_size)
-{
-       struct htab_elem *l;
-
-       hlist_for_each_entry_rcu(l, head, hash_node) {
-               if (l->hash == hash && !memcmp(&l->key, key, key_size))
-                       return l;
-       }
-
-       return NULL;
-}
-
 static inline u32 htab_map_hash(const void *key, u32 key_len)
 {
        return jhash(key, key_len, 0);
@@ -2254,9 +2366,12 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
                goto bucket_err;
        }
 
-       e->hash_link = l_new;
-       e->htab = container_of(map, struct bpf_htab, map);
+       rcu_assign_pointer(e->hash_link, l_new);
+       rcu_assign_pointer(e->htab,
+                          container_of(map, struct bpf_htab, map));
+       spin_lock_bh(&psock->maps_lock);
        list_add_tail(&e->list, &psock->maps);
+       spin_unlock_bh(&psock->maps_lock);
 
        /* add new element to the head of the list, so that
         * concurrent search will find it before old elem
@@ -2266,7 +2381,7 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
                psock = smap_psock_sk(l_old->sk);
 
                hlist_del_rcu(&l_old->hash_node);
-               smap_list_remove(psock, NULL, l_old);
+               smap_list_hash_remove(psock, l_old);
                smap_release_sock(psock, l_old->sk);
                free_htab_elem(htab, l_old);
        }
@@ -2326,7 +2441,6 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
                struct smap_psock *psock;
 
                hlist_del_rcu(&l->hash_node);
-               write_lock_bh(&sock->sk_callback_lock);
                psock = smap_psock_sk(sock);
                /* This check handles a racing sock event that can get the
                 * sk_callback_lock before this case but after xchg happens
@@ -2334,10 +2448,9 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
                 * to be null and queued for garbage collection.
                 */
                if (likely(psock)) {
-                       smap_list_remove(psock, NULL, l);
+                       smap_list_hash_remove(psock, l);
                        smap_release_sock(psock, sock);
                }
-               write_unlock_bh(&sock->sk_callback_lock);
                free_htab_elem(htab, l);
                ret = 0;
        }
@@ -2383,6 +2496,7 @@ const struct bpf_map_ops sock_hash_ops = {
        .map_get_next_key = sock_hash_get_next_key,
        .map_update_elem = sock_hash_update_elem,
        .map_delete_elem = sock_hash_delete_elem,
+       .map_release_uref = sock_map_release,
 };
 
 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
index 0fa20624707f23b15d3200c1302765a119ee9fc1..d10ecd78105fa66d2ffbd6b27c42fec2a6e6b09c 100644 (file)
@@ -1034,14 +1034,9 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
-               int i;
-
                /* bpf_prog_free_id() must be called first */
                bpf_prog_free_id(prog, do_idr_lock);
-
-               for (i = 0; i < prog->aux->func_cnt; i++)
-                       bpf_prog_kallsyms_del(prog->aux->func[i]);
-               bpf_prog_kallsyms_del(prog);
+               bpf_prog_kallsyms_del_all(prog);
 
                call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
        }
@@ -1358,9 +1353,7 @@ static int bpf_prog_load(union bpf_attr *attr)
        if (err < 0)
                goto free_used_maps;
 
-       /* eBPF program is ready to be JITed */
-       if (!prog->bpf_func)
-               prog = bpf_prog_select_runtime(prog, &err);
+       prog = bpf_prog_select_runtime(prog, &err);
        if (err < 0)
                goto free_used_maps;
 
@@ -1384,6 +1377,7 @@ static int bpf_prog_load(union bpf_attr *attr)
        return err;
 
 free_used_maps:
+       bpf_prog_kallsyms_del_subprogs(prog);
        free_used_maps(prog->aux);
 free_prog:
        bpf_prog_uncharge_memlock(prog);
@@ -1489,8 +1483,6 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
        return err;
 }
 
-#ifdef CONFIG_CGROUP_BPF
-
 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
                                             enum bpf_attach_type attach_type)
 {
@@ -1505,40 +1497,6 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
 
 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags
 
-static int sockmap_get_from_fd(const union bpf_attr *attr,
-                              int type, bool attach)
-{
-       struct bpf_prog *prog = NULL;
-       int ufd = attr->target_fd;
-       struct bpf_map *map;
-       struct fd f;
-       int err;
-
-       f = fdget(ufd);
-       map = __bpf_map_get(f);
-       if (IS_ERR(map))
-               return PTR_ERR(map);
-
-       if (attach) {
-               prog = bpf_prog_get_type(attr->attach_bpf_fd, type);
-               if (IS_ERR(prog)) {
-                       fdput(f);
-                       return PTR_ERR(prog);
-               }
-       }
-
-       err = sock_map_prog(map, prog, attr->attach_type);
-       if (err) {
-               fdput(f);
-               if (prog)
-                       bpf_prog_put(prog);
-               return err;
-       }
-
-       fdput(f);
-       return 0;
-}
-
 #define BPF_F_ATTACH_MASK \
        (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)
 
@@ -1546,7 +1504,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 {
        enum bpf_prog_type ptype;
        struct bpf_prog *prog;
-       struct cgroup *cgrp;
        int ret;
 
        if (!capable(CAP_NET_ADMIN))
@@ -1583,12 +1540,15 @@ static int bpf_prog_attach(const union bpf_attr *attr)
                ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
                break;
        case BPF_SK_MSG_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true);
+               ptype = BPF_PROG_TYPE_SK_MSG;
+               break;
        case BPF_SK_SKB_STREAM_PARSER:
        case BPF_SK_SKB_STREAM_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true);
+               ptype = BPF_PROG_TYPE_SK_SKB;
+               break;
        case BPF_LIRC_MODE2:
-               return lirc_prog_attach(attr);
+               ptype = BPF_PROG_TYPE_LIRC_MODE2;
+               break;
        default:
                return -EINVAL;
        }
@@ -1602,18 +1562,20 @@ static int bpf_prog_attach(const union bpf_attr *attr)
                return -EINVAL;
        }
 
-       cgrp = cgroup_get_from_fd(attr->target_fd);
-       if (IS_ERR(cgrp)) {
-               bpf_prog_put(prog);
-               return PTR_ERR(cgrp);
+       switch (ptype) {
+       case BPF_PROG_TYPE_SK_SKB:
+       case BPF_PROG_TYPE_SK_MSG:
+               ret = sockmap_get_from_fd(attr, ptype, prog);
+               break;
+       case BPF_PROG_TYPE_LIRC_MODE2:
+               ret = lirc_prog_attach(attr, prog);
+               break;
+       default:
+               ret = cgroup_bpf_prog_attach(attr, ptype, prog);
        }
 
-       ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
-                               attr->attach_flags);
        if (ret)
                bpf_prog_put(prog);
-       cgroup_put(cgrp);
-
        return ret;
 }
 
@@ -1622,9 +1584,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 static int bpf_prog_detach(const union bpf_attr *attr)
 {
        enum bpf_prog_type ptype;
-       struct bpf_prog *prog;
-       struct cgroup *cgrp;
-       int ret;
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
@@ -1657,29 +1616,17 @@ static int bpf_prog_detach(const union bpf_attr *attr)
                ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
                break;
        case BPF_SK_MSG_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false);
+               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, NULL);
        case BPF_SK_SKB_STREAM_PARSER:
        case BPF_SK_SKB_STREAM_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false);
+               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL);
        case BPF_LIRC_MODE2:
                return lirc_prog_detach(attr);
        default:
                return -EINVAL;
        }
 
-       cgrp = cgroup_get_from_fd(attr->target_fd);
-       if (IS_ERR(cgrp))
-               return PTR_ERR(cgrp);
-
-       prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
-       if (IS_ERR(prog))
-               prog = NULL;
-
-       ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
-       if (prog)
-               bpf_prog_put(prog);
-       cgroup_put(cgrp);
-       return ret;
+       return cgroup_bpf_prog_detach(attr, ptype);
 }
 
 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
@@ -1687,9 +1634,6 @@ static int bpf_prog_detach(const union bpf_attr *attr)
 static int bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr)
 {
-       struct cgroup *cgrp;
-       int ret;
-
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
        if (CHECK_ATTR(BPF_PROG_QUERY))
@@ -1717,14 +1661,9 @@ static int bpf_prog_query(const union bpf_attr *attr,
        default:
                return -EINVAL;
        }
-       cgrp = cgroup_get_from_fd(attr->query.target_fd);
-       if (IS_ERR(cgrp))
-               return PTR_ERR(cgrp);
-       ret = cgroup_bpf_query(cgrp, attr, uattr);
-       cgroup_put(cgrp);
-       return ret;
+
+       return cgroup_bpf_prog_query(attr, uattr);
 }
-#endif /* CONFIG_CGROUP_BPF */
 
 #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
 
@@ -2371,7 +2310,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        case BPF_OBJ_GET:
                err = bpf_obj_get(&attr);
                break;
-#ifdef CONFIG_CGROUP_BPF
        case BPF_PROG_ATTACH:
                err = bpf_prog_attach(&attr);
                break;
@@ -2381,7 +2319,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        case BPF_PROG_QUERY:
                err = bpf_prog_query(&attr, uattr);
                break;
-#endif
        case BPF_PROG_TEST_RUN:
                err = bpf_prog_test_run(&attr, uattr);
                break;
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
new file mode 100644 (file)
index 0000000..9bd5430
--- /dev/null
@@ -0,0 +1,50 @@
+
+config HAS_DMA
+       bool
+       depends on !NO_DMA
+       default y
+
+config NEED_SG_DMA_LENGTH
+       bool
+
+config NEED_DMA_MAP_STATE
+       bool
+
+config ARCH_DMA_ADDR_T_64BIT
+       def_bool 64BIT || PHYS_ADDR_T_64BIT
+
+config HAVE_GENERIC_DMA_COHERENT
+       bool
+
+config ARCH_HAS_SYNC_DMA_FOR_DEVICE
+       bool
+
+config ARCH_HAS_SYNC_DMA_FOR_CPU
+       bool
+       select NEED_DMA_MAP_STATE
+
+config DMA_DIRECT_OPS
+       bool
+       depends on HAS_DMA
+
+config DMA_NONCOHERENT_OPS
+       bool
+       depends on HAS_DMA
+       select DMA_DIRECT_OPS
+
+config DMA_NONCOHERENT_MMAP
+       bool
+       depends on DMA_NONCOHERENT_OPS
+
+config DMA_NONCOHERENT_CACHE_SYNC
+       bool
+       depends on DMA_NONCOHERENT_OPS
+
+config DMA_VIRT_OPS
+       bool
+       depends on HAS_DMA
+
+config SWIOTLB
+       bool
+       select DMA_DIRECT_OPS
+       select NEED_DMA_MAP_STATE
diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
new file mode 100644 (file)
index 0000000..6de44e4
--- /dev/null
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_HAS_DMA)                  += mapping.o
+obj-$(CONFIG_DMA_CMA)                  += contiguous.o
+obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += coherent.o
+obj-$(CONFIG_DMA_DIRECT_OPS)           += direct.o
+obj-$(CONFIG_DMA_NONCOHERENT_OPS)      += noncoherent.o
+obj-$(CONFIG_DMA_VIRT_OPS)             += virt.o
+obj-$(CONFIG_DMA_API_DEBUG)            += debug.o
+obj-$(CONFIG_SWIOTLB)                  += swiotlb.o
+
similarity index 100%
rename from lib/dma-debug.c
rename to kernel/dma/debug.c
similarity index 100%
rename from lib/dma-direct.c
rename to kernel/dma/direct.c
similarity index 99%
rename from drivers/base/dma-mapping.c
rename to kernel/dma/mapping.c
index f831a582209c63b7412a6a4276ed7f1205371610..d2a92ddaac4d14c8683433856672fddbace7a4c9 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
+ * arch-independent dma-mapping routines
  *
  * Copyright (c) 2006  SUSE Linux Products GmbH
  * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
similarity index 99%
rename from lib/swiotlb.c
rename to kernel/dma/swiotlb.c
index 04b68d9dfface72cd56d1f48ec0a58c68d2c2032..904541055792bf227faaf49734cd737142fd4111 100644 (file)
@@ -1085,3 +1085,4 @@ const struct dma_map_ops swiotlb_dma_ops = {
        .unmap_page             = swiotlb_unmap_page,
        .dma_supported          = dma_direct_supported,
 };
+EXPORT_SYMBOL(swiotlb_dma_ops);
similarity index 98%
rename from lib/dma-virt.c
rename to kernel/dma/virt.c
index 8e61a02ef9ca06cb2aabfaef7484a44f31610ef1..631ddec4b60a8b94576b1c3a36db71a818bff5d8 100644 (file)
@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- *     lib/dma-virt.c
- *
  * DMA operations that map to virtual addresses without flushing memory.
  */
 #include <linux/export.h>
index 80cca2b30c4fe02c1baca59b08c0cae158a7368e..8f0434a9951af00bce3f009c21ddbd3c5cd61e01 100644 (file)
@@ -6482,7 +6482,7 @@ void perf_prepare_sample(struct perf_event_header *header,
                data->phys_addr = perf_virt_to_phys(data->addr);
 }
 
-static void __always_inline
+static __always_inline void
 __perf_event_output(struct perf_event *event,
                    struct perf_sample_data *data,
                    struct pt_regs *regs,
index 045a37e9ddee3255fac6ab34b80682f4e2e968e1..5d3cf407e37469a7b1cafab8c4af303d074bbdf8 100644 (file)
@@ -103,7 +103,7 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
        preempt_enable();
 }
 
-static bool __always_inline
+static __always_inline bool
 ring_buffer_has_space(unsigned long head, unsigned long tail,
                      unsigned long data_size, unsigned int size,
                      bool backward)
@@ -114,7 +114,7 @@ ring_buffer_has_space(unsigned long head, unsigned long tail,
                return CIRC_SPACE(tail, head, data_size) >= size;
 }
 
-static int __always_inline
+static __always_inline int
 __perf_output_begin(struct perf_output_handle *handle,
                    struct perf_event *event, unsigned int size,
                    bool backward)
@@ -414,7 +414,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
 }
 EXPORT_SYMBOL_GPL(perf_aux_output_begin);
 
-static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb)
+static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
 {
        if (rb->aux_overwrite)
                return false;
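
The perf hunks above only reorder tokens: static __always_inline <type> instead of static <type> __always_inline, which keeps newer compilers (notably clang) from complaining about an attribute placed after the return type. A runnable illustration using the underlying GCC attribute; the macro expansion shown is an assumption, the kernel's real definition lives in its compiler headers:

#include <stdio.h>

#define my_always_inline inline __attribute__((always_inline))

/* preferred order: storage class, attribute, then return type */
static my_always_inline int add1(int x) { return x + 1; }

int main(void)
{
    printf("%d\n", add1(41));   /* prints 42 */
    return 0;
}
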
index 4dadeb3d666621239a7273f7651847fa7099dacf..6f636136cccc05993e20034e92effc0c0fc3e7e2 100644 (file)
@@ -55,6 +55,7 @@ static const struct irq_bit_descr irqchip_flags[] = {
        BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE),
        BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
        BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
+       BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI),
 };
 
 static void
index edcac5de7ebcdb489113800c941274d8887f9b56..5fa4d3138bf106cd87f822636652c144e846f3aa 100644 (file)
@@ -1265,11 +1265,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
        this.parent = NULL;
        this.class = class;
 
-       local_irq_save(flags);
+       raw_local_irq_save(flags);
        arch_spin_lock(&lockdep_lock);
        ret = __lockdep_count_forward_deps(&this);
        arch_spin_unlock(&lockdep_lock);
-       local_irq_restore(flags);
+       raw_local_irq_restore(flags);
 
        return ret;
 }
@@ -1292,11 +1292,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
        this.parent = NULL;
        this.class = class;
 
-       local_irq_save(flags);
+       raw_local_irq_save(flags);
        arch_spin_lock(&lockdep_lock);
        ret = __lockdep_count_backward_deps(&this);
        arch_spin_unlock(&lockdep_lock);
-       local_irq_restore(flags);
+       raw_local_irq_restore(flags);
 
        return ret;
 }
@@ -4411,7 +4411,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
        if (unlikely(!debug_locks))
                return;
 
-       local_irq_save(flags);
+       raw_local_irq_save(flags);
        for (i = 0; i < curr->lockdep_depth; i++) {
                hlock = curr->held_locks + i;
 
@@ -4422,7 +4422,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
                print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
                break;
        }
-       local_irq_restore(flags);
+       raw_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
index bc1e507be9ff7aea311261e78002d53375f9a6d7..776308d2fa9e9468116f0174eed4b8062475a83f 100644 (file)
@@ -181,6 +181,7 @@ void down_read_non_owner(struct rw_semaphore *sem)
        might_sleep();
 
        __down_read(sem);
+       rwsem_set_reader_owned(sem);
 }
 
 EXPORT_SYMBOL(down_read_non_owner);
index ae306f90c51484fae6bb583733ca5e8f8b3e76be..22b6acf1ad6377b502e3a77b54939eec5d27d139 100644 (file)
@@ -251,10 +251,10 @@ static int rseq_ip_fixup(struct pt_regs *regs)
  * respect to other threads scheduled on the same CPU, and with respect
  * to signal handlers.
  */
-void __rseq_handle_notify_resume(struct pt_regs *regs)
+void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
 {
        struct task_struct *t = current;
-       int ret;
+       int ret, sig;
 
        if (unlikely(t->flags & PF_EXITING))
                return;
@@ -268,7 +268,8 @@ void __rseq_handle_notify_resume(struct pt_regs *regs)
        return;
 
 error:
-       force_sig(SIGSEGV, t);
+       sig = ksig ? ksig->sig : 0;
+       force_sigsegv(sig, t);
 }
 
 #ifdef CONFIG_DEBUG_RSEQ
index de2f57fddc04ed85f5d419fe64e51cdcbb93193b..900dcfee542ced866dd3d55274383ac1c4ae7284 100644 (file)
@@ -139,9 +139,13 @@ static void __local_bh_enable(unsigned int cnt)
 {
        lockdep_assert_irqs_disabled();
 
+       if (preempt_count() == cnt)
+               trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+
        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_on(_RET_IP_);
-       preempt_count_sub(cnt);
+
+       __preempt_count_sub(cnt);
 }
 
 /*
index 055a4a728c00cce3945afc04b9bee692b243896b..3e93c54bd3a16b7fc282a20064f5d75f7c812ee8 100644 (file)
@@ -1659,7 +1659,7 @@ EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
 int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
 {
        switch(restart->nanosleep.type) {
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
        case TT_COMPAT:
                if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp))
                        return -EFAULT;
index 5a6251ac6f7acd183c35a51d9d55fb680fda64dd..9cdf54b04ca8860b7aa2eec2e3625de076b5f7e2 100644 (file)
@@ -604,7 +604,6 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
        /*
         * Disarm any old timer after extracting its expiry time.
         */
-       lockdep_assert_irqs_disabled();
 
        ret = 0;
        old_incr = timer->it.cpu.incr;
@@ -1049,7 +1048,6 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
        /*
         * Now re-arm for the new expiry time.
         */
-       lockdep_assert_irqs_disabled();
        arm_timer(timer);
 unlock:
        unlock_task_sighand(p, &flags);
index 6fa99213fc720e4b77c467ae69a87007c22b37d2..2b41e8e2d31db26faaaf905543af749463939b9c 100644 (file)
@@ -28,6 +28,7 @@
  */
 
 #include <linux/export.h>
+#include <linux/kernel.h>
 #include <linux/timex.h>
 #include <linux/capability.h>
 #include <linux/timekeeper_internal.h>
@@ -314,9 +315,10 @@ unsigned int jiffies_to_msecs(const unsigned long j)
        return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
 #else
 # if BITS_PER_LONG == 32
-       return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
+       return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
+              HZ_TO_MSEC_SHR32;
 # else
-       return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
+       return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
 # endif
 #endif
 }
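
Before this change jiffies_to_msecs() truncated whenever HZ does not divide 1000 (e.g. HZ=1024), so a nonzero jiffies count could round down to 0 ms; both branches now round up. A runnable check of the 64-bit branch, with the constants mirroring the HZ=1024 case (1000/1024 reduces to 125/128) and DIV_ROUND_UP reimplemented for the demo:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    /* HZ = 1024: HZ_TO_MSEC_NUM/DEN reduce to 125/128 */
    unsigned long num = 125, den = 128;

    for (unsigned long j = 1; j <= 3; j++)
        printf("j=%lu  truncated=%lu ms  rounded-up=%lu ms\n",
               j, (j * num) / den, DIV_ROUND_UP(j * num, den));
    /* j=1: truncated gives 0 ms, rounded-up gives 1 ms -- the bug fixed here */
    return 0;
}
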
index c9336e98ac59a778d31c16a9ac72b184477e7177..a0079b4c7a4956444f6e8d4a0a5c1d7df1880d2e 100644 (file)
@@ -1360,8 +1360,6 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-       struct ring_buffer *buf;
-
        if (tr->stop_count)
                return;
 
@@ -1375,9 +1373,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        arch_spin_lock(&tr->max_lock);
 
-       buf = tr->trace_buffer.buffer;
-       tr->trace_buffer.buffer = tr->max_buffer.buffer;
-       tr->max_buffer.buffer = buf;
+       swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
 
        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
index e1c818dbc0d724c603be39463b83de8f021cf79f..0dceb77d1d42b3afa6c38dc7dce22ad163a48ad3 100644 (file)
@@ -78,7 +78,8 @@ static const char * ops[] = { OPS };
        C(TOO_MANY_PREDS,       "Too many terms in predicate expression"), \
        C(INVALID_FILTER,       "Meaningless filter expression"),       \
        C(IP_FIELD_ONLY,        "Only 'ip' field is supported for function trace"), \
-       C(INVALID_VALUE,        "Invalid value (did you forget quotes)?"),
+       C(INVALID_VALUE,        "Invalid value (did you forget quotes)?"), \
+       C(NO_FILTER,            "No filter found"),
 
 #undef C
 #define C(a, b)                FILT_ERR_##a
@@ -550,6 +551,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
                goto out_free;
        }
 
+       if (!N) {
+               /* No program? */
+               ret = -EINVAL;
+               parse_error(pe, FILT_ERR_NO_FILTER, ptr - str);
+               goto out_free;
+       }
+
        prog[N].pred = NULL;                                    /* #13 */
        prog[N].target = 1;             /* TRUE */
        prog[N+1].pred = NULL;
index e34b04b56057a86cd0ade5cb9fcb4919730f80a2..706836ec314d2add83b84ebe59dec8fd7e13a7ad 100644 (file)
@@ -420,60 +420,15 @@ config HAS_IOPORT_MAP
        depends on HAS_IOMEM && !NO_IOPORT_MAP
        default y
 
-config HAS_DMA
-       bool
-       depends on !NO_DMA
-       default y
+source "kernel/dma/Kconfig"
 
 config SGL_ALLOC
        bool
        default n
 
-config NEED_SG_DMA_LENGTH
-       bool
-
-config NEED_DMA_MAP_STATE
-       bool
-
-config ARCH_DMA_ADDR_T_64BIT
-       def_bool 64BIT || PHYS_ADDR_T_64BIT
-
 config IOMMU_HELPER
        bool
 
-config ARCH_HAS_SYNC_DMA_FOR_DEVICE
-       bool
-
-config ARCH_HAS_SYNC_DMA_FOR_CPU
-       bool
-       select NEED_DMA_MAP_STATE
-
-config DMA_DIRECT_OPS
-       bool
-       depends on HAS_DMA
-
-config DMA_NONCOHERENT_OPS
-       bool
-       depends on HAS_DMA
-       select DMA_DIRECT_OPS
-
-config DMA_NONCOHERENT_MMAP
-       bool
-       depends on DMA_NONCOHERENT_OPS
-
-config DMA_NONCOHERENT_CACHE_SYNC
-       bool
-       depends on DMA_NONCOHERENT_OPS
-
-config DMA_VIRT_OPS
-       bool
-       depends on HAS_DMA
-
-config SWIOTLB
-       bool
-       select DMA_DIRECT_OPS
-       select NEED_DMA_MAP_STATE
-
 config CHECK_SIGNATURE
        bool
 
index 3d35d062970d2459ecee5573cf512a999061b3ab..c253c1b46c6b12b2a7f2f3879bba0cf698a47866 100644 (file)
@@ -6,6 +6,7 @@ if HAVE_ARCH_KASAN
 config KASAN
        bool "KASan: runtime memory debugger"
        depends on SLUB || (SLAB && !DEBUG_SLAB)
+       select SLUB_DEBUG if SLUB
        select CONSTRUCTORS
        select STACKDEPOT
        help
index 956b320292fef9a4055a1a955f37b6f41c2a4b71..90dc5520b7849dc69dc4c3df3ea419c45e9451cc 100644 (file)
@@ -23,15 +23,12 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
         sha1.o chacha20.o irq_regs.o argv_split.o \
         flex_proportions.o ratelimit.o show_mem.o \
         is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-        earlycpio.o seq_buf.o siphash.o \
+        earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
         nmi_backtrace.o nodemask.o win_minmax.o
 
 lib-$(CONFIG_PRINTK) += dump_stack.o
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
-lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
-lib-$(CONFIG_DMA_NONCOHERENT_OPS) += dma-noncoherent.o
-lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
 
 lib-y  += kobject.o klist.o
 obj-y  += lockref.o
@@ -98,10 +95,6 @@ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
 
-ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
-  lib-y += dec_and_lock.o
-endif
-
 obj-$(CONFIG_BITREVERSE) += bitrev.o
 obj-$(CONFIG_RATIONAL) += rational.o
 obj-$(CONFIG_CRC_CCITT)        += crc-ccitt.o
@@ -148,7 +141,6 @@ obj-$(CONFIG_SMP) += percpu_counter.o
 obj-$(CONFIG_AUDIT_GENERIC) += audit.o
 obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
 
-obj-$(CONFIG_SWIOTLB) += swiotlb.o
 obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
 obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
 obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
@@ -169,8 +161,6 @@ obj-$(CONFIG_NLATTR) += nlattr.o
 
 obj-$(CONFIG_LRU_CACHE) += lru_cache.o
 
-obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
-
 obj-$(CONFIG_GENERIC_CSUM) += checksum.o
 
 obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
index 347fa7ac2e8a858827415d44725d256d2a9e96a3..9555b68bb774cc3277dca434d19880286d71df0e 100644 (file)
@@ -33,3 +33,19 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
+
+int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+                                unsigned long *flags)
+{
+       /* Subtract 1 from counter unless that drops it to 0 (i.e. it was 1) */
+       if (atomic_add_unless(atomic, -1, 1))
+               return 0;
+
+       /* Otherwise do it the slow way */
+       spin_lock_irqsave(lock, *flags);
+       if (atomic_dec_and_test(atomic))
+               return 1;
+       spin_unlock_irqrestore(lock, *flags);
+       return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
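
The new helper mirrors _atomic_dec_and_lock() but takes the lock with spin_lock_irqsave(), handing the saved IRQ state back through *flags so the caller can unlock with it after tearing the object down. A runnable userspace analog of the fast-path/slow-path split using C11 atomics and a pthread mutex; the IRQ-flags half has no userspace equivalent and is omitted:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* analog of atomic_add_unless(v, -1, 1): decrement unless the count is 1 */
static bool dec_unless_one(atomic_int *v)
{
    int old = atomic_load(v);

    while (old != 1)
        if (atomic_compare_exchange_weak(v, &old, old - 1))
            return true;
    return false;
}

/* returns true with the lock held iff this call dropped the count to zero */
static bool dec_and_lock(atomic_int *v, pthread_mutex_t *lock)
{
    if (dec_unless_one(v))
        return false;             /* fast path: no lock taken */
    pthread_mutex_lock(lock);     /* slow path, like spin_lock_irqsave() */
    if (atomic_fetch_sub(v, 1) == 1)
        return true;              /* caller frees, then unlocks */
    pthread_mutex_unlock(lock);
    return false;
}

int main(void)
{
    atomic_int ref = 2;
    pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    printf("first drop frees? %d\n", dec_and_lock(&ref, &lock));  /* 0 */
    if (dec_and_lock(&ref, &lock)) {
        puts("last drop frees under lock");
        pthread_mutex_unlock(&lock);
    }
    return 0;
}
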
index dfa55c873c1318643fdbcbe916b9c18a54edc4c9..e335bcafa9e4c3012de2f0f8606c3e542008f7b3 100644 (file)
@@ -253,8 +253,8 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
                        if (policy) {
                                err = validate_nla(nla, maxtype, policy);
                                if (err < 0) {
-                                       if (extack)
-                                               extack->bad_attr = nla;
+                                       NL_SET_ERR_MSG_ATTR(extack, nla,
+                                                           "Attribute failed policy validation");
                                        goto errout;
                                }
                        }
index 9bbd9c5d375a2c8bf9a6d950ba42ab556c12063b..beb14839b41ae3c04fd698ec33a34727a2bc92d5 100644 (file)
@@ -141,7 +141,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
        spin_lock_irqsave(&tags->lock, flags);
 
        /* Fastpath */
-       if (likely(tags->nr_free >= 0)) {
+       if (likely(tags->nr_free)) {
                tag = tags->freelist[--tags->nr_free];
                spin_unlock_irqrestore(&tags->lock, flags);
                return tag;
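This one-character change is the whole fix: nr_free is an unsigned counter in percpu_ida's per-CPU structure, so the old comparison was a tautology and an empty freelist would still take the fastpath, decrement through zero, and read freelist[UINT_MAX]. A compact illustration, assuming an unsigned counter as in the original:

        /* Old test: always true for an unsigned type (compilers warn),
         * so the pop below it could underflow on an empty freelist.
         */
        static bool old_fastpath_ok(unsigned int nr_free)
        {
                return nr_free >= 0;    /* tautology */
        }

        /* Fixed test: the fastpath runs only when a tag really exists. */
        static bool new_fastpath_ok(unsigned int nr_free)
        {
                return nr_free != 0;
        }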
index fcb4ce682c6fae37d71aa6138007b51cd0d8a2b3..bf043258fa0082b4cc1100a982d2ed38e21b1c0e 100644 (file)
@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <linux/bug.h>
 #include <linux/kernel.h>
 #include <asm/div64.h>
 #include <linux/reciprocal_div.h>
@@ -26,3 +27,43 @@ struct reciprocal_value reciprocal_value(u32 d)
        return R;
 }
 EXPORT_SYMBOL(reciprocal_value);
+
+struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec)
+{
+       struct reciprocal_value_adv R;
+       u32 l, post_shift;
+       u64 mhigh, mlow;
+
+       /* ceil(log2(d)) */
+       l = fls(d - 1);
+       /* NOTE: mlow/mhigh could overflow u64 when l == 32. This case needs to
+        * be handled before calling "reciprocal_value_adv", please see the
+        * comment at include/linux/reciprocal_div.h.
+        */
+       WARN(l == 32,
+            "ceil(log2(0x%08x)) == 32, %s doesn't support such divisor",
+            d, __func__);
+       post_shift = l;
+       mlow = 1ULL << (32 + l);
+       do_div(mlow, d);
+       mhigh = (1ULL << (32 + l)) + (1ULL << (32 + l - prec));
+       do_div(mhigh, d);
+
+       for (; post_shift > 0; post_shift--) {
+               u64 lo = mlow >> 1, hi = mhigh >> 1;
+
+               if (lo >= hi)
+                       break;
+
+               mlow = lo;
+               mhigh = hi;
+       }
+
+       R.m = (u32)mhigh;
+       R.sh = post_shift;
+       R.exp = l;
+       R.is_wide_m = mhigh > U32_MAX;
+
+       return R;
+}
+EXPORT_SYMBOL(reciprocal_value_adv);
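A hedged sketch of how the returned triple is consumed in the narrow-m case (R.is_wide_m == false). The wide-m case needs an extra correction step and is omitted here; exactness also depends on choosing @prec to cover the bit width of the dividends involved.

        static u32 divide_by_const(u32 a, const struct reciprocal_value_adv *R)
        {
                /* floor(a / d) == ((u64)a * m) >> (32 + sh) for narrow m */
                return (u32)(((u64)a * R->m) >> 32) >> R->sh;
        }

For instance, reciprocal_value_adv(10, 32) reduces to the familiar divide-by-ten constants, R.m == 0xCCCCCCCD and R.sh == 3.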
index 0eb48353abe30164d4ae564aa21bee901fad72c3..d3b81cefce91a83698c3127d562f7e81cac0b55f 100644 (file)
@@ -350,3 +350,31 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 }
 EXPORT_SYMBOL(refcount_dec_and_lock);
 
+/**
+ * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
+ *                                 interrupts if able to decrement refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ * @flags: saved IRQ flags if the lock is acquired
+ *
+ * Same as refcount_dec_and_lock() above except that the spinlock is acquired
+ * with disabled interrupts.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ *         otherwise
+ */
+bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
+                                  unsigned long *flags)
+{
+       if (refcount_dec_not_one(r))
+               return false;
+
+       spin_lock_irqsave(lock, *flags);
+       if (!refcount_dec_and_test(r)) {
+               spin_unlock_irqrestore(lock, *flags);
+               return false;
+       }
+
+       return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
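A usage sketch matching the documented contract, with hypothetical names: on the final reference the function returns true with the lock held and interrupts disabled, so the caller can unlink, unlock, and free.

        struct item {
                refcount_t ref;
                spinlock_t lock;
                struct list_head node;
        };

        static void item_put(struct item *it)
        {
                unsigned long flags;

                if (!refcount_dec_and_lock_irqsave(&it->ref, &it->lock, &flags))
                        return;         /* not the final reference */

                list_del(&it->node);
                spin_unlock_irqrestore(&it->lock, flags);
                kfree(it);
        }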
index 9427b5766134cb139ef385b27f92f6027fecceca..0e04947b7e0c588fbcdce89128d6fb3140b9c8d3 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/rhashtable.h>
 #include <linux/err.h>
 #include <linux/export.h>
+#include <linux/rhashtable.h>
 
 #define HASH_DEFAULT_SIZE      64UL
 #define HASH_MIN_SIZE          4U
@@ -115,8 +116,7 @@ static void bucket_table_free_rcu(struct rcu_head *head)
 
 static union nested_table *nested_table_alloc(struct rhashtable *ht,
                                              union nested_table __rcu **prev,
-                                             unsigned int shifted,
-                                             unsigned int nhash)
+                                             bool leaf)
 {
        union nested_table *ntbl;
        int i;
@@ -127,10 +127,9 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
 
        ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
 
-       if (ntbl && shifted) {
-               for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
-                       INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
-                                           (i << shifted) | nhash);
+       if (ntbl && leaf) {
+               for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
+                       INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
        }
 
        rcu_assign_pointer(*prev, ntbl);
@@ -156,7 +155,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
                return NULL;
 
        if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
-                               0, 0)) {
+                               false)) {
                kfree(tbl);
                return NULL;
        }
@@ -206,7 +205,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
        tbl->hash_rnd = get_random_u32();
 
        for (i = 0; i < nbuckets; i++)
-               INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
+               INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
 
        return tbl;
 }
@@ -227,8 +226,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
 {
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-       struct bucket_table *new_tbl = rhashtable_last_table(ht,
-               rht_dereference_rcu(old_tbl->future_tbl, ht));
+       struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
        struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
        int err = -EAGAIN;
        struct rhash_head *head, *next, *entry;
@@ -298,21 +296,14 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,
                                    struct bucket_table *old_tbl,
                                    struct bucket_table *new_tbl)
 {
-       /* Protect future_tbl using the first bucket lock. */
-       spin_lock_bh(old_tbl->locks);
-
-       /* Did somebody beat us to it? */
-       if (rcu_access_pointer(old_tbl->future_tbl)) {
-               spin_unlock_bh(old_tbl->locks);
-               return -EEXIST;
-       }
-
        /* Make insertions go into the new, empty table right away. Deletions
         * and lookups will be attempted in both tables until we synchronize.
+        * As cmpxchg() provides strong barriers, we do not need
+        * rcu_assign_pointer().
         */
-       rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
 
-       spin_unlock_bh(old_tbl->locks);
+       if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
+               return -EEXIST;
 
        return 0;
 }
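The pattern above in isolation, as a minimal sketch: a successful cmpxchg() from NULL both claims the slot and orders the earlier initialisation of the new table, so no separate rcu_assign_pointer() is needed and a losing racer simply observes -EEXIST.

        static int publish_once(struct bucket_table **slot,
                                struct bucket_table *new_tbl)
        {
                /* The full barrier implied by cmpxchg() makes new_tbl's
                 * contents visible before the pointer itself.
                 */
                return cmpxchg(slot, NULL, new_tbl) == NULL ? 0 : -EEXIST;
        }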
@@ -475,7 +466,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
 
 fail:
        /* Do not fail the insert if someone else did a rehash. */
-       if (likely(rcu_dereference_raw(tbl->future_tbl)))
+       if (likely(rcu_access_pointer(tbl->future_tbl)))
                return 0;
 
        /* Schedule async rehash to retry allocation in process context. */
@@ -548,7 +539,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
        if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
                return ERR_CAST(data);
 
-       new_tbl = rcu_dereference(tbl->future_tbl);
+       new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (new_tbl)
                return new_tbl;
 
@@ -607,7 +598,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
                        break;
 
                spin_unlock_bh(lock);
-               tbl = rcu_dereference(tbl->future_tbl);
+               tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        }
 
        data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
@@ -994,7 +985,6 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
  *     .key_offset = offsetof(struct test_obj, key),
  *     .key_len = sizeof(int),
  *     .hashfn = jhash,
- *     .nulls_base = (1U << RHT_BASE_SHIFT),
  * };
  *
  * Configuration Example 2: Variable length keys
@@ -1028,9 +1018,6 @@ int rhashtable_init(struct rhashtable *ht,
            (params->obj_hashfn && !params->obj_cmpfn))
                return -EINVAL;
 
-       if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
-               return -EINVAL;
-
        memset(ht, 0, sizeof(*ht));
        mutex_init(&ht->mutex);
        spin_lock_init(&ht->lock);
@@ -1095,10 +1082,6 @@ int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
 {
        int err;
 
-       /* No rhlist NULLs marking for now. */
-       if (params->nulls_base)
-               return -EINVAL;
-
        err = rhashtable_init(&hlt->ht, params);
        hlt->ht.rhlist = true;
        return err;
@@ -1216,25 +1199,18 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
        unsigned int index = hash & ((1 << tbl->nest) - 1);
        unsigned int size = tbl->size >> tbl->nest;
        union nested_table *ntbl;
-       unsigned int shifted;
-       unsigned int nhash;
 
        ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
        hash >>= tbl->nest;
-       nhash = index;
-       shifted = tbl->nest;
        ntbl = nested_table_alloc(ht, &ntbl[index].table,
-                                 size <= (1 << shift) ? shifted : 0, nhash);
+                                 size <= (1 << shift));
 
        while (ntbl && size > (1 << shift)) {
                index = hash & ((1 << shift) - 1);
                size >>= shift;
                hash >>= shift;
-               nhash |= index << shifted;
-               shifted += shift;
                ntbl = nested_table_alloc(ht, &ntbl[index].table,
-                                         size <= (1 << shift) ? shifted : 0,
-                                         nhash);
+                                         size <= (1 << shift));
        }
 
        if (!ntbl)
index 1642fd507a960f5deb2b6d7366db83800a3e547b..7c6096a7170486449736d82a37fbd50326ac169e 100644 (file)
@@ -24,9 +24,6 @@
  **/
 struct scatterlist *sg_next(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        if (sg_is_last(sg))
                return NULL;
 
@@ -111,10 +108,7 @@ struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
        for_each_sg(sgl, sg, nents, i)
                ret = sg;
 
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sgl[0].sg_magic != SG_MAGIC);
        BUG_ON(!sg_is_last(ret));
-#endif
        return ret;
 }
 EXPORT_SYMBOL(sg_last);
index 60aedc87936106460e436fe66429d45a59f36060..08d3d59dca17343c1a91def02d0a7da931c1c0f0 100644 (file)
@@ -5282,21 +5282,31 @@ static struct bpf_test tests[] = {
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Ctx heavy transformations",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC,
+#endif
                { },
                {
                        {  1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
                        { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
                },
                .fill_helper = bpf_fill_maxinsns6,
+               .expected_errcode = -ENOTSUPP,
        },
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Call heavy transformations",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC | FLAG_NO_DATA,
+#endif
                { },
                { { 1, 0 }, { 10, 0 } },
                .fill_helper = bpf_fill_maxinsns7,
+               .expected_errcode = -ENOTSUPP,
        },
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Jump heavy test",
@@ -5347,18 +5357,28 @@ static struct bpf_test tests[] = {
        {
                "BPF_MAXINSNS: exec all MSH",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC,
+#endif
                { 0xfa, 0xfb, 0xfc, 0xfd, },
                { { 4, 0xababab83 } },
                .fill_helper = bpf_fill_maxinsns13,
+               .expected_errcode = -ENOTSUPP,
        },
        {
                "BPF_MAXINSNS: ld_abs+get_processor_id",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC,
+#endif
                { },
                { { 1, 0xbee } },
                .fill_helper = bpf_fill_ld_abs_get_processor_id,
+               .expected_errcode = -ENOTSUPP,
        },
        /*
         * LD_IND / LD_ABS on fragmented SKBs
index b2aa8f5148449de1557e3ee48feebb8f1cab3083..cea592f402ed029d6d5dd63addd2d2bc8a8391f1 100644 (file)
@@ -260,13 +260,6 @@ plain(void)
 {
        int err;
 
-       /*
-        * Make sure crng is ready. Otherwise we get "(ptrval)" instead
-        * of a hashed address when printing '%p' in plain_hash() and
-        * plain_format().
-        */
-       wait_for_random_bytes();
-
        err = plain_hash();
        if (err) {
                pr_warn("plain 'p' does not appear to be hashed\n");
index fb69681091134cc879748b40f1f371978056be25..82ac39ce53105f2dc39d517467333b255ae218cb 100644 (file)
@@ -83,7 +83,7 @@ static u32 my_hashfn(const void *data, u32 len, u32 seed)
 {
        const struct test_obj_rhl *obj = data;
 
-       return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE;
+       return (obj->value.id % 10);
 }
 
 static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
@@ -99,7 +99,6 @@ static struct rhashtable_params test_rht_params = {
        .key_offset = offsetof(struct test_obj, value),
        .key_len = sizeof(struct test_obj_val),
        .hashfn = jhash,
-       .nulls_base = (3U << RHT_BASE_SHIFT),
 };
 
 static struct rhashtable_params test_rht_params_dup = {
@@ -296,8 +295,6 @@ static int __init test_rhltable(unsigned int entries)
        if (!obj_in_table)
                goto out_free;
 
-       /* nulls_base not supported in rhlist interface */
-       test_rht_params.nulls_base = 0;
        err = rhltable_init(&rhlt, &test_rht_params);
        if (WARN_ON(err))
                goto out_free;
@@ -501,6 +498,8 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
        unsigned int i, cnt = 0;
 
        ht = &rhlt->ht;
+       /* Take the mutex to avoid RCU warning */
+       mutex_lock(&ht->mutex);
        tbl = rht_dereference(ht->tbl, ht);
        for (i = 0; i < tbl->size; i++) {
                struct rhash_head *pos, *next;
@@ -534,6 +533,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
                }
        }
        printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
+       mutex_unlock(&ht->mutex);
 
        return cnt;
 }
index 347cc834c04a8cbc388af1b7594e4f09bdc68b41..2e5d3df0853d928021cba0e30c70ff682afeaf68 100644 (file)
@@ -359,15 +359,8 @@ static void wb_shutdown(struct bdi_writeback *wb)
        spin_lock_bh(&wb->work_lock);
        if (!test_and_clear_bit(WB_registered, &wb->state)) {
                spin_unlock_bh(&wb->work_lock);
-               /*
-                * Wait for wb shutdown to finish if someone else is just
-                * running wb_shutdown(). Otherwise we could proceed to wb /
-                * bdi destruction before wb_shutdown() is finished.
-                */
-               wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE);
                return;
        }
-       set_bit(WB_shutting_down, &wb->state);
        spin_unlock_bh(&wb->work_lock);
 
        cgwb_remove_from_bdi_list(wb);
@@ -379,12 +372,6 @@ static void wb_shutdown(struct bdi_writeback *wb)
        mod_delayed_work(bdi_wq, &wb->dwork, 0);
        flush_delayed_work(&wb->dwork);
        WARN_ON(!list_empty(&wb->work_list));
-       /*
-        * Make sure bit gets cleared after shutdown is finished. Matches with
-        * the barrier provided by test_and_clear_bit() above.
-        */
-       smp_wmb();
-       clear_and_wake_up_bit(WB_shutting_down, &wb->state);
 }
 
 static void wb_exit(struct bdi_writeback *wb)
@@ -508,10 +495,12 @@ static void cgwb_release_workfn(struct work_struct *work)
        struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
                                                release_work);
 
+       mutex_lock(&wb->bdi->cgwb_release_mutex);
        wb_shutdown(wb);
 
        css_put(wb->memcg_css);
        css_put(wb->blkcg_css);
+       mutex_unlock(&wb->bdi->cgwb_release_mutex);
 
        fprop_local_destroy_percpu(&wb->memcg_completions);
        percpu_ref_exit(&wb->refcnt);
@@ -697,6 +686,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
 
        INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
        bdi->cgwb_congested_tree = RB_ROOT;
+       mutex_init(&bdi->cgwb_release_mutex);
 
        ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (!ret) {
@@ -717,7 +707,10 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
        spin_lock_irq(&cgwb_lock);
        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
                cgwb_kill(*slot);
+       spin_unlock_irq(&cgwb_lock);
 
+       mutex_lock(&bdi->cgwb_release_mutex);
+       spin_lock_irq(&cgwb_lock);
        while (!list_empty(&bdi->wb_list)) {
                wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
                                      bdi_node);
@@ -726,6 +719,7 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
                spin_lock_irq(&cgwb_lock);
        }
        spin_unlock_irq(&cgwb_lock);
+       mutex_unlock(&bdi->cgwb_release_mutex);
 }
 
 /**
index cc16d70b8333890730d16c08b858631947e38d70..03d48d8835babeb507f78057c3f3068339dad77a 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/kmemleak.h>
 #include <linux/seq_file.h>
 #include <linux/memblock.h>
-#include <linux/bootmem.h>
 
 #include <asm/sections.h>
 #include <linux/io.h>
index 890b1f04a03a3d46f80fe1b2cfccae1f14b79836..2296caf87bfbd28a626663af04f2054a0cc3c45c 100644 (file)
@@ -567,10 +567,14 @@ static int shutdown_cache(struct kmem_cache *s)
        list_del(&s->list);
 
        if (s->flags & SLAB_TYPESAFE_BY_RCU) {
+#ifdef SLAB_SUPPORTS_SYSFS
+               sysfs_slab_unlink(s);
+#endif
                list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
                schedule_work(&slab_caches_to_rcu_destroy_work);
        } else {
 #ifdef SLAB_SUPPORTS_SYSFS
+               sysfs_slab_unlink(s);
                sysfs_slab_release(s);
 #else
                slab_kmem_cache_release(s);
index a3b8467c14af642138deaf35fd3ed3f7f87aed93..51258eff417836f6c5a72433a65c016c8391beb2 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5667,7 +5667,6 @@ static void sysfs_slab_remove_workfn(struct work_struct *work)
        kset_unregister(s->memcg_kset);
 #endif
        kobject_uevent(&s->kobj, KOBJ_REMOVE);
-       kobject_del(&s->kobj);
 out:
        kobject_put(&s->kobj);
 }
@@ -5752,6 +5751,12 @@ static void sysfs_slab_remove(struct kmem_cache *s)
        schedule_work(&s->kobj_remove_work);
 }
 
+void sysfs_slab_unlink(struct kmem_cache *s)
+{
+       if (slab_state >= FULL)
+               kobject_del(&s->kobj);
+}
+
 void sysfs_slab_release(struct kmem_cache *s)
 {
        if (slab_state >= FULL)
index 75eda9c2b2602fe24b4c431f797c5e0fc563ebda..8ba0870ecddd0fd592d16ee674b060db512b5b37 100644 (file)
@@ -1796,11 +1796,9 @@ static void vmstat_update(struct work_struct *w)
                 * to occur in the future. Keep on running the
                 * update worker thread.
                 */
-               preempt_disable();
                queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
                                this_cpu_ptr(&vmstat_work),
                                round_jiffies_relative(sysctl_stat_interval));
-               preempt_enable();
        }
 }
 
index 73a65789271ba9346902dd721b0accd8ce747adc..5e99504539559e39f45e873171e53a565195b7bc 100644 (file)
@@ -647,13 +647,14 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
        return err;
 }
 
-static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
-                                        struct sk_buff *skb)
+static struct sk_buff *vlan_gro_receive(struct list_head *head,
+                                       struct sk_buff *skb)
 {
-       struct sk_buff *p, **pp = NULL;
-       struct vlan_hdr *vhdr;
-       unsigned int hlen, off_vlan;
        const struct packet_offload *ptype;
+       unsigned int hlen, off_vlan;
+       struct sk_buff *pp = NULL;
+       struct vlan_hdr *vhdr;
+       struct sk_buff *p;
        __be16 type;
        int flush = 1;
 
@@ -675,7 +676,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
 
        flush = 0;
 
-       for (p = *head; p; p = p->next) {
+       list_for_each_entry(p, head, list) {
                struct vlan_hdr *vhdr2;
 
                if (!NAPI_GRO_CB(p)->same_flow)
@@ -693,7 +694,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
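A minimal sketch of the list-based walk this conversion enables, with hypothetical helper and parameter names: held GRO skbs are now chained through skb->list, so the generic list iterator replaces the hand-rolled p->next chain seen on the removed lines.

        static void flag_other_flows(struct list_head *head, __be16 proto)
        {
                struct sk_buff *p;

                list_for_each_entry(p, head, list) {
                        if (!NAPI_GRO_CB(p)->same_flow)
                                continue;
                        if (p->protocol != proto)
                                NAPI_GRO_CB(p)->same_flow = 0;
                }
        }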
index 13ec0d5415c74486c68f8290689d16d78513e6e9..bdaf53925acd5606fdb953800620bd05cf0f259e 100644 (file)
@@ -20,11 +20,7 @@ obj-$(CONFIG_TLS)            += tls/
 obj-$(CONFIG_XFRM)             += xfrm/
 obj-$(CONFIG_UNIX)             += unix/
 obj-$(CONFIG_NET)              += ipv6/
-ifneq ($(CC_CAN_LINK),y)
-$(warning CC cannot link executables. Skipping bpfilter.)
-else
 obj-$(CONFIG_BPFILTER)         += bpfilter/
-endif
 obj-$(CONFIG_PACKET)           += packet/
 obj-$(CONFIG_NET_KEY)          += key/
 obj-$(CONFIG_BRIDGE)           += bridge/
index 55fdba05d7d9daa805d358118852aabb07746e81..9b6bc5abe94680c0a982b9193932f245080f2f85 100644 (file)
@@ -1869,7 +1869,7 @@ static const struct proto_ops atalk_dgram_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = atalk_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = atalk_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = atalk_compat_ioctl,
index 36b3adacc0ddc1bd9a6c5b8dd55cedba4e9bf47b..10462de734eafc00efb9490ddd58cd0bbc83b7c8 100644 (file)
@@ -252,8 +252,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
 
        ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
        pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
-       refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
-       ATM_SKB(skb)->atm_options = atmvcc->atm_options;
+       atm_account_tx(atmvcc, skb);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
 
index 66caa48a27c2307c1b2b43f4a4381f3b34e78485..d795b9c5aea4a4e35021d9db2e10254036df55fe 100644 (file)
@@ -381,8 +381,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
                memcpy(here, llc_oui, sizeof(llc_oui));
                ((__be16 *) here)[3] = skb->protocol;
        }
-       refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
-       ATM_SKB(skb)->atm_options = vcc->atm_options;
+       atm_account_tx(vcc, skb);
        entry->vccs->last_use = jiffies;
        pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
        old = xchg(&entry->vccs->xoff, 1);      /* assume XOFF ... */
index 1f2af59935db356c003cfa8dd7d1bce388fada53..a7a68e5096288df11af1037297189962dc2fa548 100644 (file)
@@ -630,10 +630,9 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
                goto out;
        }
        pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
-       refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+       atm_account_tx(vcc, skb);
 
        skb->dev = NULL; /* for paths shared with net_device interfaces */
-       ATM_SKB(skb)->atm_options = vcc->atm_options;
        if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
                kfree_skb(skb);
                error = -EFAULT;
@@ -648,11 +647,16 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
        return error;
 }
 
-__poll_t vcc_poll_mask(struct socket *sock, __poll_t events)
+__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
-       struct atm_vcc *vcc = ATM_SD(sock);
-       __poll_t mask = 0;
+       struct atm_vcc *vcc;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
+
+       vcc = ATM_SD(sock);
 
        /* exceptional events */
        if (sk->sk_err)
index 526796ad230fc6a2dbdca37f0d4f66f4edf47f17..5850649068bb29b3d688b4c8e29373b4a7f7592d 100644 (file)
@@ -17,7 +17,7 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci);
 int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                int flags);
 int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len);
-__poll_t vcc_poll_mask(struct socket *sock, __poll_t events);
+__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
 int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int vcc_setsockopt(struct socket *sock, int level, int optname,
index 5a95fcf6f9b6cc62ced5480910dac7e41f2e7f06..d7f5cf5b7594d0ea4e766e06fbc07e6fce590e3b 100644 (file)
@@ -182,9 +182,8 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
        struct net_device *dev = skb->dev;
 
        ATM_SKB(skb)->vcc = vcc;
-       ATM_SKB(skb)->atm_options = vcc->atm_options;
+       atm_account_tx(vcc, skb);
 
-       refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
        if (vcc->send(vcc, skb) < 0) {
                dev->stats.tx_dropped++;
                return;
index 75620c2f261723a915b74df013ac214479ba70c4..24b53c4c39c6a6b5323a1aa79318b2ab2907a332 100644 (file)
@@ -555,8 +555,7 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
                                        sizeof(struct llc_snap_hdr));
        }
 
-       refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
-       ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
+       atm_account_tx(entry->shortcut, skb);
        entry->shortcut->send(entry->shortcut, skb);
        entry->packets_fwded++;
        mpc->in_ops->put(entry);
index 21d9d341a6199255a017437954e4b688f1ba5bfd..af8c4b38b7463e03bf4b060735ce852b515d526c 100644 (file)
@@ -350,8 +350,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
                return 1;
        }
 
-       refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
-       ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
+       atm_account_tx(vcc, skb);
        pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
                 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
        ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
index 9f75092fe7785c080b2a32f9c2c8b147056bd488..2cb10af16afcf8eeb925bfe1aab33e839821109a 100644 (file)
@@ -113,7 +113,7 @@ static const struct proto_ops pvc_proto_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      pvc_getname,
-       .poll_mask =    vcc_poll_mask,
+       .poll =         vcc_poll,
        .ioctl =        vcc_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = vcc_compat_ioctl,
index ee10e8d46185173067f459aa5efdf5a77f8f9f06..b3ba44aab0ee6c9425fd278ebf8e2df1590a6d7a 100644 (file)
@@ -35,8 +35,8 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
        struct sock *sk = sk_atm(vcc);
 
        pr_debug("(%d) %d -= %d\n",
-                vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
-       WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
+                vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize);
+       WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc));
        dev_kfree_skb_any(skb);
        sk->sk_write_space(sk);
 }
index 53f4ad7087b169bccbd8d0b86c7463fd77204a8d..2f91b766ac423c97a0b9c1fd340222e31b17eefa 100644 (file)
@@ -636,7 +636,7 @@ static const struct proto_ops svc_proto_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       svc_accept,
        .getname =      svc_getname,
-       .poll_mask =    vcc_poll_mask,
+       .poll =         vcc_poll,
        .ioctl =        svc_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = svc_compat_ioctl,
index d1d2442ce573280cbc5b12beba96225bb7445a47..c603d33d54108b9f93f1745534da28d25f12c0ea 100644 (file)
@@ -1941,7 +1941,7 @@ static const struct proto_ops ax25_proto_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = ax25_accept,
        .getname        = ax25_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = ax25_ioctl,
        .listen         = ax25_listen,
        .shutdown       = ax25_shutdown,
index a2de5a44bd41bf5c3d521d29b72e0b225a3ace05..ff9659af6b9177cdb23523d79a4bc70665b1cc4d 100644 (file)
@@ -1449,7 +1449,7 @@ static void batadv_bla_periodic_work(struct work_struct *work)
                 * detection frames. Set the locally administered bit to avoid
                 * collisions with users mac addresses.
                 */
-               random_ether_addr(bat_priv->bla.loopdetect_addr);
+               eth_random_addr(bat_priv->bla.loopdetect_addr);
                bat_priv->bla.loopdetect_addr[0] = 0xba;
                bat_priv->bla.loopdetect_addr[1] = 0xbe;
                bat_priv->bla.loopdetect_lasttime = jiffies;
index 510ab4f55df56bc1c356d5130d2dbea4be4744ff..3264e1873219bd40b8c1ccfc2ce6c40d96ca0030 100644 (file)
@@ -437,13 +437,16 @@ static inline __poll_t bt_accept_poll(struct sock *parent)
        return 0;
 }
 
-__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events)
+__poll_t bt_sock_poll(struct file *file, struct socket *sock,
+                         poll_table *wait)
 {
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
 
        BT_DBG("sock %p, sk %p", sock, sk);
 
+       poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_state == BT_LISTEN)
                return bt_accept_poll(sk);
 
@@ -475,7 +478,7 @@ __poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events)
 
        return mask;
 }
-EXPORT_SYMBOL(bt_sock_poll_mask);
+EXPORT_SYMBOL(bt_sock_poll);
 
 int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
index d6c0998615388d078c0910bee08784b4fac2f0c0..1506e1632394acf06e9f5873d045bd394e5b3059 100644 (file)
@@ -1975,7 +1975,7 @@ static const struct proto_ops hci_sock_ops = {
        .sendmsg        = hci_sock_sendmsg,
        .recvmsg        = hci_sock_recvmsg,
        .ioctl          = hci_sock_ioctl,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = hci_sock_setsockopt,
index 742a190034e6378a4be886ed730d55936c82ee27..686bdc6b35b03d1fd0965dc0fd76c5edde78c1eb 100644 (file)
@@ -1653,7 +1653,7 @@ static const struct proto_ops l2cap_sock_ops = {
        .getname        = l2cap_sock_getname,
        .sendmsg        = l2cap_sock_sendmsg,
        .recvmsg        = l2cap_sock_recvmsg,
-       .poll_mask      = bt_sock_poll_mask,
+       .poll           = bt_sock_poll,
        .ioctl          = bt_sock_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
index 1cf57622473aa70d626e1df5ad867800ab4cfe6e..d606e9212291608ea2e266238c0f65ce18d0c311 100644 (file)
@@ -1049,7 +1049,7 @@ static const struct proto_ops rfcomm_sock_ops = {
        .setsockopt     = rfcomm_sock_setsockopt,
        .getsockopt     = rfcomm_sock_getsockopt,
        .ioctl          = rfcomm_sock_ioctl,
-       .poll_mask      = bt_sock_poll_mask,
+       .poll           = bt_sock_poll,
        .socketpair     = sock_no_socketpair,
        .mmap           = sock_no_mmap
 };
index d60dbc61d170864b1393aabb0d7f7965a1e6ad17..413b8ee49feca325dea79e328c11b8ba00afbce3 100644 (file)
@@ -1197,7 +1197,7 @@ static const struct proto_ops sco_sock_ops = {
        .getname        = sco_sock_getname,
        .sendmsg        = sco_sock_sendmsg,
        .recvmsg        = sco_sock_recvmsg,
-       .poll_mask      = bt_sock_poll_mask,
+       .poll           = bt_sock_poll,
        .ioctl          = bt_sock_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
diff --git a/net/bpfilter/.gitignore b/net/bpfilter/.gitignore
new file mode 100644 (file)
index 0000000..e97084e
--- /dev/null
@@ -0,0 +1 @@
+bpfilter_umh
index a948b072c28f36451a587a88bcb6f86c32023693..76deb661588322d9cf8ac6bdd73ba63f5d1416fc 100644 (file)
@@ -1,6 +1,5 @@
 menuconfig BPFILTER
        bool "BPF based packet filtering framework (BPFILTER)"
-       default n
        depends on NET && BPF && INET
        help
          This builds the experimental bpfilter framework that is aiming to
@@ -9,6 +8,7 @@ menuconfig BPFILTER
 if BPFILTER
 config BPFILTER_UMH
        tristate "bpfilter kernel module with user mode helper"
+       depends on $(success,$(srctree)/scripts/cc-can-link.sh $(CC))
        default m
        help
          This builds the bpfilter kernel module with an embedded user mode helper
index e0bbe7583e58dcca5e17136d1b091ff03465b4d2..39c6980b5d9952eed1046f656d8c0a85b4a0d2d6 100644 (file)
@@ -15,18 +15,7 @@ ifeq ($(CONFIG_BPFILTER_UMH), y)
 HOSTLDFLAGS += -static
 endif
 
-# a bit of elf magic to convert bpfilter_umh binary into a binary blob
-# inside bpfilter_umh.o elf file referenced by
-# _binary_net_bpfilter_bpfilter_umh_start symbol
-# which bpfilter_kern.c passes further into umh blob loader at run-time
-quiet_cmd_copy_umh = GEN $@
-      cmd_copy_umh = echo ':' > $(obj)/.bpfilter_umh.o.cmd; \
-      $(OBJCOPY) -I binary -O `$(OBJDUMP) -f $<|grep format|cut -d' ' -f8` \
-      -B `$(OBJDUMP) -f $<|grep architecture|cut -d, -f1|cut -d' ' -f2` \
-      --rename-section .data=.init.rodata $< $@
-
-$(obj)/bpfilter_umh.o: $(obj)/bpfilter_umh
-       $(call cmd,copy_umh)
+$(obj)/bpfilter_umh_blob.o: $(obj)/bpfilter_umh
 
 obj-$(CONFIG_BPFILTER_UMH) += bpfilter.o
-bpfilter-objs += bpfilter_kern.o bpfilter_umh.o
+bpfilter-objs += bpfilter_kern.o bpfilter_umh_blob.o
index 09522573f611b01ba5fb4d52125e8264d9147f20..f0fc182d3db77eb311d91f7faef4e8a6f85886b3 100644 (file)
 #include <linux/file.h>
 #include "msgfmt.h"
 
-#define UMH_start _binary_net_bpfilter_bpfilter_umh_start
-#define UMH_end _binary_net_bpfilter_bpfilter_umh_end
-
-extern char UMH_start;
-extern char UMH_end;
+extern char bpfilter_umh_start;
+extern char bpfilter_umh_end;
 
 static struct umh_info info;
 /* since ip_getsockopt() can run in parallel, serialize access to umh */
@@ -93,7 +90,9 @@ static int __init load_umh(void)
        int err;
 
        /* fork usermode process */
-       err = fork_usermode_blob(&UMH_start, &UMH_end - &UMH_start, &info);
+       err = fork_usermode_blob(&bpfilter_umh_start,
+                                &bpfilter_umh_end - &bpfilter_umh_start,
+                                &info);
        if (err)
                return err;
        pr_info("Loaded bpfilter_umh pid %d\n", info.pid);
diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S
new file mode 100644 (file)
index 0000000..40311d1
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+       .section .init.rodata, "a"
+       .global bpfilter_umh_start
+bpfilter_umh_start:
+       .incbin "net/bpfilter/bpfilter_umh"
+       .global bpfilter_umh_end
+bpfilter_umh_end:
index c7991867d62273f48bb55e88774b573e81f40536..a6fb1b3bcad9b2f3c1c24b2a3496ad21b07c69d9 100644 (file)
@@ -934,11 +934,15 @@ static int caif_release(struct socket *sock)
 }
 
 /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
-static __poll_t caif_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t caif_poll(struct file *file,
+                             struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
+       __poll_t mask;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-       __poll_t mask = 0;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* exceptional events? */
        if (sk->sk_err)
@@ -972,7 +976,7 @@ static const struct proto_ops caif_seqpacket_ops = {
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
-       .poll_mask = caif_poll_mask,
+       .poll = caif_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
@@ -993,7 +997,7 @@ static const struct proto_ops caif_stream_ops = {
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
-       .poll_mask = caif_poll_mask,
+       .poll = caif_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
index 9393f25df08d3fce299aaa463efd79244e6527e9..0af8f0db892a3311fb5a1a898ab0bff5696adf00 100644 (file)
@@ -1660,7 +1660,7 @@ static const struct proto_ops bcm_ops = {
        .socketpair    = sock_no_socketpair,
        .accept        = sock_no_accept,
        .getname       = sock_no_getname,
-       .poll_mask     = datagram_poll_mask,
+       .poll          = datagram_poll,
        .ioctl         = can_ioctl,     /* use can_ioctl() from af_can.c */
        .listen        = sock_no_listen,
        .shutdown      = sock_no_shutdown,
index fd7e2f49ea6a20b79c43bf50c72d2b1e8b48d260..1051eee8258184f33d15a6142ee8b387839c9adc 100644 (file)
@@ -843,7 +843,7 @@ static const struct proto_ops raw_ops = {
        .socketpair    = sock_no_socketpair,
        .accept        = sock_no_accept,
        .getname       = raw_getname,
-       .poll_mask     = datagram_poll_mask,
+       .poll          = datagram_poll,
        .ioctl         = can_ioctl,     /* use can_ioctl() from af_can.c */
        .listen        = sock_no_listen,
        .shutdown      = sock_no_shutdown,
index f19bf3dc2bd6ea02cb828a95d0b91322ac8b0004..9938952c5c78f1e72ef13f44517ef054a60205b2 100644 (file)
@@ -819,8 +819,9 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
 
 /**
  *     datagram_poll - generic datagram poll
+ *     @file: file struct
  *     @sock: socket
- *     @events to wait for
+ *     @wait: poll table
  *
  *     Datagram poll: Again totally generic. This also handles
  *     sequenced packet sockets providing the socket receive queue
@@ -830,10 +831,14 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
  *     and you use a different write policy from sock_writeable()
  *     then please supply your own write_space callback.
  */
-__poll_t datagram_poll_mask(struct socket *sock, __poll_t events)
+__poll_t datagram_poll(struct file *file, struct socket *sock,
+                          poll_table *wait)
 {
        struct sock *sk = sock->sk;
-       __poll_t mask = 0;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
@@ -866,4 +871,4 @@ __poll_t datagram_poll_mask(struct socket *sock, __poll_t events)
 
        return mask;
 }
-EXPORT_SYMBOL(datagram_poll_mask);
+EXPORT_SYMBOL(datagram_poll);
index 57b7bab5f70bb7c50a8be565cc90a40bc1c2d5d6..4f8b92d81d107fc9acd2499297435cbd9e9b5c67 100644 (file)
 
 #include "net-sysfs.h"
 
-/* Instead of increasing this, you should create a hash table. */
 #define MAX_GRO_SKBS 8
 
 /* This should be increased if a protocol with a bigger head is added. */
@@ -2068,11 +2067,13 @@ int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
                struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
                int i;
 
+               /* walk through the TCs and see if it falls into any of them */
                for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
                        if ((txq - tc->offset) < tc->count)
                                return i;
                }
 
+               /* didn't find it, just return -1 to indicate no match */
                return -1;
        }
 
@@ -2081,6 +2082,10 @@ int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
 EXPORT_SYMBOL(netdev_txq_to_tc);
 
 #ifdef CONFIG_XPS
+struct static_key xps_needed __read_mostly;
+EXPORT_SYMBOL(xps_needed);
+struct static_key xps_rxqs_needed __read_mostly;
+EXPORT_SYMBOL(xps_rxqs_needed);
 static DEFINE_MUTEX(xps_map_mutex);
 #define xmap_dereference(P)            \
        rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
@@ -2092,7 +2097,7 @@ static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
        int pos;
 
        if (dev_maps)
-               map = xmap_dereference(dev_maps->cpu_map[tci]);
+               map = xmap_dereference(dev_maps->attr_map[tci]);
        if (!map)
                return false;
 
@@ -2105,7 +2110,7 @@ static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
                        break;
                }
 
-               RCU_INIT_POINTER(dev_maps->cpu_map[tci], NULL);
+               RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
                kfree_rcu(map, rcu);
                return false;
        }
@@ -2135,33 +2140,68 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
        return active;
 }
 
+static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
+                          struct xps_dev_maps *dev_maps, unsigned int nr_ids,
+                          u16 offset, u16 count, bool is_rxqs_map)
+{
+       bool active = false;
+       int i, j;
+
+       for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
+            j < nr_ids;)
+               active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
+                                              count);
+       if (!active) {
+               if (is_rxqs_map) {
+                       RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
+               } else {
+                       RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+
+                       for (i = offset + (count - 1); count--; i--)
+                               netdev_queue_numa_node_write(
+                                       netdev_get_tx_queue(dev, i),
+                                                       NUMA_NO_NODE);
+               }
+               kfree_rcu(dev_maps, rcu);
+       }
+}
+
 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
                                   u16 count)
 {
+       const unsigned long *possible_mask = NULL;
        struct xps_dev_maps *dev_maps;
-       int cpu, i;
-       bool active = false;
+       unsigned int nr_ids;
+
+       if (!static_key_false(&xps_needed))
+               return;
 
        mutex_lock(&xps_map_mutex);
-       dev_maps = xmap_dereference(dev->xps_maps);
 
+       if (static_key_false(&xps_rxqs_needed)) {
+               dev_maps = xmap_dereference(dev->xps_rxqs_map);
+               if (dev_maps) {
+                       nr_ids = dev->num_rx_queues;
+                       clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
+                                      offset, count, true);
+               }
+       }
+
+       dev_maps = xmap_dereference(dev->xps_cpus_map);
        if (!dev_maps)
                goto out_no_maps;
 
-       for_each_possible_cpu(cpu)
-               active |= remove_xps_queue_cpu(dev, dev_maps, cpu,
-                                              offset, count);
-
-       if (!active) {
-               RCU_INIT_POINTER(dev->xps_maps, NULL);
-               kfree_rcu(dev_maps, rcu);
-       }
-
-       for (i = offset + (count - 1); count--; i--)
-               netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
-                                            NUMA_NO_NODE);
+       if (num_possible_cpus() > 1)
+               possible_mask = cpumask_bits(cpu_possible_mask);
+       nr_ids = nr_cpu_ids;
+       clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
+                      false);
 
 out_no_maps:
+       if (static_key_enabled(&xps_rxqs_needed))
+               static_key_slow_dec(&xps_rxqs_needed);
+
+       static_key_slow_dec(&xps_needed);
        mutex_unlock(&xps_map_mutex);
 }
 
@@ -2170,8 +2210,8 @@ static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
        netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
 }
 
-static struct xps_map *expand_xps_map(struct xps_map *map,
-                                     int cpu, u16 index)
+static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
+                                     u16 index, bool is_rxqs_map)
 {
        struct xps_map *new_map;
        int alloc_len = XPS_MIN_MAP_ALLOC;
@@ -2183,7 +2223,7 @@ static struct xps_map *expand_xps_map(struct xps_map *map,
                return map;
        }
 
-       /* Need to add queue to this CPU's existing map */
+       /* Need to add tx-queue to this CPU's/rx-queue's existing map */
        if (map) {
                if (pos < map->alloc_len)
                        return map;
@@ -2191,9 +2231,14 @@ static struct xps_map *expand_xps_map(struct xps_map *map,
                alloc_len = map->alloc_len * 2;
        }
 
-       /* Need to allocate new map to store queue on this CPU's map */
-       new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
-                              cpu_to_node(cpu));
+       /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
+        *  map
+        */
+       if (is_rxqs_map)
+               new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
+       else
+               new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
+                                      cpu_to_node(attr_index));
        if (!new_map)
                return NULL;
 
@@ -2205,32 +2250,52 @@ static struct xps_map *expand_xps_map(struct xps_map *map,
        return new_map;
 }
 
-int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
-                       u16 index)
+int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
+                         u16 index, bool is_rxqs_map)
 {
+       const unsigned long *online_mask = NULL, *possible_mask = NULL;
        struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
-       int i, cpu, tci, numa_node_id = -2;
+       int i, j, tci, numa_node_id = -2;
        int maps_sz, num_tc = 1, tc = 0;
        struct xps_map *map, *new_map;
        bool active = false;
+       unsigned int nr_ids;
 
        if (dev->num_tc) {
+               /* Do not allow XPS on subordinate device directly */
                num_tc = dev->num_tc;
+               if (num_tc < 0)
+                       return -EINVAL;
+
+               /* If queue belongs to subordinate dev use its map */
+               dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
+
                tc = netdev_txq_to_tc(dev, index);
                if (tc < 0)
                        return -EINVAL;
        }
 
-       maps_sz = XPS_DEV_MAPS_SIZE(num_tc);
-       if (maps_sz < L1_CACHE_BYTES)
-               maps_sz = L1_CACHE_BYTES;
-
        mutex_lock(&xps_map_mutex);
+       if (is_rxqs_map) {
+               maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
+               dev_maps = xmap_dereference(dev->xps_rxqs_map);
+               nr_ids = dev->num_rx_queues;
+       } else {
+               maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
+               if (num_possible_cpus() > 1) {
+                       online_mask = cpumask_bits(cpu_online_mask);
+                       possible_mask = cpumask_bits(cpu_possible_mask);
+               }
+               dev_maps = xmap_dereference(dev->xps_cpus_map);
+               nr_ids = nr_cpu_ids;
+       }
 
-       dev_maps = xmap_dereference(dev->xps_maps);
+       if (maps_sz < L1_CACHE_BYTES)
+               maps_sz = L1_CACHE_BYTES;
 
        /* allocate memory for queue storage */
-       for_each_cpu_and(cpu, cpu_online_mask, mask) {
+       for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
+            j < nr_ids;) {
                if (!new_dev_maps)
                        new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
                if (!new_dev_maps) {
@@ -2238,73 +2303,85 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
                        return -ENOMEM;
                }
 
-               tci = cpu * num_tc + tc;
-               map = dev_maps ? xmap_dereference(dev_maps->cpu_map[tci]) :
+               tci = j * num_tc + tc;
+               map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
                                 NULL;
 
-               map = expand_xps_map(map, cpu, index);
+               map = expand_xps_map(map, j, index, is_rxqs_map);
                if (!map)
                        goto error;
 
-               RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
+               RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
        }
 
        if (!new_dev_maps)
                goto out_no_new_maps;
 
-       for_each_possible_cpu(cpu) {
+       static_key_slow_inc(&xps_needed);
+       if (is_rxqs_map)
+               static_key_slow_inc(&xps_rxqs_needed);
+
+       for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
+            j < nr_ids;) {
                /* copy maps belonging to foreign traffic classes */
-               for (i = tc, tci = cpu * num_tc; dev_maps && i--; tci++) {
+               for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
                        /* fill in the new device map from the old device map */
-                       map = xmap_dereference(dev_maps->cpu_map[tci]);
-                       RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
+                       map = xmap_dereference(dev_maps->attr_map[tci]);
+                       RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
                }
 
                /* We need to explicitly update tci as previous loop
                 * could break out early if dev_maps is NULL.
                 */
-               tci = cpu * num_tc + tc;
+               tci = j * num_tc + tc;
 
-               if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
-                       /* add queue to CPU maps */
+               if (netif_attr_test_mask(j, mask, nr_ids) &&
+                   netif_attr_test_online(j, online_mask, nr_ids)) {
+                       /* add tx-queue to CPU/rx-queue maps */
                        int pos = 0;
 
-                       map = xmap_dereference(new_dev_maps->cpu_map[tci]);
+                       map = xmap_dereference(new_dev_maps->attr_map[tci]);
                        while ((pos < map->len) && (map->queues[pos] != index))
                                pos++;
 
                        if (pos == map->len)
                                map->queues[map->len++] = index;
 #ifdef CONFIG_NUMA
-                       if (numa_node_id == -2)
-                               numa_node_id = cpu_to_node(cpu);
-                       else if (numa_node_id != cpu_to_node(cpu))
-                               numa_node_id = -1;
+                       if (!is_rxqs_map) {
+                               if (numa_node_id == -2)
+                                       numa_node_id = cpu_to_node(j);
+                               else if (numa_node_id != cpu_to_node(j))
+                                       numa_node_id = -1;
+                       }
 #endif
                } else if (dev_maps) {
                        /* fill in the new device map from the old device map */
-                       map = xmap_dereference(dev_maps->cpu_map[tci]);
-                       RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
+                       map = xmap_dereference(dev_maps->attr_map[tci]);
+                       RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
                }
 
                /* copy maps belonging to foreign traffic classes */
                for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
                        /* fill in the new device map from the old device map */
-                       map = xmap_dereference(dev_maps->cpu_map[tci]);
-                       RCU_INIT_POINTER(new_dev_maps->cpu_map[tci], map);
+                       map = xmap_dereference(dev_maps->attr_map[tci]);
+                       RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
                }
        }
 
-       rcu_assign_pointer(dev->xps_maps, new_dev_maps);
+       if (is_rxqs_map)
+               rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
+       else
+               rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);
 
        /* Cleanup old maps */
        if (!dev_maps)
                goto out_no_old_maps;
 
-       for_each_possible_cpu(cpu) {
-               for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
-                       new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
-                       map = xmap_dereference(dev_maps->cpu_map[tci]);
+       for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
+            j < nr_ids;) {
+               for (i = num_tc, tci = j * num_tc; i--; tci++) {
+                       new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
+                       map = xmap_dereference(dev_maps->attr_map[tci]);
                        if (map && map != new_map)
                                kfree_rcu(map, rcu);
                }
@@ -2317,19 +2394,23 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
        active = true;
 
 out_no_new_maps:
-       /* update Tx queue numa node */
-       netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
-                                    (numa_node_id >= 0) ? numa_node_id :
-                                    NUMA_NO_NODE);
+       if (!is_rxqs_map) {
+               /* update Tx queue numa node */
+               netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
+                                            (numa_node_id >= 0) ?
+                                            numa_node_id : NUMA_NO_NODE);
+       }
 
        if (!dev_maps)
                goto out_no_maps;
 
-       /* removes queue from unused CPUs */
-       for_each_possible_cpu(cpu) {
-               for (i = tc, tci = cpu * num_tc; i--; tci++)
+       /* removes tx-queue from unused CPUs/rx-queues */
+       for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
+            j < nr_ids;) {
+               for (i = tc, tci = j * num_tc; i--; tci++)
                        active |= remove_xps_queue(dev_maps, tci, index);
-               if (!cpumask_test_cpu(cpu, mask) || !cpu_online(cpu))
+               if (!netif_attr_test_mask(j, mask, nr_ids) ||
+                   !netif_attr_test_online(j, online_mask, nr_ids))
                        active |= remove_xps_queue(dev_maps, tci, index);
                for (i = num_tc - tc, tci++; --i; tci++)
                        active |= remove_xps_queue(dev_maps, tci, index);
@@ -2337,7 +2418,10 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 
        /* free map if not active */
        if (!active) {
-               RCU_INIT_POINTER(dev->xps_maps, NULL);
+               if (is_rxqs_map)
+                       RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
+               else
+                       RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
                kfree_rcu(dev_maps, rcu);
        }
 
@@ -2347,11 +2431,12 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
        return 0;
 error:
        /* remove any maps that we added */
-       for_each_possible_cpu(cpu) {
-               for (i = num_tc, tci = cpu * num_tc; i--; tci++) {
-                       new_map = xmap_dereference(new_dev_maps->cpu_map[tci]);
+       for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
+            j < nr_ids;) {
+               for (i = num_tc, tci = j * num_tc; i--; tci++) {
+                       new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
                        map = dev_maps ?
-                             xmap_dereference(dev_maps->cpu_map[tci]) :
+                             xmap_dereference(dev_maps->attr_map[tci]) :
                              NULL;
                        if (new_map && new_map != map)
                                kfree(new_map);
@@ -2363,14 +2448,34 @@ int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
        kfree(new_dev_maps);
        return -ENOMEM;
 }
+
+int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
+                       u16 index)
+{
+       return __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
+}
 EXPORT_SYMBOL(netif_set_xps_queue);
 
 #endif
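
For context, a minimal sketch, assuming a registered net_device and a
hypothetical helper name, of calling the CPU-map wrapper exported above
(the rx-queue flavour is reached only internally, through
__netif_set_xps_queue() with is_rxqs_map set):

    /* Hedged example, not part of this patch: pin Tx queue 0 to CPUs 0-1 */
    #include <linux/cpumask.h>
    #include <linux/netdevice.h>

    static int example_pin_txq0(struct net_device *dev)
    {
            cpumask_var_t cpus;
            int err;

            if (!zalloc_cpumask_var(&cpus, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_set_cpu(0, cpus);
            cpumask_set_cpu(1, cpus);
            err = netif_set_xps_queue(dev, cpus, 0);  /* Tx queue index 0 */
            free_cpumask_var(cpus);
            return err;
    }
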
+static void netdev_unbind_all_sb_channels(struct net_device *dev)
+{
+       struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
+
+       /* Unbind any subordinate channels */
+       while (txq-- != &dev->_tx[0]) {
+               if (txq->sb_dev)
+                       netdev_unbind_sb_channel(dev, txq->sb_dev);
+       }
+}
+
 void netdev_reset_tc(struct net_device *dev)
 {
 #ifdef CONFIG_XPS
        netif_reset_xps_queues_gt(dev, 0);
 #endif
+       netdev_unbind_all_sb_channels(dev);
+
+       /* Reset TC configuration of device */
        dev->num_tc = 0;
        memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
        memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
@@ -2399,11 +2504,77 @@ int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
 #ifdef CONFIG_XPS
        netif_reset_xps_queues_gt(dev, 0);
 #endif
+       netdev_unbind_all_sb_channels(dev);
+
        dev->num_tc = num_tc;
        return 0;
 }
 EXPORT_SYMBOL(netdev_set_num_tc);
 
+void netdev_unbind_sb_channel(struct net_device *dev,
+                             struct net_device *sb_dev)
+{
+       struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
+
+#ifdef CONFIG_XPS
+       netif_reset_xps_queues_gt(sb_dev, 0);
+#endif
+       memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
+       memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
+
+       while (txq-- != &dev->_tx[0]) {
+               if (txq->sb_dev == sb_dev)
+                       txq->sb_dev = NULL;
+       }
+}
+EXPORT_SYMBOL(netdev_unbind_sb_channel);
+
+int netdev_bind_sb_channel_queue(struct net_device *dev,
+                                struct net_device *sb_dev,
+                                u8 tc, u16 count, u16 offset)
+{
+       /* Make certain the sb_dev and dev are already configured */
+       if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
+               return -EINVAL;
+
+       /* We cannot hand out queues we don't have */
+       if ((offset + count) > dev->real_num_tx_queues)
+               return -EINVAL;
+
+       /* Record the mapping */
+       sb_dev->tc_to_txq[tc].count = count;
+       sb_dev->tc_to_txq[tc].offset = offset;
+
+       /* Provide a way for each Tx queue to find its own tc_to_txq
+        * map or XPS map.
+        */
+       while (count--)
+               netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
+
+       return 0;
+}
+EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
+
+int netdev_set_sb_channel(struct net_device *dev, u16 channel)
+{
+       /* Do not use a multiqueue device to represent a subordinate channel */
+       if (netif_is_multiqueue(dev))
+               return -ENODEV;
+
+       /* We allow channels 1 - 32767 to be used for subordinate channels.
+        * Channel 0 is meant to be "native" mode and used only to represent
+        * the main root device. We allow writing 0 to reset the device back
+        * to normal mode after being used as a subordinate channel.
+        */
+       if (channel > S16_MAX)
+               return -EINVAL;
+
+       dev->num_tc = -channel;
+
+       return 0;
+}
+EXPORT_SYMBOL(netdev_set_sb_channel);
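
A hedged sketch of the intended call order for the two helpers above; the
device pointers are placeholders, and it assumes dev already has num_tc >= 1
(via netdev_set_num_tc()) and at least six real Tx queues:

    static int example_setup_sb_channel(struct net_device *dev,
                                        struct net_device *sb_dev)
    {
            int err;

            /* Mark sb_dev as subordinate channel 1 (its num_tc becomes -1) */
            err = netdev_set_sb_channel(sb_dev, 1);
            if (err)
                    return err;

            /* Give tc 0 of sb_dev two of dev's Tx queues, from offset 4 */
            return netdev_bind_sb_channel_queue(dev, sb_dev, 0, 2, 4);
    }
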
+
 /*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
@@ -2615,24 +2786,26 @@ EXPORT_SYMBOL(netif_device_attach);
  * Returns a Tx hash based on the given packet descriptor and the number
  * of Tx queues to be used as a distribution range.
  */
-static u16 skb_tx_hash(const struct net_device *dev, struct sk_buff *skb)
+static u16 skb_tx_hash(const struct net_device *dev,
+                      const struct net_device *sb_dev,
+                      struct sk_buff *skb)
 {
        u32 hash;
        u16 qoffset = 0;
        u16 qcount = dev->real_num_tx_queues;
 
+       if (dev->num_tc) {
+               u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+
+               qoffset = sb_dev->tc_to_txq[tc].offset;
+               qcount = sb_dev->tc_to_txq[tc].count;
+       }
+
        if (skb_rx_queue_recorded(skb)) {
                hash = skb_get_rx_queue(skb);
                while (unlikely(hash >= qcount))
                        hash -= qcount;
-               return hash;
-       }
-
-       if (dev->num_tc) {
-               u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
-
-               qoffset = dev->tc_to_txq[tc].offset;
-               qcount = dev->tc_to_txq[tc].count;
+               return hash + qoffset;
        }
 
        return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
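
The final distribution step relies on reciprocal_scale() from
<linux/kernel.h>, which maps a full 32-bit hash into [0, qcount) with a
multiply and a shift rather than a modulo; its effect is equivalent to:

    static inline u32 example_reciprocal_scale(u32 val, u32 ep_ro)
    {
            /* Scale val from [0, 2^32) down to [0, ep_ro) */
            return (u32)(((u64)val * ep_ro) >> 32);
    }
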
@@ -3376,32 +3549,64 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
 }
 #endif /* CONFIG_NET_EGRESS */
 
-static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+#ifdef CONFIG_XPS
+static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
+                              struct xps_dev_maps *dev_maps, unsigned int tci)
+{
+       struct xps_map *map;
+       int queue_index = -1;
+
+       if (dev->num_tc) {
+               tci *= dev->num_tc;
+               tci += netdev_get_prio_tc_map(dev, skb->priority);
+       }
+
+       map = rcu_dereference(dev_maps->attr_map[tci]);
+       if (map) {
+               if (map->len == 1)
+                       queue_index = map->queues[0];
+               else
+                       queue_index = map->queues[reciprocal_scale(
+                                               skb_get_hash(skb), map->len)];
+               if (unlikely(queue_index >= dev->real_num_tx_queues))
+                       queue_index = -1;
+       }
+       return queue_index;
+}
+#endif
+
+static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
+                        struct sk_buff *skb)
 {
 #ifdef CONFIG_XPS
        struct xps_dev_maps *dev_maps;
-       struct xps_map *map;
+       struct sock *sk = skb->sk;
        int queue_index = -1;
 
+       if (!static_key_false(&xps_needed))
+               return -1;
+
        rcu_read_lock();
-       dev_maps = rcu_dereference(dev->xps_maps);
+       if (!static_key_false(&xps_rxqs_needed))
+               goto get_cpus_map;
+
+       dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
        if (dev_maps) {
-               unsigned int tci = skb->sender_cpu - 1;
+               int tci = sk_rx_queue_get(sk);
 
-               if (dev->num_tc) {
-                       tci *= dev->num_tc;
-                       tci += netdev_get_prio_tc_map(dev, skb->priority);
-               }
+               if (tci >= 0 && tci < dev->num_rx_queues)
+                       queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
+                                                         tci);
+       }
 
-               map = rcu_dereference(dev_maps->cpu_map[tci]);
-               if (map) {
-                       if (map->len == 1)
-                               queue_index = map->queues[0];
-                       else
-                               queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
-                                                                          map->len)];
-                       if (unlikely(queue_index >= dev->real_num_tx_queues))
-                               queue_index = -1;
+get_cpus_map:
+       if (queue_index < 0) {
+               dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
+               if (dev_maps) {
+                       unsigned int tci = skb->sender_cpu - 1;
+
+                       queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
+                                                         tci);
                }
        }
        rcu_read_unlock();
@@ -3412,17 +3617,36 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 #endif
 }
 
-static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+                    struct net_device *sb_dev,
+                    select_queue_fallback_t fallback)
+{
+       return 0;
+}
+EXPORT_SYMBOL(dev_pick_tx_zero);
+
+u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+                      struct net_device *sb_dev,
+                      select_queue_fallback_t fallback)
+{
+       return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
+}
+EXPORT_SYMBOL(dev_pick_tx_cpu_id);
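
A hedged usage sketch (the ops table is invented): a driver that formerly
open-coded the CPU-id modulo can point its ndo_select_queue at the helper
exported above:

    static const struct net_device_ops example_netdev_ops = {
            .ndo_select_queue = dev_pick_tx_cpu_id,
            /* remaining ops omitted */
    };
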
+
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+                           struct net_device *sb_dev)
 {
        struct sock *sk = skb->sk;
        int queue_index = sk_tx_queue_get(sk);
 
+       sb_dev = sb_dev ? : dev;
+
        if (queue_index < 0 || skb->ooo_okay ||
            queue_index >= dev->real_num_tx_queues) {
-               int new_index = get_xps_queue(dev, skb);
+               int new_index = get_xps_queue(dev, sb_dev, skb);
 
                if (new_index < 0)
-                       new_index = skb_tx_hash(dev, skb);
+                       new_index = skb_tx_hash(dev, sb_dev, skb);
 
                if (queue_index != new_index && sk &&
                    sk_fullsock(sk) &&
@@ -3437,7 +3661,7 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                                    struct sk_buff *skb,
-                                   void *accel_priv)
+                                   struct net_device *sb_dev)
 {
        int queue_index = 0;
 
@@ -3452,10 +3676,10 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                const struct net_device_ops *ops = dev->netdev_ops;
 
                if (ops->ndo_select_queue)
-                       queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+                       queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
                                                            __netdev_pick_tx);
                else
-                       queue_index = __netdev_pick_tx(dev, skb);
+                       queue_index = __netdev_pick_tx(dev, skb, sb_dev);
 
                queue_index = netdev_cap_txqueue(dev, queue_index);
        }
@@ -3467,7 +3691,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 /**
  *     __dev_queue_xmit - transmit a buffer
  *     @skb: buffer to transmit
- *     @accel_priv: private data used for L2 forwarding offload
+ *     @sb_dev: subordinate device used for L2 forwarding offload
  *
  *     Queue a buffer for transmission to a network device. The caller must
  *     have set the device and priority and built the buffer before calling
@@ -3490,7 +3714,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
  *      the BH enable code must have IRQs enabled so that it will not deadlock.
  *          --BLG
  */
-static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
 {
        struct net_device *dev = skb->dev;
        struct netdev_queue *txq;
@@ -3529,7 +3753,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
        else
                skb_dst_force(skb);
 
-       txq = netdev_pick_tx(dev, skb, accel_priv);
+       txq = netdev_pick_tx(dev, skb, sb_dev);
        q = rcu_dereference_bh(txq->qdisc);
 
        trace_net_dev_queue(skb);
@@ -3603,9 +3827,9 @@ int dev_queue_xmit(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dev_queue_xmit);
 
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
 {
-       return __dev_queue_xmit(skb, accel_priv);
+       return __dev_queue_xmit(skb, sb_dev);
 }
 EXPORT_SYMBOL(dev_queue_xmit_accel);
 
@@ -4494,7 +4718,8 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
        return 0;
 }
 
-static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
+static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
+                                   struct packet_type **ppt_prev)
 {
        struct packet_type *ptype, *pt_prev;
        rx_handler_func_t *rx_handler;
@@ -4624,8 +4849,7 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
        if (pt_prev) {
                if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
                        goto drop;
-               else
-                       ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+               *ppt_prev = pt_prev;
        } else {
 drop:
                if (!deliver_exact)
@@ -4643,6 +4867,18 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
        return ret;
 }
 
+static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
+{
+       struct net_device *orig_dev = skb->dev;
+       struct packet_type *pt_prev = NULL;
+       int ret;
+
+       ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
+       if (pt_prev)
+               ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+       return ret;
+}
+
 /**
  *     netif_receive_skb_core - special purpose version of netif_receive_skb
  *     @skb: buffer to process
@@ -4663,13 +4899,72 @@ int netif_receive_skb_core(struct sk_buff *skb)
        int ret;
 
        rcu_read_lock();
-       ret = __netif_receive_skb_core(skb, false);
+       ret = __netif_receive_skb_one_core(skb, false);
        rcu_read_unlock();
 
        return ret;
 }
 EXPORT_SYMBOL(netif_receive_skb_core);
 
+static inline void __netif_receive_skb_list_ptype(struct list_head *head,
+                                                 struct packet_type *pt_prev,
+                                                 struct net_device *orig_dev)
+{
+       struct sk_buff *skb, *next;
+
+       if (!pt_prev)
+               return;
+       if (list_empty(head))
+               return;
+       if (pt_prev->list_func != NULL)
+               pt_prev->list_func(head, pt_prev, orig_dev);
+       else
+               list_for_each_entry_safe(skb, next, head, list)
+                       pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+}
+
+static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
+{
+       /* Fast-path assumptions:
+        * - There is no RX handler.
+        * - Only one packet_type matches.
+        * If either of these fails, we will end up doing some per-packet
+        * processing in-line, then handling the 'last ptype' for the whole
+        * sublist.  This can't cause out-of-order delivery to any single ptype,
+        * because the 'last ptype' must be constant across the sublist, and all
+        * other ptypes are handled per-packet.
+        */
+       /* Current (common) ptype of sublist */
+       struct packet_type *pt_curr = NULL;
+       /* Current (common) orig_dev of sublist */
+       struct net_device *od_curr = NULL;
+       struct list_head sublist;
+       struct sk_buff *skb, *next;
+
+       INIT_LIST_HEAD(&sublist);
+       list_for_each_entry_safe(skb, next, head, list) {
+               struct net_device *orig_dev = skb->dev;
+               struct packet_type *pt_prev = NULL;
+
+               list_del(&skb->list);
+               __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
+               if (!pt_prev)
+                       continue;
+               if (pt_curr != pt_prev || od_curr != orig_dev) {
+                       /* dispatch old sublist */
+                       __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
+                       /* start new sublist */
+                       INIT_LIST_HEAD(&sublist);
+                       pt_curr = pt_prev;
+                       od_curr = orig_dev;
+               }
+               list_add_tail(&skb->list, &sublist);
+       }
+
+       /* dispatch final sublist */
+       __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
+}
+
 static int __netif_receive_skb(struct sk_buff *skb)
 {
        int ret;
@@ -4687,14 +4982,44 @@ static int __netif_receive_skb(struct sk_buff *skb)
                 * context down to all allocation sites.
                 */
                noreclaim_flag = memalloc_noreclaim_save();
-               ret = __netif_receive_skb_core(skb, true);
+               ret = __netif_receive_skb_one_core(skb, true);
                memalloc_noreclaim_restore(noreclaim_flag);
        } else
-               ret = __netif_receive_skb_core(skb, false);
+               ret = __netif_receive_skb_one_core(skb, false);
 
        return ret;
 }
 
+static void __netif_receive_skb_list(struct list_head *head)
+{
+       unsigned long noreclaim_flag = 0;
+       struct sk_buff *skb, *next;
+       bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
+
+       list_for_each_entry_safe(skb, next, head, list) {
+               if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
+                       struct list_head sublist;
+
+                       /* Handle the previous sublist */
+                       list_cut_before(&sublist, head, &skb->list);
+                       if (!list_empty(&sublist))
+                               __netif_receive_skb_list_core(&sublist, pfmemalloc);
+                       pfmemalloc = !pfmemalloc;
+                       /* See comments in __netif_receive_skb */
+                       if (pfmemalloc)
+                               noreclaim_flag = memalloc_noreclaim_save();
+                       else
+                               memalloc_noreclaim_restore(noreclaim_flag);
+               }
+       }
+       /* Handle the remaining sublist */
+       if (!list_empty(head))
+               __netif_receive_skb_list_core(head, pfmemalloc);
+       /* Restore pflags */
+       if (pfmemalloc)
+               memalloc_noreclaim_restore(noreclaim_flag);
+}
+
 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
 {
        struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
@@ -4717,7 +5042,6 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
                break;
 
        case XDP_QUERY_PROG:
-               xdp->prog_attached = !!old;
                xdp->prog_id = old ? old->aux->id : 0;
                break;
 
@@ -4769,6 +5093,55 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
        return ret;
 }
 
+static void netif_receive_skb_list_internal(struct list_head *head)
+{
+       struct bpf_prog *xdp_prog = NULL;
+       struct sk_buff *skb, *next;
+       struct list_head sublist;
+
+       INIT_LIST_HEAD(&sublist);
+       list_for_each_entry_safe(skb, next, head, list) {
+               net_timestamp_check(netdev_tstamp_prequeue, skb);
+               list_del(&skb->list);
+               if (!skb_defer_rx_timestamp(skb))
+                       list_add_tail(&skb->list, &sublist);
+       }
+       list_splice_init(&sublist, head);
+
+       if (static_branch_unlikely(&generic_xdp_needed_key)) {
+               preempt_disable();
+               rcu_read_lock();
+               list_for_each_entry_safe(skb, next, head, list) {
+                       xdp_prog = rcu_dereference(skb->dev->xdp_prog);
+                       list_del(&skb->list);
+                       if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
+                               list_add_tail(&skb->list, &sublist);
+               }
+               rcu_read_unlock();
+               preempt_enable();
+               /* Put passed packets back on main list */
+               list_splice_init(&sublist, head);
+       }
+
+       rcu_read_lock();
+#ifdef CONFIG_RPS
+       if (static_key_false(&rps_needed)) {
+               list_for_each_entry_safe(skb, next, head, list) {
+                       struct rps_dev_flow voidflow, *rflow = &voidflow;
+                       int cpu = get_rps_cpu(skb->dev, skb, &rflow);
+
+                       if (cpu >= 0) {
+                               /* Will be handled, remove from list */
+                               list_del(&skb->list);
+                               enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+                       }
+               }
+       }
+#endif
+       __netif_receive_skb_list(head);
+       rcu_read_unlock();
+}
+
 /**
  *     netif_receive_skb - process receive buffer from network
  *     @skb: buffer to process
@@ -4792,6 +5165,28 @@ int netif_receive_skb(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netif_receive_skb);
 
+/**
+ *     netif_receive_skb_list - process many receive buffers from network
+ *     @head: list of skbs to process.
+ *
+ *     Since the return value of netif_receive_skb() is normally ignored, and
+ *     would not be meaningful for a list, this function returns void.
+ *
+ *     This function may only be called from softirq context and interrupts
+ *     should be enabled.
+ */
+void netif_receive_skb_list(struct list_head *head)
+{
+       struct sk_buff *skb;
+
+       if (list_empty(head))
+               return;
+       list_for_each_entry(skb, head, list)
+               trace_netif_receive_skb_list_entry(skb);
+       netif_receive_skb_list_internal(head);
+}
+EXPORT_SYMBOL(netif_receive_skb_list);
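
A hedged driver-side sketch of the new list entry point; the ring structure
and its accessor are hypothetical stand-ins for a driver's own Rx machinery:

    static void example_rx_poll(struct example_rx_ring *ring)
    {
            LIST_HEAD(rx_list);
            struct sk_buff *skb;

            /* example_rx_ring_next() stands in for the driver's
             * descriptor-to-skb routine.
             */
            while ((skb = example_rx_ring_next(ring)) != NULL)
                    list_add_tail(&skb->list, &rx_list);

            netif_receive_skb_list(&rx_list);
    }
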
+
 DEFINE_PER_CPU(struct work_struct, flush_works);
 
 /* Network device is going away, flush any packets still pending */
@@ -4875,42 +5270,50 @@ static int napi_gro_complete(struct sk_buff *skb)
        return netif_receive_skb_internal(skb);
 }
 
-/* napi->gro_list contains packets ordered by age.
- * youngest packets at the head of it.
- * Complete skbs in reverse order to reduce latencies.
- */
-void napi_gro_flush(struct napi_struct *napi, bool flush_old)
+static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
+                                  bool flush_old)
 {
-       struct sk_buff *skb, *prev = NULL;
-
-       /* scan list and build reverse chain */
-       for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
-               skb->prev = prev;
-               prev = skb;
-       }
-
-       for (skb = prev; skb; skb = prev) {
-               skb->next = NULL;
+       struct list_head *head = &napi->gro_hash[index].list;
+       struct sk_buff *skb, *p;
 
+       list_for_each_entry_safe_reverse(skb, p, head, list) {
                if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
                        return;
-
-               prev = skb->prev;
+               list_del(&skb->list);
+               skb->next = NULL;
                napi_gro_complete(skb);
-               napi->gro_count--;
+               napi->gro_hash[index].count--;
        }
 
-       napi->gro_list = NULL;
+       if (!napi->gro_hash[index].count)
+               __clear_bit(index, &napi->gro_bitmask);
+}
+
+/* napi->gro_hash[].list contains packets ordered by age;
+ * the youngest packets are at the head of each list.
+ * Complete skbs in reverse order to reduce latencies.
+ */
+void napi_gro_flush(struct napi_struct *napi, bool flush_old)
+{
+       u32 i;
+
+       for (i = 0; i < GRO_HASH_BUCKETS; i++) {
+               if (test_bit(i, &napi->gro_bitmask))
+                       __napi_gro_flush_chain(napi, i, flush_old);
+       }
 }
 EXPORT_SYMBOL(napi_gro_flush);
 
-static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
+static struct list_head *gro_list_prepare(struct napi_struct *napi,
+                                         struct sk_buff *skb)
 {
-       struct sk_buff *p;
        unsigned int maclen = skb->dev->hard_header_len;
        u32 hash = skb_get_hash_raw(skb);
+       struct list_head *head;
+       struct sk_buff *p;
 
-       for (p = napi->gro_list; p; p = p->next) {
+       head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
+       list_for_each_entry(p, head, list) {
                unsigned long diffs;
 
                NAPI_GRO_CB(p)->flush = 0;
@@ -4933,6 +5336,8 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
                                       maclen);
                NAPI_GRO_CB(p)->same_flow = !diffs;
        }
+
+       return head;
 }
 
 static void skb_gro_reset_offset(struct sk_buff *skb)
@@ -4975,20 +5380,41 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
        }
 }
 
+static void gro_flush_oldest(struct list_head *head)
+{
+       struct sk_buff *oldest;
+
+       oldest = list_last_entry(head, struct sk_buff, list);
+
+       /* We are called with a list holding at least MAX_GRO_SKBS
+        * entries, so it cannot be empty and this is impossible.
+        */
+       if (WARN_ON_ONCE(!oldest))
+               return;
+
+       /* Do not adjust napi->gro_hash[].count, caller is adding a new
+        * SKB to the chain.
+        */
+       list_del(&oldest->list);
+       napi_gro_complete(oldest);
+}
+
 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
-       struct sk_buff **pp = NULL;
+       u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
+       struct list_head *head = &offload_base;
        struct packet_offload *ptype;
        __be16 type = skb->protocol;
-       struct list_head *head = &offload_base;
-       int same_flow;
+       struct list_head *gro_head;
+       struct sk_buff *pp = NULL;
        enum gro_result ret;
+       int same_flow;
        int grow;
 
        if (netif_elide_gro(skb->dev))
                goto normal;
 
-       gro_list_prepare(napi, skb);
+       gro_head = gro_list_prepare(napi, skb);
 
        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
@@ -5022,7 +5448,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
                        NAPI_GRO_CB(skb)->csum_valid = 0;
                }
 
-               pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
+               pp = ptype->callbacks.gro_receive(gro_head, skb);
                break;
        }
        rcu_read_unlock();
@@ -5039,12 +5465,10 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
 
        if (pp) {
-               struct sk_buff *nskb = *pp;
-
-               *pp = nskb->next;
-               nskb->next = NULL;
-               napi_gro_complete(nskb);
-               napi->gro_count--;
+               list_del(&pp->list);
+               pp->next = NULL;
+               napi_gro_complete(pp);
+               napi->gro_hash[hash].count--;
        }
 
        if (same_flow)
@@ -5053,26 +5477,16 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        if (NAPI_GRO_CB(skb)->flush)
                goto normal;
 
-       if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
-               struct sk_buff *nskb = napi->gro_list;
-
-               /* locate the end of the list to select the 'oldest' flow */
-               while (nskb->next) {
-                       pp = &nskb->next;
-                       nskb = *pp;
-               }
-               *pp = NULL;
-               nskb->next = NULL;
-               napi_gro_complete(nskb);
+       if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
+               gro_flush_oldest(gro_head);
        } else {
-               napi->gro_count++;
+               napi->gro_hash[hash].count++;
        }
        NAPI_GRO_CB(skb)->count = 1;
        NAPI_GRO_CB(skb)->age = jiffies;
        NAPI_GRO_CB(skb)->last = skb;
        skb_shinfo(skb)->gso_size = skb_gro_len(skb);
-       skb->next = napi->gro_list;
-       napi->gro_list = skb;
+       list_add(&skb->list, gro_head);
        ret = GRO_HELD;
 
 pull:
@@ -5080,6 +5494,13 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        if (grow > 0)
                gro_pull_from_frag0(skb, grow);
 ok:
+       if (napi->gro_hash[hash].count) {
+               if (!test_bit(hash, &napi->gro_bitmask))
+                       __set_bit(hash, &napi->gro_bitmask);
+       } else if (test_bit(hash, &napi->gro_bitmask)) {
+               __clear_bit(hash, &napi->gro_bitmask);
+       }
+
        return ret;
 
 normal:
@@ -5478,7 +5899,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                                 NAPIF_STATE_IN_BUSY_POLL)))
                return false;
 
-       if (n->gro_list) {
+       if (n->gro_bitmask) {
                unsigned long timeout = 0;
 
                if (work_done)
@@ -5687,7 +6108,7 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
        /* Note : we use a relaxed variant of napi_schedule_prep() not setting
         * NAPI_STATE_MISSED, since we do not react to a device IRQ.
         */
-       if (napi->gro_list && !napi_disable_pending(napi) &&
+       if (napi->gro_bitmask && !napi_disable_pending(napi) &&
            !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
                __napi_schedule_irqoff(napi);
 
@@ -5697,11 +6118,16 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
                    int (*poll)(struct napi_struct *, int), int weight)
 {
+       int i;
+
        INIT_LIST_HEAD(&napi->poll_list);
        hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
        napi->timer.function = napi_watchdog;
-       napi->gro_count = 0;
-       napi->gro_list = NULL;
+       napi->gro_bitmask = 0;
+       for (i = 0; i < GRO_HASH_BUCKETS; i++) {
+               INIT_LIST_HEAD(&napi->gro_hash[i].list);
+               napi->gro_hash[i].count = 0;
+       }
        napi->skb = NULL;
        napi->poll = poll;
        if (weight > NAPI_POLL_WEIGHT)
@@ -5734,6 +6160,19 @@ void napi_disable(struct napi_struct *n)
 }
 EXPORT_SYMBOL(napi_disable);
 
+static void flush_gro_hash(struct napi_struct *napi)
+{
+       int i;
+
+       for (i = 0; i < GRO_HASH_BUCKETS; i++) {
+               struct sk_buff *skb, *n;
+
+               list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
+                       kfree_skb(skb);
+               napi->gro_hash[i].count = 0;
+       }
+}
+
 /* Must be called in process context */
 void netif_napi_del(struct napi_struct *napi)
 {
@@ -5743,9 +6182,8 @@ void netif_napi_del(struct napi_struct *napi)
        list_del_init(&napi->dev_list);
        napi_free_frags(napi);
 
-       kfree_skb_list(napi->gro_list);
-       napi->gro_list = NULL;
-       napi->gro_count = 0;
+       flush_gro_hash(napi);
+       napi->gro_bitmask = 0;
 }
 EXPORT_SYMBOL(netif_napi_del);
 
@@ -5787,7 +6225,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
                goto out_unlock;
        }
 
-       if (n->gro_list) {
+       if (n->gro_bitmask) {
                /* flush too old packets
                 * If HZ < 1000, flush all packets.
                 */
@@ -7276,23 +7714,21 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
 }
 EXPORT_SYMBOL(dev_change_proto_down);
 
-void __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
-                    struct netdev_bpf *xdp)
+u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
+                   enum bpf_netdev_command cmd)
 {
-       memset(xdp, 0, sizeof(*xdp));
-       xdp->command = XDP_QUERY_PROG;
+       struct netdev_bpf xdp;
 
-       /* Query must always succeed. */
-       WARN_ON(bpf_op(dev, xdp) < 0);
-}
+       if (!bpf_op)
+               return 0;
 
-static u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op)
-{
-       struct netdev_bpf xdp;
+       memset(&xdp, 0, sizeof(xdp));
+       xdp.command = cmd;
 
-       __dev_xdp_query(dev, bpf_op, &xdp);
+       /* Query must always succeed. */
+       WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG);
 
-       return xdp.prog_attached;
+       return xdp.prog_id;
 }
 
 static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
@@ -7326,12 +7762,19 @@ static void dev_xdp_uninstall(struct net_device *dev)
        if (!ndo_bpf)
                return;
 
-       __dev_xdp_query(dev, ndo_bpf, &xdp);
-       if (xdp.prog_attached == XDP_ATTACHED_NONE)
-               return;
+       memset(&xdp, 0, sizeof(xdp));
+       xdp.command = XDP_QUERY_PROG;
+       WARN_ON(ndo_bpf(dev, &xdp));
+       if (xdp.prog_id)
+               WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
+                                       NULL));
 
-       /* Program removal should always succeed */
-       WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, NULL));
+       /* Remove HW offload */
+       memset(&xdp, 0, sizeof(xdp));
+       xdp.command = XDP_QUERY_PROG_HW;
+       if (!ndo_bpf(dev, &xdp) && xdp.prog_id)
+               WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
+                                       NULL));
 }
 
 /**
@@ -7347,12 +7790,15 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
                      int fd, u32 flags)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
+       enum bpf_netdev_command query;
        struct bpf_prog *prog = NULL;
        bpf_op_t bpf_op, bpf_chk;
        int err;
 
        ASSERT_RTNL();
 
+       query = flags & XDP_FLAGS_HW_MODE ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;
+
        bpf_op = bpf_chk = ops->ndo_bpf;
        if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
                return -EOPNOTSUPP;
@@ -7362,10 +7808,11 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
                bpf_chk = generic_xdp_install;
 
        if (fd >= 0) {
-               if (bpf_chk && __dev_xdp_attached(dev, bpf_chk))
+               if (__dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG) ||
+                   __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG_HW))
                        return -EEXIST;
                if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
-                   __dev_xdp_attached(dev, bpf_op))
+                   __dev_xdp_query(dev, bpf_op, query))
                        return -EBUSY;
 
                prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
@@ -8643,7 +9090,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
                /* We get here if we can't use the current device name */
                if (!pat)
                        goto out;
-               if (dev_get_valid_name(net, dev, pat) < 0)
+               err = dev_get_valid_name(net, dev, pat);
+               if (err < 0)
                        goto out;
        }
 
@@ -8655,7 +9103,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
        dev_close(dev);
 
        /* And unlink it from device chain */
-       err = -ENODEV;
        unlist_netdevice(dev);
 
        synchronize_net();
@@ -8834,6 +9281,9 @@ static struct hlist_head * __net_init netdev_create_hash(void)
 /* Initialize per network namespace state */
 static int __net_init netdev_init(struct net *net)
 {
+       BUILD_BUG_ON(GRO_HASH_BUCKETS >
+                    8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask));
+
        if (net != &init_net)
                INIT_LIST_HEAD(&net->dev_base_head);
 
index a04e1e88bf3ab49340d788589c365aaf45d9d3e2..50537ff961a722e18731b7b9671deb739bfce847 100644 (file)
@@ -285,16 +285,9 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
                if (ifr->ifr_qlen < 0)
                        return -EINVAL;
                if (dev->tx_queue_len ^ ifr->ifr_qlen) {
-                       unsigned int orig_len = dev->tx_queue_len;
-
-                       dev->tx_queue_len = ifr->ifr_qlen;
-                       err = call_netdevice_notifiers(
-                                       NETDEV_CHANGE_TX_QUEUE_LEN, dev);
-                       err = notifier_to_errno(err);
-                       if (err) {
-                               dev->tx_queue_len = orig_len;
+                       err = dev_change_tx_queue_len(dev, ifr->ifr_qlen);
+                       if (err)
                                return err;
-                       }
                }
                return 0;
 
index 22099705cc4108aa3881e5372c4c070c0032afb0..65fc366a78a4c455a02e74bdc30c9249698eb488 100644 (file)
@@ -326,6 +326,57 @@ devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb,
                                                  pool_type, p_tc_index);
 }
 
+struct devlink_region {
+       struct devlink *devlink;
+       struct list_head list;
+       const char *name;
+       struct list_head snapshot_list;
+       u32 max_snapshots;
+       u32 cur_snapshots;
+       u64 size;
+};
+
+struct devlink_snapshot {
+       struct list_head list;
+       struct devlink_region *region;
+       devlink_snapshot_data_dest_t *data_destructor;
+       u64 data_len;
+       u8 *data;
+       u32 id;
+};
+
+static struct devlink_region *
+devlink_region_get_by_name(struct devlink *devlink, const char *region_name)
+{
+       struct devlink_region *region;
+
+       list_for_each_entry(region, &devlink->region_list, list)
+               if (!strcmp(region->name, region_name))
+                       return region;
+
+       return NULL;
+}
+
+static struct devlink_snapshot *
+devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id)
+{
+       struct devlink_snapshot *snapshot;
+
+       list_for_each_entry(snapshot, &region->snapshot_list, list)
+               if (snapshot->id == id)
+                       return snapshot;
+
+       return NULL;
+}
+
+static void devlink_region_snapshot_del(struct devlink_snapshot *snapshot)
+{
+       snapshot->region->cur_snapshots--;
+       list_del(&snapshot->list);
+       (*snapshot->data_destructor)(snapshot->data);
+       kfree(snapshot);
+}
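
A hedged sketch combining the three helpers above; the region name and
snapshot id are invented:

    static void example_drop_snapshot(struct devlink *devlink)
    {
            struct devlink_region *region;
            struct devlink_snapshot *snapshot;

            region = devlink_region_get_by_name(devlink, "fw-health");
            if (!region)
                    return;

            snapshot = devlink_region_snapshot_get_by_id(region, 3);
            if (snapshot)
                    devlink_region_snapshot_del(snapshot);
    }
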
+
 #define DEVLINK_NL_FLAG_NEED_DEVLINK   BIT(0)
 #define DEVLINK_NL_FLAG_NEED_PORT      BIT(1)
 #define DEVLINK_NL_FLAG_NEED_SB                BIT(2)
@@ -2604,247 +2655,1204 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
        return devlink->ops->reload(devlink, info->extack);
 }
 
-static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
-       [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
-       [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
-       [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
-       [DEVLINK_ATTR_PORT_TYPE] = { .type = NLA_U16 },
-       [DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 },
-       [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32 },
-       [DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16 },
-       [DEVLINK_ATTR_SB_POOL_TYPE] = { .type = NLA_U8 },
-       [DEVLINK_ATTR_SB_POOL_SIZE] = { .type = NLA_U32 },
-       [DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 },
-       [DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 },
-       [DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
-       [DEVLINK_ATTR_ESWITCH_MODE] = { .type = NLA_U16 },
-       [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = { .type = NLA_U8 },
-       [DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 },
-       [DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
-       [DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 },
-       [DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64},
-       [DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64},
-};
-
-static const struct genl_ops devlink_nl_ops[] = {
-       {
-               .cmd = DEVLINK_CMD_GET,
-               .doit = devlink_nl_cmd_get_doit,
-               .dumpit = devlink_nl_cmd_get_dumpit,
-               .policy = devlink_nl_policy,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
-               /* can be retrieved by unprivileged users */
-       },
-       {
-               .cmd = DEVLINK_CMD_PORT_GET,
-               .doit = devlink_nl_cmd_port_get_doit,
-               .dumpit = devlink_nl_cmd_port_get_dumpit,
-               .policy = devlink_nl_policy,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
-               /* can be retrieved by unprivileged users */
-       },
-       {
-               .cmd = DEVLINK_CMD_PORT_SET,
-               .doit = devlink_nl_cmd_port_set_doit,
-               .policy = devlink_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
-       },
-       {
-               .cmd = DEVLINK_CMD_PORT_SPLIT,
-               .doit = devlink_nl_cmd_port_split_doit,
-               .policy = devlink_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NO_LOCK,
-       },
-       {
-               .cmd = DEVLINK_CMD_PORT_UNSPLIT,
-               .doit = devlink_nl_cmd_port_unsplit_doit,
-               .policy = devlink_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NO_LOCK,
-       },
-       {
-               .cmd = DEVLINK_CMD_SB_GET,
-               .doit = devlink_nl_cmd_sb_get_doit,
-               .dumpit = devlink_nl_cmd_sb_get_dumpit,
-               .policy = devlink_nl_policy,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NEED_SB,
-               /* can be retrieved by unprivileged users */
-       },
-       {
-               .cmd = DEVLINK_CMD_SB_POOL_GET,
-               .doit = devlink_nl_cmd_sb_pool_get_doit,
-               .dumpit = devlink_nl_cmd_sb_pool_get_dumpit,
-               .policy = devlink_nl_policy,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NEED_SB,
-               /* can be retrieved by unprivileged users */
-       },
-       {
-               .cmd = DEVLINK_CMD_SB_POOL_SET,
-               .doit = devlink_nl_cmd_sb_pool_set_doit,
-               .policy = devlink_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NEED_SB,
-       },
-       {
-               .cmd = DEVLINK_CMD_SB_PORT_POOL_GET,
-               .doit = devlink_nl_cmd_sb_port_pool_get_doit,
-               .dumpit = devlink_nl_cmd_sb_port_pool_get_dumpit,
-               .policy = devlink_nl_policy,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
-                                 DEVLINK_NL_FLAG_NEED_SB,
-               /* can be retrieved by unprivileged users */
-       },
-       {
-               .cmd = DEVLINK_CMD_SB_PORT_POOL_SET,
-               .doit = devlink_nl_cmd_sb_port_pool_set_doit,
-               .policy = devlink_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
-                                 DEVLINK_NL_FLAG_NEED_SB,
-       },
+static const struct devlink_param devlink_param_generic[] = {
        {
-               .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET,
-               .doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit,
-               .dumpit = devlink_nl_cmd_sb_tc_pool_bind_get_dumpit,
-               .policy = devlink_nl_policy,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
-                                 DEVLINK_NL_FLAG_NEED_SB,
-               /* can be retrieved by unprivileged users */
-       },
-       {
-               .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET,
-               .doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit,
-               .policy = devlink_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
-                                 DEVLINK_NL_FLAG_NEED_SB,
-       },
-       {
-               .cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT,
-               .doit = devlink_nl_cmd_sb_occ_snapshot_doit,
-               .policy = devlink_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NEED_SB,
-       },
-       {
-               .cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
-               .doit = devlink_nl_cmd_sb_occ_max_clear_doit,
-               .policy = devlink_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NEED_SB,
-       },
-       {
-               .cmd = DEVLINK_CMD_ESWITCH_GET,
-               .doit = devlink_nl_cmd_eswitch_get_doit,
-               .policy = devlink_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
-       },
-       {
-               .cmd = DEVLINK_CMD_ESWITCH_SET,
-               .doit = devlink_nl_cmd_eswitch_set_doit,
-               .policy = devlink_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NO_LOCK,
-       },
-       {
-               .cmd = DEVLINK_CMD_DPIPE_TABLE_GET,
-               .doit = devlink_nl_cmd_dpipe_table_get,
-               .policy = devlink_nl_policy,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
-               /* can be retrieved by unprivileged users */
-       },
-       {
-               .cmd = DEVLINK_CMD_DPIPE_ENTRIES_GET,
-               .doit = devlink_nl_cmd_dpipe_entries_get,
-               .policy = devlink_nl_policy,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
-               /* can be retrieved by unprivileged users */
-       },
-       {
-               .cmd = DEVLINK_CMD_DPIPE_HEADERS_GET,
-               .doit = devlink_nl_cmd_dpipe_headers_get,
-               .policy = devlink_nl_policy,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
-               /* can be retrieved by unprivileged users */
-       },
-       {
-               .cmd = DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
-               .doit = devlink_nl_cmd_dpipe_table_counters_set,
-               .policy = devlink_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+               .id = DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+               .name = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME,
+               .type = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE,
        },
        {
-               .cmd = DEVLINK_CMD_RESOURCE_SET,
-               .doit = devlink_nl_cmd_resource_set,
-               .policy = devlink_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+               .id = DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+               .name = DEVLINK_PARAM_GENERIC_MAX_MACS_NAME,
+               .type = DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE,
        },
        {
-               .cmd = DEVLINK_CMD_RESOURCE_DUMP,
-               .doit = devlink_nl_cmd_resource_dump,
-               .policy = devlink_nl_policy,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
-               /* can be retrieved by unprivileged users */
+               .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
+               .name = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME,
+               .type = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE,
        },
        {
-               .cmd = DEVLINK_CMD_RELOAD,
-               .doit = devlink_nl_cmd_reload,
-               .policy = devlink_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NO_LOCK,
+               .id = DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+               .name = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME,
+               .type = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE,
        },
 };
 
-static struct genl_family devlink_nl_family __ro_after_init = {
-       .name           = DEVLINK_GENL_NAME,
-       .version        = DEVLINK_GENL_VERSION,
-       .maxattr        = DEVLINK_ATTR_MAX,
-       .netnsok        = true,
-       .pre_doit       = devlink_nl_pre_doit,
-       .post_doit      = devlink_nl_post_doit,
-       .module         = THIS_MODULE,
-       .ops            = devlink_nl_ops,
-       .n_ops          = ARRAY_SIZE(devlink_nl_ops),
-       .mcgrps         = devlink_nl_mcgrps,
-       .n_mcgrps       = ARRAY_SIZE(devlink_nl_mcgrps),
-};
+static int devlink_param_generic_verify(const struct devlink_param *param)
+{
+       /* verify it matches a generic parameter by id and name */
+       if (param->id > DEVLINK_PARAM_GENERIC_ID_MAX)
+               return -EINVAL;
+       if (strcmp(param->name, devlink_param_generic[param->id].name))
+               return -ENOENT;
 
-/**
- *     devlink_alloc - Allocate new devlink instance resources
- *
- *     @ops: ops
- *     @priv_size: size of user private data
- *
- *     Allocate new devlink instance resources, including devlink index
- *     and name.
- */
-struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
+       WARN_ON(param->type != devlink_param_generic[param->id].type);
+
+       return 0;
+}
+
+static int devlink_param_driver_verify(const struct devlink_param *param)
 {
-       struct devlink *devlink;
+       int i;
 
-       devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
-       if (!devlink)
-               return NULL;
-       devlink->ops = ops;
-       devlink_net_set(devlink, &init_net);
-       INIT_LIST_HEAD(&devlink->port_list);
-       INIT_LIST_HEAD(&devlink->sb_list);
+       if (param->id <= DEVLINK_PARAM_GENERIC_ID_MAX)
+               return -EINVAL;
+       /* verify no such name in generic params */
+       for (i = 0; i <= DEVLINK_PARAM_GENERIC_ID_MAX; i++)
+               if (!strcmp(param->name, devlink_param_generic[i].name))
+                       return -EEXIST;
+
+       return 0;
+}
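
A hedged sketch of a driver-specific parameter that would pass
devlink_param_driver_verify() above: an id beyond the generic range and a
name that collides with no generic entry (all names and values invented):

    static int example_param_get(struct devlink *devlink, u32 id,
                                 struct devlink_param_gset_ctx *ctx)
    {
            ctx->val.vbool = true;  /* placeholder runtime value */
            return 0;
    }

    static const struct devlink_param example_driver_param = {
            .id = DEVLINK_PARAM_GENERIC_ID_MAX + 1,
            .name = "example_fw_debug",
            .type = DEVLINK_PARAM_TYPE_BOOL,
            .supported_cmodes = BIT(DEVLINK_PARAM_CMODE_RUNTIME),
            .get = example_param_get,
    };
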
+
+static struct devlink_param_item *
+devlink_param_find_by_name(struct list_head *param_list,
+                          const char *param_name)
+{
+       struct devlink_param_item *param_item;
+
+       list_for_each_entry(param_item, param_list, list)
+               if (!strcmp(param_item->param->name, param_name))
+                       return param_item;
+       return NULL;
+}
+
+static struct devlink_param_item *
+devlink_param_find_by_id(struct list_head *param_list, u32 param_id)
+{
+       struct devlink_param_item *param_item;
+
+       list_for_each_entry(param_item, param_list, list)
+               if (param_item->param->id == param_id)
+                       return param_item;
+       return NULL;
+}
+
+static bool
+devlink_param_cmode_is_supported(const struct devlink_param *param,
+                                enum devlink_param_cmode cmode)
+{
+       return test_bit(cmode, &param->supported_cmodes);
+}
+
+static int devlink_param_get(struct devlink *devlink,
+                            const struct devlink_param *param,
+                            struct devlink_param_gset_ctx *ctx)
+{
+       if (!param->get)
+               return -EOPNOTSUPP;
+       return param->get(devlink, param->id, ctx);
+}
+
+static int devlink_param_set(struct devlink *devlink,
+                            const struct devlink_param *param,
+                            struct devlink_param_gset_ctx *ctx)
+{
+       if (!param->set)
+               return -EOPNOTSUPP;
+       return param->set(devlink, param->id, ctx);
+}
+
+static int
+devlink_param_type_to_nla_type(enum devlink_param_type param_type)
+{
+       switch (param_type) {
+       case DEVLINK_PARAM_TYPE_U8:
+               return NLA_U8;
+       case DEVLINK_PARAM_TYPE_U16:
+               return NLA_U16;
+       case DEVLINK_PARAM_TYPE_U32:
+               return NLA_U32;
+       case DEVLINK_PARAM_TYPE_STRING:
+               return NLA_STRING;
+       case DEVLINK_PARAM_TYPE_BOOL:
+               return NLA_FLAG;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int
+devlink_nl_param_value_fill_one(struct sk_buff *msg,
+                               enum devlink_param_type type,
+                               enum devlink_param_cmode cmode,
+                               union devlink_param_value val)
+{
+       struct nlattr *param_value_attr;
+
+       param_value_attr = nla_nest_start(msg, DEVLINK_ATTR_PARAM_VALUE);
+       if (!param_value_attr)
+               goto nla_put_failure;
+
+       if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_CMODE, cmode))
+               goto value_nest_cancel;
+
+       switch (type) {
+       case DEVLINK_PARAM_TYPE_U8:
+               if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu8))
+                       goto value_nest_cancel;
+               break;
+       case DEVLINK_PARAM_TYPE_U16:
+               if (nla_put_u16(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu16))
+                       goto value_nest_cancel;
+               break;
+       case DEVLINK_PARAM_TYPE_U32:
+               if (nla_put_u32(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu32))
+                       goto value_nest_cancel;
+               break;
+       case DEVLINK_PARAM_TYPE_STRING:
+               if (nla_put_string(msg, DEVLINK_ATTR_PARAM_VALUE_DATA,
+                                  val.vstr))
+                       goto value_nest_cancel;
+               break;
+       case DEVLINK_PARAM_TYPE_BOOL:
+               if (val.vbool &&
+                   nla_put_flag(msg, DEVLINK_ATTR_PARAM_VALUE_DATA))
+                       goto value_nest_cancel;
+               break;
+       }
+
+       nla_nest_end(msg, param_value_attr);
+       return 0;
+
+value_nest_cancel:
+       nla_nest_cancel(msg, param_value_attr);
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
+                                struct devlink_param_item *param_item,
+                                enum devlink_command cmd,
+                                u32 portid, u32 seq, int flags)
+{
+       union devlink_param_value param_value[DEVLINK_PARAM_CMODE_MAX + 1];
+       const struct devlink_param *param = param_item->param;
+       struct devlink_param_gset_ctx ctx;
+       struct nlattr *param_values_list;
+       struct nlattr *param_attr;
+       int nla_type;
+       void *hdr;
+       int err;
+       int i;
+
+       /* Gather a value for each supported cmode; the driverinit value
+        * comes from the copy cached in the param item.
+        */
+       for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) {
+               if (!devlink_param_cmode_is_supported(param, i))
+                       continue;
+               if (i == DEVLINK_PARAM_CMODE_DRIVERINIT) {
+                       if (!param_item->driverinit_value_valid)
+                               return -EOPNOTSUPP;
+                       param_value[i] = param_item->driverinit_value;
+               } else {
+                       ctx.cmode = i;
+                       err = devlink_param_get(devlink, param, &ctx);
+                       if (err)
+                               return err;
+                       param_value[i] = ctx.val;
+               }
+       }
+
+       hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+       if (!hdr)
+               return -EMSGSIZE;
+
+       if (devlink_nl_put_handle(msg, devlink))
+               goto genlmsg_cancel;
+       param_attr = nla_nest_start(msg, DEVLINK_ATTR_PARAM);
+       if (!param_attr)
+               goto genlmsg_cancel;
+       if (nla_put_string(msg, DEVLINK_ATTR_PARAM_NAME, param->name))
+               goto param_nest_cancel;
+       if (param->generic && nla_put_flag(msg, DEVLINK_ATTR_PARAM_GENERIC))
+               goto param_nest_cancel;
+
+       nla_type = devlink_param_type_to_nla_type(param->type);
+       if (nla_type < 0)
+               goto param_nest_cancel;
+       if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_TYPE, nla_type))
+               goto param_nest_cancel;
+
+       param_values_list = nla_nest_start(msg, DEVLINK_ATTR_PARAM_VALUES_LIST);
+       if (!param_values_list)
+               goto param_nest_cancel;
+
+       for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) {
+               if (!devlink_param_cmode_is_supported(param, i))
+                       continue;
+               err = devlink_nl_param_value_fill_one(msg, param->type,
+                                                     i, param_value[i]);
+               if (err)
+                       goto values_list_nest_cancel;
+       }
+
+       nla_nest_end(msg, param_values_list);
+       nla_nest_end(msg, param_attr);
+       genlmsg_end(msg, hdr);
+       return 0;
+
+values_list_nest_cancel:
+       nla_nest_end(msg, param_values_list);
+param_nest_cancel:
+       nla_nest_cancel(msg, param_attr);
+genlmsg_cancel:
+       genlmsg_cancel(msg, hdr);
+       return -EMSGSIZE;
+}
+
+static void devlink_param_notify(struct devlink *devlink,
+                                struct devlink_param_item *param_item,
+                                enum devlink_command cmd)
+{
+       struct sk_buff *msg;
+       int err;
+
+       WARN_ON(cmd != DEVLINK_CMD_PARAM_NEW && cmd != DEVLINK_CMD_PARAM_DEL);
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return;
+       err = devlink_nl_param_fill(msg, devlink, param_item, cmd, 0, 0, 0);
+       if (err) {
+               nlmsg_free(msg);
+               return;
+       }
+
+       genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+                               msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
+                                          struct netlink_callback *cb)
+{
+       struct devlink_param_item *param_item;
+       struct devlink *devlink;
+       int start = cb->args[0];
+       int idx = 0;
+       int err;
+
+       mutex_lock(&devlink_mutex);
+       list_for_each_entry(devlink, &devlink_list, list) {
+               if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+                       continue;
+               mutex_lock(&devlink->lock);
+               list_for_each_entry(param_item, &devlink->param_list, list) {
+                       if (idx < start) {
+                               idx++;
+                               continue;
+                       }
+                       err = devlink_nl_param_fill(msg, devlink, param_item,
+                                                   DEVLINK_CMD_PARAM_GET,
+                                                   NETLINK_CB(cb->skb).portid,
+                                                   cb->nlh->nlmsg_seq,
+                                                   NLM_F_MULTI);
+                       if (err) {
+                               mutex_unlock(&devlink->lock);
+                               goto out;
+                       }
+                       idx++;
+               }
+               mutex_unlock(&devlink->lock);
+       }
+out:
+       mutex_unlock(&devlink_mutex);
+
+       cb->args[0] = idx;
+       return msg->len;
+}
+
+static int
+devlink_param_type_get_from_info(struct genl_info *info,
+                                enum devlink_param_type *param_type)
+{
+       if (!info->attrs[DEVLINK_ATTR_PARAM_TYPE])
+               return -EINVAL;
+
+       switch (nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_TYPE])) {
+       case NLA_U8:
+               *param_type = DEVLINK_PARAM_TYPE_U8;
+               break;
+       case NLA_U16:
+               *param_type = DEVLINK_PARAM_TYPE_U16;
+               break;
+       case NLA_U32:
+               *param_type = DEVLINK_PARAM_TYPE_U32;
+               break;
+       case NLA_STRING:
+               *param_type = DEVLINK_PARAM_TYPE_STRING;
+               break;
+       case NLA_FLAG:
+               *param_type = DEVLINK_PARAM_TYPE_BOOL;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int
+devlink_param_value_get_from_info(const struct devlink_param *param,
+                                 struct genl_info *info,
+                                 union devlink_param_value *value)
+{
+       if (param->type != DEVLINK_PARAM_TYPE_BOOL &&
+           !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])
+               return -EINVAL;
+
+       switch (param->type) {
+       case DEVLINK_PARAM_TYPE_U8:
+               value->vu8 = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+               break;
+       case DEVLINK_PARAM_TYPE_U16:
+               value->vu16 = nla_get_u16(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+               break;
+       case DEVLINK_PARAM_TYPE_U32:
+               value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+               break;
+       case DEVLINK_PARAM_TYPE_STRING:
+               if (nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) >
+                   DEVLINK_PARAM_MAX_STRING_VALUE)
+                       return -EINVAL;
+               value->vstr = nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+               break;
+       case DEVLINK_PARAM_TYPE_BOOL:
+               value->vbool = !!info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA];
+               break;
+       }
+       return 0;
+}
+
+static struct devlink_param_item *
+devlink_param_get_from_info(struct devlink *devlink,
+                           struct genl_info *info)
+{
+       char *param_name;
+
+       if (!info->attrs[DEVLINK_ATTR_PARAM_NAME])
+               return NULL;
+
+       param_name = nla_data(info->attrs[DEVLINK_ATTR_PARAM_NAME]);
+       return devlink_param_find_by_name(&devlink->param_list, param_name);
+}
+
+static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
+                                        struct genl_info *info)
+{
+       struct devlink *devlink = info->user_ptr[0];
+       struct devlink_param_item *param_item;
+       struct sk_buff *msg;
+       int err;
+
+       param_item = devlink_param_get_from_info(devlink, info);
+       if (!param_item)
+               return -EINVAL;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       err = devlink_nl_param_fill(msg, devlink, param_item,
+                                   DEVLINK_CMD_PARAM_GET,
+                                   info->snd_portid, info->snd_seq, 0);
+       if (err) {
+               nlmsg_free(msg);
+               return err;
+       }
+
+       return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
+                                        struct genl_info *info)
+{
+       struct devlink *devlink = info->user_ptr[0];
+       enum devlink_param_type param_type;
+       struct devlink_param_gset_ctx ctx;
+       enum devlink_param_cmode cmode;
+       struct devlink_param_item *param_item;
+       const struct devlink_param *param;
+       union devlink_param_value value;
+       int err = 0;
+
+       param_item = devlink_param_get_from_info(devlink, info);
+       if (!param_item)
+               return -EINVAL;
+       param = param_item->param;
+       err = devlink_param_type_get_from_info(info, &param_type);
+       if (err)
+               return err;
+       if (param_type != param->type)
+               return -EINVAL;
+       err = devlink_param_value_get_from_info(param, info, &value);
+       if (err)
+               return err;
+       if (param->validate) {
+               err = param->validate(devlink, param->id, value, info->extack);
+               if (err)
+                       return err;
+       }
+
+       if (!info->attrs[DEVLINK_ATTR_PARAM_VALUE_CMODE])
+               return -EINVAL;
+       cmode = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_CMODE]);
+       if (!devlink_param_cmode_is_supported(param, cmode))
+               return -EOPNOTSUPP;
+
+       if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) {
+               param_item->driverinit_value = value;
+               param_item->driverinit_value_valid = true;
+       } else {
+               if (!param->set)
+                       return -EOPNOTSUPP;
+               ctx.val = value;
+               ctx.cmode = cmode;
+               err = devlink_param_set(devlink, param, &ctx);
+               if (err)
+                       return err;
+       }
+
+       devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
+       return 0;
+}
+
+static int devlink_param_register_one(struct devlink *devlink,
+                                     const struct devlink_param *param)
+{
+       struct devlink_param_item *param_item;
+
+       if (devlink_param_find_by_name(&devlink->param_list,
+                                      param->name))
+               return -EEXIST;
+
+       if (param->supported_cmodes == BIT(DEVLINK_PARAM_CMODE_DRIVERINIT))
+               WARN_ON(param->get || param->set);
+       else
+               WARN_ON(!param->get || !param->set);
+
+       param_item = kzalloc(sizeof(*param_item), GFP_KERNEL);
+       if (!param_item)
+               return -ENOMEM;
+       param_item->param = param;
+
+       list_add_tail(&param_item->list, &devlink->param_list);
+       devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
+       return 0;
+}
+
+static void devlink_param_unregister_one(struct devlink *devlink,
+                                        const struct devlink_param *param)
+{
+       struct devlink_param_item *param_item;
+
+       param_item = devlink_param_find_by_name(&devlink->param_list,
+                                               param->name);
+       WARN_ON(!param_item);
+       devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_DEL);
+       list_del(&param_item->list);
+       kfree(param_item);
+}
+
+static int devlink_nl_region_snapshot_id_put(struct sk_buff *msg,
+                                            struct devlink *devlink,
+                                            struct devlink_snapshot *snapshot)
+{
+       struct nlattr *snap_attr;
+       int err;
+
+       snap_attr = nla_nest_start(msg, DEVLINK_ATTR_REGION_SNAPSHOT);
+       if (!snap_attr)
+               return -EINVAL;
+
+       err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID, snapshot->id);
+       if (err)
+               goto nla_put_failure;
+
+       nla_nest_end(msg, snap_attr);
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(msg, snap_attr);
+       return err;
+}
+
+static int devlink_nl_region_snapshots_id_put(struct sk_buff *msg,
+                                             struct devlink *devlink,
+                                             struct devlink_region *region)
+{
+       struct devlink_snapshot *snapshot;
+       struct nlattr *snapshots_attr;
+       int err;
+
+       snapshots_attr = nla_nest_start(msg, DEVLINK_ATTR_REGION_SNAPSHOTS);
+       if (!snapshots_attr)
+               return -EINVAL;
+
+       list_for_each_entry(snapshot, &region->snapshot_list, list) {
+               err = devlink_nl_region_snapshot_id_put(msg, devlink, snapshot);
+               if (err)
+                       goto nla_put_failure;
+       }
+
+       nla_nest_end(msg, snapshots_attr);
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(msg, snapshots_attr);
+       return err;
+}
+
+static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink,
+                                 enum devlink_command cmd, u32 portid,
+                                 u32 seq, int flags,
+                                 struct devlink_region *region)
+{
+       void *hdr;
+       int err;
+
+       hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+       if (!hdr)
+               return -EMSGSIZE;
+
+       err = devlink_nl_put_handle(msg, devlink);
+       if (err)
+               goto nla_put_failure;
+
+       err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, region->name);
+       if (err)
+               goto nla_put_failure;
+
+       err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE,
+                               region->size,
+                               DEVLINK_ATTR_PAD);
+       if (err)
+               goto nla_put_failure;
+
+       err = devlink_nl_region_snapshots_id_put(msg, devlink, region);
+       if (err)
+               goto nla_put_failure;
+
+       genlmsg_end(msg, hdr);
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+       return err;
+}
+
+static void devlink_nl_region_notify(struct devlink_region *region,
+                                    struct devlink_snapshot *snapshot,
+                                    enum devlink_command cmd)
+{
+       struct devlink *devlink = region->devlink;
+       struct sk_buff *msg;
+       void *hdr;
+       int err;
+
+       WARN_ON(cmd != DEVLINK_CMD_REGION_NEW && cmd != DEVLINK_CMD_REGION_DEL);
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return;
+
+       hdr = genlmsg_put(msg, 0, 0, &devlink_nl_family, 0, cmd);
+       if (!hdr)
+               goto out_free_msg;
+
+       err = devlink_nl_put_handle(msg, devlink);
+       if (err)
+               goto out_cancel_msg;
+
+       err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME,
+                            region->name);
+       if (err)
+               goto out_cancel_msg;
+
+       if (snapshot) {
+               err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID,
+                                 snapshot->id);
+               if (err)
+                       goto out_cancel_msg;
+       } else {
+               err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE,
+                                       region->size, DEVLINK_ATTR_PAD);
+               if (err)
+                       goto out_cancel_msg;
+       }
+       genlmsg_end(msg, hdr);
+
+       genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+                               msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+
+       return;
+
+out_cancel_msg:
+       genlmsg_cancel(msg, hdr);
+out_free_msg:
+       nlmsg_free(msg);
+}
+
+static int devlink_nl_cmd_region_get_doit(struct sk_buff *skb,
+                                         struct genl_info *info)
+{
+       struct devlink *devlink = info->user_ptr[0];
+       struct devlink_region *region;
+       const char *region_name;
+       struct sk_buff *msg;
+       int err;
+
+       if (!info->attrs[DEVLINK_ATTR_REGION_NAME])
+               return -EINVAL;
+
+       region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
+       region = devlink_region_get_by_name(devlink, region_name);
+       if (!region)
+               return -EINVAL;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       err = devlink_nl_region_fill(msg, devlink, DEVLINK_CMD_REGION_GET,
+                                    info->snd_portid, info->snd_seq, 0,
+                                    region);
+       if (err) {
+               nlmsg_free(msg);
+               return err;
+       }
+
+       return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_region_get_dumpit(struct sk_buff *msg,
+                                           struct netlink_callback *cb)
+{
+       struct devlink_region *region;
+       struct devlink *devlink;
+       int start = cb->args[0];
+       int idx = 0;
+       int err;
+
+       mutex_lock(&devlink_mutex);
+       list_for_each_entry(devlink, &devlink_list, list) {
+               if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+                       continue;
+
+               mutex_lock(&devlink->lock);
+               list_for_each_entry(region, &devlink->region_list, list) {
+                       if (idx < start) {
+                               idx++;
+                               continue;
+                       }
+                       err = devlink_nl_region_fill(msg, devlink,
+                                                    DEVLINK_CMD_REGION_GET,
+                                                    NETLINK_CB(cb->skb).portid,
+                                                    cb->nlh->nlmsg_seq,
+                                                    NLM_F_MULTI, region);
+                       if (err) {
+                               mutex_unlock(&devlink->lock);
+                               goto out;
+                       }
+                       idx++;
+               }
+               mutex_unlock(&devlink->lock);
+       }
+out:
+       mutex_unlock(&devlink_mutex);
+       cb->args[0] = idx;
+       return msg->len;
+}
+
+static int devlink_nl_cmd_region_del(struct sk_buff *skb,
+                                    struct genl_info *info)
+{
+       struct devlink *devlink = info->user_ptr[0];
+       struct devlink_snapshot *snapshot;
+       struct devlink_region *region;
+       const char *region_name;
+       u32 snapshot_id;
+
+       if (!info->attrs[DEVLINK_ATTR_REGION_NAME] ||
+           !info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID])
+               return -EINVAL;
+
+       region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
+       snapshot_id = nla_get_u32(info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]);
+
+       region = devlink_region_get_by_name(devlink, region_name);
+       if (!region)
+               return -EINVAL;
+
+       snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
+       if (!snapshot)
+               return -EINVAL;
+
+       devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_DEL);
+       devlink_region_snapshot_del(snapshot);
+       return 0;
+}
+
+static int devlink_nl_cmd_region_read_chunk_fill(struct sk_buff *msg,
+                                                struct devlink *devlink,
+                                                u8 *chunk, u32 chunk_size,
+                                                u64 addr)
+{
+       struct nlattr *chunk_attr;
+       int err;
+
+       chunk_attr = nla_nest_start(msg, DEVLINK_ATTR_REGION_CHUNK);
+       if (!chunk_attr)
+               return -EINVAL;
+
+       err = nla_put(msg, DEVLINK_ATTR_REGION_CHUNK_DATA, chunk_size, chunk);
+       if (err)
+               goto nla_put_failure;
+
+       err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_CHUNK_ADDR, addr,
+                               DEVLINK_ATTR_PAD);
+       if (err)
+               goto nla_put_failure;
+
+       nla_nest_end(msg, chunk_attr);
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(msg, chunk_attr);
+       return err;
+}
+
+#define DEVLINK_REGION_READ_CHUNK_SIZE 256
+
+static int devlink_nl_region_read_snapshot_fill(struct sk_buff *skb,
+                                               struct devlink *devlink,
+                                               struct devlink_region *region,
+                                               struct nlattr **attrs,
+                                               u64 start_offset,
+                                               u64 end_offset,
+                                               bool dump,
+                                               u64 *new_offset)
+{
+       struct devlink_snapshot *snapshot;
+       u64 curr_offset = start_offset;
+       u32 snapshot_id;
+       int err = 0;
+
+       *new_offset = start_offset;
+
+       snapshot_id = nla_get_u32(attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]);
+       snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
+       if (!snapshot)
+               return -EINVAL;
+
+       if (end_offset > snapshot->data_len || dump)
+               end_offset = snapshot->data_len;
+
+       while (curr_offset < end_offset) {
+               u32 data_size;
+               u8 *data;
+
+               if (end_offset - curr_offset < DEVLINK_REGION_READ_CHUNK_SIZE)
+                       data_size = end_offset - curr_offset;
+               else
+                       data_size = DEVLINK_REGION_READ_CHUNK_SIZE;
+
+               data = &snapshot->data[curr_offset];
+               err = devlink_nl_cmd_region_read_chunk_fill(skb, devlink,
+                                                           data, data_size,
+                                                           curr_offset);
+               if (err)
+                       break;
+
+               curr_offset += data_size;
+       }
+       *new_offset = curr_offset;
+
+       return err;
+}
+
+static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
+                                            struct netlink_callback *cb)
+{
+       u64 ret_offset, start_offset, end_offset = 0;
+       struct nlattr *attrs[DEVLINK_ATTR_MAX + 1];
+       const struct genl_ops *ops = cb->data;
+       struct devlink_region *region;
+       struct nlattr *chunks_attr;
+       const char *region_name;
+       struct devlink *devlink;
+       bool dump = true;
+       void *hdr;
+       int err;
+
+       start_offset = *((u64 *)&cb->args[0]);
+
+       err = nlmsg_parse(cb->nlh, GENL_HDRLEN + devlink_nl_family.hdrsize,
+                         attrs, DEVLINK_ATTR_MAX, ops->policy, NULL);
+       if (err)
+               goto out;
+
+       devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
+       if (IS_ERR(devlink))
+               goto out;
+
+       mutex_lock(&devlink_mutex);
+       mutex_lock(&devlink->lock);
+
+       if (!attrs[DEVLINK_ATTR_REGION_NAME] ||
+           !attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID])
+               goto out_unlock;
+
+       region_name = nla_data(attrs[DEVLINK_ATTR_REGION_NAME]);
+       region = devlink_region_get_by_name(devlink, region_name);
+       if (!region)
+               goto out_unlock;
+
+       hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+                         &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI,
+                         DEVLINK_CMD_REGION_READ);
+       if (!hdr)
+               goto out_unlock;
+
+       err = devlink_nl_put_handle(skb, devlink);
+       if (err)
+               goto nla_put_failure;
+
+       err = nla_put_string(skb, DEVLINK_ATTR_REGION_NAME, region_name);
+       if (err)
+               goto nla_put_failure;
+
+       chunks_attr = nla_nest_start(skb, DEVLINK_ATTR_REGION_CHUNKS);
+       if (!chunks_attr)
+               goto nla_put_failure;
+
+       if (attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR] &&
+           attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]) {
+               if (!start_offset)
+                       start_offset =
+                               nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
+
+               end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
+               end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]);
+               dump = false;
+       }
+
+       err = devlink_nl_region_read_snapshot_fill(skb, devlink,
+                                                  region, attrs,
+                                                  start_offset,
+                                                  end_offset, dump,
+                                                  &ret_offset);
+
+       if (err && err != -EMSGSIZE)
+               goto nla_put_failure;
+
+       /* Check that some progress was made, to prevent an infinite loop */
+       if (ret_offset == start_offset)
+               goto nla_put_failure;
+
+       *((u64 *)&cb->args[0]) = ret_offset;
+
+       nla_nest_end(skb, chunks_attr);
+       genlmsg_end(skb, hdr);
+       mutex_unlock(&devlink->lock);
+       mutex_unlock(&devlink_mutex);
+
+       return skb->len;
+
+nla_put_failure:
+       genlmsg_cancel(skb, hdr);
+out_unlock:
+       mutex_unlock(&devlink->lock);
+       mutex_unlock(&devlink_mutex);
+out:
+       return 0;
+}
+
+static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
+       [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
+       [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
+       [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
+       [DEVLINK_ATTR_PORT_TYPE] = { .type = NLA_U16 },
+       [DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 },
+       [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32 },
+       [DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16 },
+       [DEVLINK_ATTR_SB_POOL_TYPE] = { .type = NLA_U8 },
+       [DEVLINK_ATTR_SB_POOL_SIZE] = { .type = NLA_U32 },
+       [DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 },
+       [DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 },
+       [DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
+       [DEVLINK_ATTR_ESWITCH_MODE] = { .type = NLA_U16 },
+       [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = { .type = NLA_U8 },
+       [DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 },
+       [DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
+       [DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 },
+       [DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64},
+       [DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64},
+       [DEVLINK_ATTR_PARAM_NAME] = { .type = NLA_NUL_STRING },
+       [DEVLINK_ATTR_PARAM_TYPE] = { .type = NLA_U8 },
+       [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
+       [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
+       [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
+};
+
+static const struct genl_ops devlink_nl_ops[] = {
+       {
+               .cmd = DEVLINK_CMD_GET,
+               .doit = devlink_nl_cmd_get_doit,
+               .dumpit = devlink_nl_cmd_get_dumpit,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_PORT_GET,
+               .doit = devlink_nl_cmd_port_get_doit,
+               .dumpit = devlink_nl_cmd_port_get_dumpit,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_PORT_SET,
+               .doit = devlink_nl_cmd_port_set_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+       },
+       {
+               .cmd = DEVLINK_CMD_PORT_SPLIT,
+               .doit = devlink_nl_cmd_port_split_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+                                 DEVLINK_NL_FLAG_NO_LOCK,
+       },
+       {
+               .cmd = DEVLINK_CMD_PORT_UNSPLIT,
+               .doit = devlink_nl_cmd_port_unsplit_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+                                 DEVLINK_NL_FLAG_NO_LOCK,
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_GET,
+               .doit = devlink_nl_cmd_sb_get_doit,
+               .dumpit = devlink_nl_cmd_sb_get_dumpit,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+                                 DEVLINK_NL_FLAG_NEED_SB,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_POOL_GET,
+               .doit = devlink_nl_cmd_sb_pool_get_doit,
+               .dumpit = devlink_nl_cmd_sb_pool_get_dumpit,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+                                 DEVLINK_NL_FLAG_NEED_SB,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_POOL_SET,
+               .doit = devlink_nl_cmd_sb_pool_set_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+                                 DEVLINK_NL_FLAG_NEED_SB,
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_PORT_POOL_GET,
+               .doit = devlink_nl_cmd_sb_port_pool_get_doit,
+               .dumpit = devlink_nl_cmd_sb_port_pool_get_dumpit,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
+                                 DEVLINK_NL_FLAG_NEED_SB,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_PORT_POOL_SET,
+               .doit = devlink_nl_cmd_sb_port_pool_set_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
+                                 DEVLINK_NL_FLAG_NEED_SB,
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET,
+               .doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit,
+               .dumpit = devlink_nl_cmd_sb_tc_pool_bind_get_dumpit,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
+                                 DEVLINK_NL_FLAG_NEED_SB,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET,
+               .doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
+                                 DEVLINK_NL_FLAG_NEED_SB,
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT,
+               .doit = devlink_nl_cmd_sb_occ_snapshot_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+                                 DEVLINK_NL_FLAG_NEED_SB,
+       },
+       {
+               .cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
+               .doit = devlink_nl_cmd_sb_occ_max_clear_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+                                 DEVLINK_NL_FLAG_NEED_SB,
+       },
+       {
+               .cmd = DEVLINK_CMD_ESWITCH_GET,
+               .doit = devlink_nl_cmd_eswitch_get_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+       },
+       {
+               .cmd = DEVLINK_CMD_ESWITCH_SET,
+               .doit = devlink_nl_cmd_eswitch_set_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+                                 DEVLINK_NL_FLAG_NO_LOCK,
+       },
+       {
+               .cmd = DEVLINK_CMD_DPIPE_TABLE_GET,
+               .doit = devlink_nl_cmd_dpipe_table_get,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_DPIPE_ENTRIES_GET,
+               .doit = devlink_nl_cmd_dpipe_entries_get,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_DPIPE_HEADERS_GET,
+               .doit = devlink_nl_cmd_dpipe_headers_get,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
+               .doit = devlink_nl_cmd_dpipe_table_counters_set,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+       },
+       {
+               .cmd = DEVLINK_CMD_RESOURCE_SET,
+               .doit = devlink_nl_cmd_resource_set,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+       },
+       {
+               .cmd = DEVLINK_CMD_RESOURCE_DUMP,
+               .doit = devlink_nl_cmd_resource_dump,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_RELOAD,
+               .doit = devlink_nl_cmd_reload,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+                                 DEVLINK_NL_FLAG_NO_LOCK,
+       },
+       {
+               .cmd = DEVLINK_CMD_PARAM_GET,
+               .doit = devlink_nl_cmd_param_get_doit,
+               .dumpit = devlink_nl_cmd_param_get_dumpit,
+               .policy = devlink_nl_policy,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+               /* can be retrieved by unprivileged users */
+       },
+       {
+               .cmd = DEVLINK_CMD_PARAM_SET,
+               .doit = devlink_nl_cmd_param_set_doit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+       },
+       {
+               .cmd = DEVLINK_CMD_REGION_GET,
+               .doit = devlink_nl_cmd_region_get_doit,
+               .dumpit = devlink_nl_cmd_region_get_dumpit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+       },
+       {
+               .cmd = DEVLINK_CMD_REGION_DEL,
+               .doit = devlink_nl_cmd_region_del,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+       },
+       {
+               .cmd = DEVLINK_CMD_REGION_READ,
+               .dumpit = devlink_nl_cmd_region_read_dumpit,
+               .policy = devlink_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+       },
+};
+
+static struct genl_family devlink_nl_family __ro_after_init = {
+       .name           = DEVLINK_GENL_NAME,
+       .version        = DEVLINK_GENL_VERSION,
+       .maxattr        = DEVLINK_ATTR_MAX,
+       .netnsok        = true,
+       .pre_doit       = devlink_nl_pre_doit,
+       .post_doit      = devlink_nl_post_doit,
+       .module         = THIS_MODULE,
+       .ops            = devlink_nl_ops,
+       .n_ops          = ARRAY_SIZE(devlink_nl_ops),
+       .mcgrps         = devlink_nl_mcgrps,
+       .n_mcgrps       = ARRAY_SIZE(devlink_nl_mcgrps),
+};
+
+/**
+ *     devlink_alloc - Allocate new devlink instance resources
+ *
+ *     @ops: devlink operations
+ *     @priv_size: size of user private data
+ *
+ *     Allocate new devlink instance resources, including devlink index
+ *     and name.
+ */
+struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
+{
+       struct devlink *devlink;
+
+       devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
+       if (!devlink)
+               return NULL;
+       devlink->ops = ops;
+       devlink_net_set(devlink, &init_net);
+       INIT_LIST_HEAD(&devlink->port_list);
+       INIT_LIST_HEAD(&devlink->sb_list);
        INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
        INIT_LIST_HEAD(&devlink->resource_list);
+       INIT_LIST_HEAD(&devlink->param_list);
+       INIT_LIST_HEAD(&devlink->region_list);
        mutex_init(&devlink->lock);
        return devlink;
 }
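A minimal allocation sketch, not part of this patch (the ops table and the private struct are hypothetical):

	/* illustrative driver probe fragment */
	static const struct devlink_ops foo_devlink_ops = {
		/* .reload and friends would go here */
	};

	struct devlink *devlink;

	devlink = devlink_alloc(&foo_devlink_ops, sizeof(struct foo_priv));
	if (!devlink)
		return -ENOMEM;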
@@ -3434,6 +4442,320 @@ void devlink_resource_occ_get_unregister(struct devlink *devlink,
 }
 EXPORT_SYMBOL_GPL(devlink_resource_occ_get_unregister);
 
+/**
+ *     devlink_params_register - register configuration parameters
+ *
+ *     @devlink: devlink
+ *     @params: configuration parameters array
+ *     @params_count: number of parameters provided
+ *
+ *     Register the configuration parameters supported by the driver.
+ */
+int devlink_params_register(struct devlink *devlink,
+                           const struct devlink_param *params,
+                           size_t params_count)
+{
+       const struct devlink_param *param = params;
+       int i;
+       int err;
+
+       mutex_lock(&devlink->lock);
+       for (i = 0; i < params_count; i++, param++) {
+               if (!param || !param->name || !param->supported_cmodes) {
+                       err = -EINVAL;
+                       goto rollback;
+               }
+               if (param->generic) {
+                       err = devlink_param_generic_verify(param);
+                       if (err)
+                               goto rollback;
+               } else {
+                       err = devlink_param_driver_verify(param);
+                       if (err)
+                               goto rollback;
+               }
+               err = devlink_param_register_one(devlink, param);
+               if (err)
+                       goto rollback;
+       }
+
+       mutex_unlock(&devlink->lock);
+       return 0;
+
+rollback:
+       if (!i)
+               goto unlock;
+       for (param--; i > 0; i--, param--)
+               devlink_param_unregister_one(devlink, param);
+unlock:
+       mutex_unlock(&devlink->lock);
+       return err;
+}
+EXPORT_SYMBOL_GPL(devlink_params_register);
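As a usage illustration (not part of the patch): a driver registering a single driverinit-only parameter. Per the WARN_ON in devlink_param_register_one() above, a parameter whose only supported cmode is driverinit must not provide get/set callbacks. The ID and name below are hypothetical, and driver-specific IDs are assumed to start just above the generic ID range:

	static const struct devlink_param foo_devlink_params[] = {
		{
			.id = DEVLINK_PARAM_GENERIC_ID_MAX + 1, /* hypothetical */
			.name = "foo_max_queues",
			.type = DEVLINK_PARAM_TYPE_U32,
			.supported_cmodes = BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			/* driverinit-only, so no .get/.set/.validate */
		},
	};

	err = devlink_params_register(devlink, foo_devlink_params,
				      ARRAY_SIZE(foo_devlink_params));
	if (err)
		goto err_params_register;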
+
+/**
+ *     devlink_params_unregister - unregister configuration parameters
+ *     @devlink: devlink
+ *     @params: configuration parameters to unregister
+ *     @params_count: number of parameters provided
+ */
+void devlink_params_unregister(struct devlink *devlink,
+                              const struct devlink_param *params,
+                              size_t params_count)
+{
+       const struct devlink_param *param = params;
+       int i;
+
+       mutex_lock(&devlink->lock);
+       for (i = 0; i < params_count; i++, param++)
+               devlink_param_unregister_one(devlink, param);
+       mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devlink_params_unregister);
+
+/**
+ *     devlink_param_driverinit_value_get - get configuration parameter
+ *                                          value for driverinit configuration
+ *                                          mode
+ *
+ *     @devlink: devlink
+ *     @param_id: parameter ID
+ *     @init_val: value of parameter in driverinit configuration mode
+ *
+ *     This function should be used by the driver to get the driverinit
+ *     configuration value for re-initialization after a reload command.
+ */
+int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+                                      union devlink_param_value *init_val)
+{
+       struct devlink_param_item *param_item;
+
+       if (!devlink->ops || !devlink->ops->reload)
+               return -EOPNOTSUPP;
+
+       param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
+       if (!param_item)
+               return -EINVAL;
+
+       if (!param_item->driverinit_value_valid ||
+           !devlink_param_cmode_is_supported(param_item->param,
+                                             DEVLINK_PARAM_CMODE_DRIVERINIT))
+               return -EOPNOTSUPP;
+
+       *init_val = param_item->driverinit_value;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_get);
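Call-site sketch (illustrative, reusing the hypothetical parameter ID from the earlier sketch): after a reload the driver re-reads the cached driverinit value before programming the hardware:

	union devlink_param_value val;

	if (!devlink_param_driverinit_value_get(devlink,
						DEVLINK_PARAM_GENERIC_ID_MAX + 1,
						&val))
		foo->max_queues = val.vu32; /* foo is hypothetical driver state */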
+
+/**
+ *     devlink_param_driverinit_value_set - set value of configuration
+ *                                          parameter for driverinit
+ *                                          configuration mode
+ *
+ *     @devlink: devlink
+ *     @param_id: parameter ID
+ *     @init_val: value of parameter to set for driverinit configuration mode
+ *
+ *     This function should be used by the driver to set the default value
+ *     in driverinit configuration mode.
+ */
+int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+                                      union devlink_param_value init_val)
+{
+       struct devlink_param_item *param_item;
+
+       param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
+       if (!param_item)
+               return -EINVAL;
+
+       if (!devlink_param_cmode_is_supported(param_item->param,
+                                             DEVLINK_PARAM_CMODE_DRIVERINIT))
+               return -EOPNOTSUPP;
+
+       param_item->driverinit_value = init_val;
+       param_item->driverinit_value_valid = true;
+
+       devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_param_driverinit_value_set);
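Correspondingly, an illustrative probe-time default (again with the hypothetical ID):

	union devlink_param_value val;

	val.vu32 = 16; /* hypothetical default */
	devlink_param_driverinit_value_set(devlink,
					   DEVLINK_PARAM_GENERIC_ID_MAX + 1, val);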
+
+/**
+ *     devlink_param_value_changed - notify devlink of a parameter value change
+ *
+ *     @devlink: devlink
+ *     @param_id: parameter ID
+ *
+ *     This function should be called by the driver right after a value
+ *     change, for any configuration mode except driverinit.
+ *     For driverinit configuration mode the driver should use
+ *     devlink_param_driverinit_value_set() instead.
+ */
+void devlink_param_value_changed(struct devlink *devlink, u32 param_id)
+{
+       struct devlink_param_item *param_item;
+
+       param_item = devlink_param_find_by_id(&devlink->param_list, param_id);
+       WARN_ON(!param_item);
+
+       devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
+}
+EXPORT_SYMBOL_GPL(devlink_param_value_changed);
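For runtime/permanent modes the driver only signals a change after it has taken effect, e.g. (sketch, hypothetical ID as above):

	/* e.g. after firmware has applied a new permanent value */
	devlink_param_value_changed(devlink, DEVLINK_PARAM_GENERIC_ID_MAX + 1);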
+
+/**
+ *     devlink_region_create - create a new address region
+ *
+ *     @devlink: devlink
+ *     @region_name: region name
+ *     @region_max_snapshots: Maximum supported number of snapshots for region
+ *     @region_size: size of region
+ */
+struct devlink_region *devlink_region_create(struct devlink *devlink,
+                                            const char *region_name,
+                                            u32 region_max_snapshots,
+                                            u64 region_size)
+{
+       struct devlink_region *region;
+       int err = 0;
+
+       mutex_lock(&devlink->lock);
+
+       if (devlink_region_get_by_name(devlink, region_name)) {
+               err = -EEXIST;
+               goto unlock;
+       }
+
+       region = kzalloc(sizeof(*region), GFP_KERNEL);
+       if (!region) {
+               err = -ENOMEM;
+               goto unlock;
+       }
+
+       region->devlink = devlink;
+       region->max_snapshots = region_max_snapshots;
+       region->name = region_name;
+       region->size = region_size;
+       INIT_LIST_HEAD(&region->snapshot_list);
+       list_add_tail(&region->list, &devlink->region_list);
+       devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
+
+       mutex_unlock(&devlink->lock);
+       return region;
+
+unlock:
+       mutex_unlock(&devlink->lock);
+       return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(devlink_region_create);
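Region-creation sketch (illustrative; the name, snapshot limit and size are hypothetical):

	struct devlink_region *region;

	region = devlink_region_create(devlink, "foo-crash-dump",
				       8 /* max snapshots */,
				       0x10000 /* region size in bytes */);
	if (IS_ERR(region))
		return PTR_ERR(region);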
+
+/**
+ *     devlink_region_destroy - destroy address region
+ *
+ *     @region: devlink region to destroy
+ */
+void devlink_region_destroy(struct devlink_region *region)
+{
+       struct devlink *devlink = region->devlink;
+       struct devlink_snapshot *snapshot, *ts;
+
+       mutex_lock(&devlink->lock);
+
+       /* Free all snapshots of region */
+       list_for_each_entry_safe(snapshot, ts, &region->snapshot_list, list)
+               devlink_region_snapshot_del(snapshot);
+
+       list_del(&region->list);
+
+       devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
+       mutex_unlock(&devlink->lock);
+       kfree(region);
+}
+EXPORT_SYMBOL_GPL(devlink_region_destroy);
+
+/**
+ *     devlink_region_shapshot_id_get - get snapshot ID
+ *
+ *     @devlink: devlink
+ *
+ *     This function should be called when adding a new snapshot.
+ *     The driver should use the same ID for multiple snapshots taken
+ *     on multiple regions at the same time or by the same trigger.
+ */
+u32 devlink_region_shapshot_id_get(struct devlink *devlink)
+{
+       u32 id;
+
+       mutex_lock(&devlink->lock);
+       id = ++devlink->snapshot_id;
+       mutex_unlock(&devlink->lock);
+
+       return id;
+}
+EXPORT_SYMBOL_GPL(devlink_region_shapshot_id_get);
+
+/**
+ *     devlink_region_snapshot_create - create a new snapshot
+ *
+ *     @region: devlink region of the snapshot
+ *     @data_len: size of snapshot data
+ *     @data: snapshot data
+ *     @snapshot_id: snapshot id to be created
+ *     @data_destructor: pointer to destructor function to free data
+ *
+ *     This will add a new snapshot of a region. The snapshot
+ *     will be stored on the region struct and can be accessed
+ *     from devlink. This is useful for future analysis of snapshots.
+ *     Multiple snapshots can be created on a region.
+ *     The @snapshot_id should be obtained using
+ *     devlink_region_shapshot_id_get().
+ */
+int devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
+                                  u8 *data, u32 snapshot_id,
+                                  devlink_snapshot_data_dest_t *data_destructor)
+{
+       struct devlink *devlink = region->devlink;
+       struct devlink_snapshot *snapshot;
+       int err;
+
+       mutex_lock(&devlink->lock);
+
+       /* check if region can hold one more snapshot */
+       if (region->cur_snapshots == region->max_snapshots) {
+               err = -ENOMEM;
+               goto unlock;
+       }
+
+       if (devlink_region_snapshot_get_by_id(region, snapshot_id)) {
+               err = -EEXIST;
+               goto unlock;
+       }
+
+       snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
+       if (!snapshot) {
+               err = -ENOMEM;
+               goto unlock;
+       }
+
+       snapshot->id = snapshot_id;
+       snapshot->region = region;
+       snapshot->data = data;
+       snapshot->data_len = data_len;
+       snapshot->data_destructor = data_destructor;
+
+       list_add_tail(&snapshot->list, &region->snapshot_list);
+
+       region->cur_snapshots++;
+
+       devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_NEW);
+       mutex_unlock(&devlink->lock);
+       return 0;
+
+unlock:
+       mutex_unlock(&devlink->lock);
+       return err;
+}
+EXPORT_SYMBOL_GPL(devlink_region_snapshot_create);
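Putting the snapshot API together, an illustrative sketch (the dump buffer is hypothetical): obtain one ID, duplicate the data, and hand ownership to devlink via the destructor:

	u32 snapshot_id = devlink_region_shapshot_id_get(devlink);
	u8 *data = kmemdup(foo_dump, foo_dump_len, GFP_KERNEL);
	int err;

	if (!data)
		return -ENOMEM;
	err = devlink_region_snapshot_create(region, foo_dump_len, data,
					     snapshot_id, kfree);
	if (err)
		kfree(data); /* rejected, e.g. duplicate ID or region full */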
+
 static int __init devlink_module_init(void)
 {
        return genl_register_family(&devlink_nl_family);
index e677a20180cf304a27154d12c338da046c96a546..c9993c6c2fd4f492d5113d9c328c7bf3ddcfa9f3 100644 (file)
@@ -111,6 +111,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
        [NETIF_F_RX_UDP_TUNNEL_PORT_BIT] =       "rx-udp_tunnel-port-offload",
        [NETIF_F_HW_TLS_RECORD_BIT] =   "tls-hw-record",
        [NETIF_F_HW_TLS_TX_BIT] =        "tls-hw-tx-offload",
+       [NETIF_F_HW_TLS_RX_BIT] =        "tls-hw-rx-offload",
 };
 
 static const char
index 126ffc5bc630cb412e4bcf1a48869ec6711fda54..f64aa13811eaeedf8f0040bc9f993ad9e1661eca 100644 (file)
@@ -416,6 +416,14 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops,
                if (rule->mark && r->mark != rule->mark)
                        continue;
 
+               if (rule->suppress_ifgroup != -1 &&
+                   r->suppress_ifgroup != rule->suppress_ifgroup)
+                       continue;
+
+               if (rule->suppress_prefixlen != -1 &&
+                   r->suppress_prefixlen != rule->suppress_prefixlen)
+                       continue;
+
                if (rule->mark_mask && r->mark_mask != rule->mark_mask)
                        continue;
 
@@ -436,6 +444,9 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops,
                if (rule->ip_proto && r->ip_proto != rule->ip_proto)
                        continue;
 
+               if (rule->proto && r->proto != rule->proto)
+                       continue;
+
                if (fib_rule_port_range_set(&rule->sport_range) &&
                    !fib_rule_port_range_compare(&r->sport_range,
                                                 &rule->sport_range))
@@ -645,6 +656,73 @@ static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh,
        return err;
 }
 
+static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
+                      struct nlattr **tb, struct fib_rule *rule)
+{
+       struct fib_rule *r;
+
+       list_for_each_entry(r, &ops->rules_list, list) {
+               if (r->action != rule->action)
+                       continue;
+
+               if (r->table != rule->table)
+                       continue;
+
+               if (r->pref != rule->pref)
+                       continue;
+
+               if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
+                       continue;
+
+               if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
+                       continue;
+
+               if (r->mark != rule->mark)
+                       continue;
+
+               if (r->suppress_ifgroup != rule->suppress_ifgroup)
+                       continue;
+
+               if (r->suppress_prefixlen != rule->suppress_prefixlen)
+                       continue;
+
+               if (r->mark_mask != rule->mark_mask)
+                       continue;
+
+               if (r->tun_id != rule->tun_id)
+                       continue;
+
+               if (r->fr_net != rule->fr_net)
+                       continue;
+
+               if (r->l3mdev != rule->l3mdev)
+                       continue;
+
+               if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
+                   !uid_eq(r->uid_range.end, rule->uid_range.end))
+                       continue;
+
+               if (r->ip_proto != rule->ip_proto)
+                       continue;
+
+               if (r->proto != rule->proto)
+                       continue;
+
+               if (!fib_rule_port_range_compare(&r->sport_range,
+                                                &rule->sport_range))
+                       continue;
+
+               if (!fib_rule_port_range_compare(&r->dport_range,
+                                                &rule->dport_range))
+                       continue;
+
+               if (!ops->compare(r, frh, tb))
+                       continue;
+               return 1;
+       }
+       return 0;
+}
+
 int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
                   struct netlink_ext_ack *extack)
 {
@@ -679,7 +757,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto errout;
 
        if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
-           rule_find(ops, frh, tb, rule, user_priority)) {
+           rule_exists(ops, frh, tb, rule)) {
                err = -EEXIST;
                goto errout_free;
        }
index 3d9ba7e5965adc4658b379a0cf55ff2f22f4b94d..b9ec916f4e3a684574083de1095771d8878a0349 100644 (file)
@@ -3214,20 +3214,6 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 }
 EXPORT_SYMBOL_GPL(xdp_do_redirect);
 
-static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
-{
-       unsigned int len;
-
-       if (unlikely(!(fwd->flags & IFF_UP)))
-               return -ENETDOWN;
-
-       len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
-       if (skb->len > len)
-               return -EMSGSIZE;
-
-       return 0;
-}
-
 static int xdp_do_generic_redirect_map(struct net_device *dev,
                                       struct sk_buff *skb,
                                       struct xdp_buff *xdp,
@@ -3256,10 +3242,11 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
        }
 
        if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
-               if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
+               struct bpf_dtab_netdev *dst = fwd;
+
+               err = dev_map_generic_redirect(dst, skb, xdp_prog);
+               if (unlikely(err))
                        goto err;
-               skb->dev = fwd;
-               generic_xdp_tx(skb, xdp_prog);
        } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
                struct xdp_sock *xs = fwd;
 
@@ -3595,7 +3582,7 @@ BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
        if (unlikely(size > IP_TUNNEL_OPTS_MAX))
                return -ENOMEM;
 
-       ip_tunnel_info_opts_set(info, from, size);
+       ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT);
 
        return 0;
 }
@@ -4086,8 +4073,9 @@ static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
        memcpy(params->smac, dev->dev_addr, ETH_ALEN);
        params->h_vlan_TCI = 0;
        params->h_vlan_proto = 0;
+       params->ifindex = dev->ifindex;
 
-       return dev->ifindex;
+       return 0;
 }
 #endif
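The return-convention change above (and in the IPv6 path below) means BPF programs no longer get an egress ifindex back from bpf_fib_lookup(); they get a BPF_FIB_LKUP_RET_* code and read the ifindex from params->ifindex. An XDP-side sketch, assuming BPF_FIB_LKUP_RET_SUCCESS is the zero/success code of the new enum:

	struct bpf_fib_lookup params = {};
	int rc;

	params.family = AF_INET;
	params.ifindex = ctx->ingress_ifindex;
	/* ... fill addresses, tot_len, etc. from the parsed packet ... */

	rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
	if (rc == BPF_FIB_LKUP_RET_SUCCESS)
		return bpf_redirect(params.ifindex, 0);
	if (rc == BPF_FIB_LKUP_RET_FRAG_NEEDED)
		return XDP_DROP; /* packet larger than the route MTU */
	return XDP_PASS; /* let the stack handle the remaining cases */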
 
@@ -4111,7 +4099,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        /* verify forwarding is enabled on this interface */
        in_dev = __in_dev_get_rcu(dev);
        if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
-               return 0;
+               return BPF_FIB_LKUP_RET_FWD_DISABLED;
 
        if (flags & BPF_FIB_LOOKUP_OUTPUT) {
                fl4.flowi4_iif = 1;
@@ -4136,7 +4124,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
                tb = fib_get_table(net, tbid);
                if (unlikely(!tb))
-                       return 0;
+                       return BPF_FIB_LKUP_RET_NOT_FWDED;
 
                err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
        } else {
@@ -4148,8 +4136,20 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
                err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
        }
 
-       if (err || res.type != RTN_UNICAST)
-               return 0;
+       if (err) {
+               /* map fib lookup errors to RTN_ type */
+               if (err == -EINVAL)
+                       return BPF_FIB_LKUP_RET_BLACKHOLE;
+               if (err == -EHOSTUNREACH)
+                       return BPF_FIB_LKUP_RET_UNREACHABLE;
+               if (err == -EACCES)
+                       return BPF_FIB_LKUP_RET_PROHIBIT;
+
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
+       }
+
+       if (res.type != RTN_UNICAST)
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
 
        if (res.fi->fib_nhs > 1)
                fib_select_path(net, &res, &fl4, NULL);
@@ -4157,19 +4157,16 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        if (check_mtu) {
                mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
                if (params->tot_len > mtu)
-                       return 0;
+                       return BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
        nh = &res.fi->fib_nh[res.nh_sel];
 
        /* do not handle lwt encaps right now */
        if (nh->nh_lwtstate)
-               return 0;
+               return BPF_FIB_LKUP_RET_UNSUPP_LWT;
 
        dev = nh->nh_dev;
-       if (unlikely(!dev))
-               return 0;
-
        if (nh->nh_gw)
                params->ipv4_dst = nh->nh_gw;
 
@@ -4179,10 +4176,10 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
         * rcu_read_lock_bh is not needed here
         */
        neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst);
-       if (neigh)
-               return bpf_fib_set_fwd_params(params, neigh, dev);
+       if (!neigh)
+               return BPF_FIB_LKUP_RET_NO_NEIGH;
 
-       return 0;
+       return bpf_fib_set_fwd_params(params, neigh, dev);
 }
 #endif
 
@@ -4203,7 +4200,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
        /* link local addresses are never forwarded */
        if (rt6_need_strict(dst) || rt6_need_strict(src))
-               return 0;
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
 
        dev = dev_get_by_index_rcu(net, params->ifindex);
        if (unlikely(!dev))
@@ -4211,7 +4208,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
        idev = __in6_dev_get_safely(dev);
        if (unlikely(!idev || !net->ipv6.devconf_all->forwarding))
-               return 0;
+               return BPF_FIB_LKUP_RET_FWD_DISABLED;
 
        if (flags & BPF_FIB_LOOKUP_OUTPUT) {
                fl6.flowi6_iif = 1;
@@ -4238,7 +4235,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
                tb = ipv6_stub->fib6_get_table(net, tbid);
                if (unlikely(!tb))
-                       return 0;
+                       return BPF_FIB_LKUP_RET_NOT_FWDED;
 
                f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict);
        } else {
@@ -4251,11 +4248,23 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        }
 
        if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry))
-               return 0;
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
+
+       if (unlikely(f6i->fib6_flags & RTF_REJECT)) {
+               switch (f6i->fib6_type) {
+               case RTN_BLACKHOLE:
+                       return BPF_FIB_LKUP_RET_BLACKHOLE;
+               case RTN_UNREACHABLE:
+                       return BPF_FIB_LKUP_RET_UNREACHABLE;
+               case RTN_PROHIBIT:
+                       return BPF_FIB_LKUP_RET_PROHIBIT;
+               default:
+                       return BPF_FIB_LKUP_RET_NOT_FWDED;
+               }
+       }
 
-       if (unlikely(f6i->fib6_flags & RTF_REJECT ||
-           f6i->fib6_type != RTN_UNICAST))
-               return 0;
+       if (f6i->fib6_type != RTN_UNICAST)
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
 
        if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0)
                f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6,
@@ -4265,11 +4274,11 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        if (check_mtu) {
                mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src);
                if (params->tot_len > mtu)
-                       return 0;
+                       return BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
        if (f6i->fib6_nh.nh_lwtstate)
-               return 0;
+               return BPF_FIB_LKUP_RET_UNSUPP_LWT;
 
        if (f6i->fib6_flags & RTF_GATEWAY)
                *dst = f6i->fib6_nh.nh_gw;
@@ -4283,10 +4292,10 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
         */
        neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
                                      ndisc_hashfn, dst, dev);
-       if (neigh)
-               return bpf_fib_set_fwd_params(params, neigh, dev);
+       if (!neigh)
+               return BPF_FIB_LKUP_RET_NO_NEIGH;
 
-       return 0;
+       return bpf_fib_set_fwd_params(params, neigh, dev);
 }
 #endif
 
@@ -4328,7 +4337,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
           struct bpf_fib_lookup *, params, int, plen, u32, flags)
 {
        struct net *net = dev_net(skb->dev);
-       int index = -EAFNOSUPPORT;
+       int rc = -EAFNOSUPPORT;
 
        if (plen < sizeof(*params))
                return -EINVAL;
@@ -4339,25 +4348,25 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
        switch (params->family) {
 #if IS_ENABLED(CONFIG_INET)
        case AF_INET:
-               index = bpf_ipv4_fib_lookup(net, params, flags, false);
+               rc = bpf_ipv4_fib_lookup(net, params, flags, false);
                break;
 #endif
 #if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
-               index = bpf_ipv6_fib_lookup(net, params, flags, false);
+               rc = bpf_ipv6_fib_lookup(net, params, flags, false);
                break;
 #endif
        }
 
-       if (index > 0) {
+       if (!rc) {
                struct net_device *dev;
 
-               dev = dev_get_by_index_rcu(net, index);
+               dev = dev_get_by_index_rcu(net, params->ifindex);
                if (!is_skb_forwardable(dev, skb))
-                       index = 0;
+                       rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
-       return index;
+       return rc;
 }
 
 static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
@@ -4651,6 +4660,7 @@ bpf_base_func_proto(enum bpf_func_id func_id)
        case BPF_FUNC_trace_printk:
                if (capable(CAP_SYS_ADMIN))
                        return bpf_get_trace_printk_proto();
+               /* else: fall through */
        default:
                return NULL;
        }
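
Note the contract change running through the filter.c hunks above: on success the lookup helpers now return 0 and publish the egress device in params->ifindex (set in bpf_fib_set_fwd_params()), while every early exit returns a distinct BPF_FIB_LKUP_RET_* reason instead of 0. A minimal sketch of a consumer under the new contract; the program structure is illustrative only and not part of this patch:

    /* Illustrative XDP consumer (not part of this patch): rc == 0 means
     * params.ifindex is the egress device, rc > 0 is a BPF_FIB_LKUP_RET_*
     * reason, rc < 0 a helper-level error (e.g. bad plen).
     */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>    /* SEC(), helper decls; libbpf layout assumed */

    SEC("xdp")
    int xdp_fwd(struct xdp_md *ctx)
    {
            struct bpf_fib_lookup params = {};
            int rc;

            params.family  = 2 /* AF_INET */;
            params.ifindex = ctx->ingress_ifindex;
            /* ... fill ipv4_src/ipv4_dst/tot_len from the parsed header ... */

            rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
            if (rc == 0)
                    return bpf_redirect(params.ifindex, 0);
            if (rc == BPF_FIB_LKUP_RET_BLACKHOLE ||
                rc == BPF_FIB_LKUP_RET_UNREACHABLE ||
                rc == BPF_FIB_LKUP_RET_PROHIBIT)
                    return XDP_DROP;
            return XDP_PASS;        /* not forwarded; let the stack handle it */
    }
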
index 53f96e4f7bf593863b584e050eb7628d4023718a..b555fc229e967f75e9ac83aaa7ec392caadc2895 100644 (file)
@@ -589,7 +589,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
        struct flow_dissector_key_tags *key_tags;
        struct flow_dissector_key_vlan *key_vlan;
        enum flow_dissect_ret fdret;
-       bool skip_vlan = false;
+       enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
        int num_hdrs = 0;
        u8 ip_proto = 0;
        bool ret;
@@ -748,14 +748,14 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
        }
        case htons(ETH_P_8021AD):
        case htons(ETH_P_8021Q): {
-               const struct vlan_hdr *vlan;
+               const struct vlan_hdr *vlan = NULL;
                struct vlan_hdr _vlan;
-               bool vlan_tag_present = skb && skb_vlan_tag_present(skb);
+               __be16 saved_vlan_tpid = proto;
 
-               if (vlan_tag_present)
+               if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX &&
+                   skb && skb_vlan_tag_present(skb)) {
                        proto = skb->protocol;
-
-               if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
+               } else {
                        vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
                                                    data, hlen, &_vlan);
                        if (!vlan) {
@@ -765,20 +765,23 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 
                        proto = vlan->h_vlan_encapsulated_proto;
                        nhoff += sizeof(*vlan);
-                       if (skip_vlan) {
-                               fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
-                               break;
-                       }
                }
 
-               skip_vlan = true;
-               if (dissector_uses_key(flow_dissector,
-                                      FLOW_DISSECTOR_KEY_VLAN)) {
+               if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) {
+                       dissector_vlan = FLOW_DISSECTOR_KEY_VLAN;
+               } else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) {
+                       dissector_vlan = FLOW_DISSECTOR_KEY_CVLAN;
+               } else {
+                       fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
+                       break;
+               }
+
+               if (dissector_uses_key(flow_dissector, dissector_vlan)) {
                        key_vlan = skb_flow_dissector_target(flow_dissector,
-                                                            FLOW_DISSECTOR_KEY_VLAN,
+                                                            dissector_vlan,
                                                             target_container);
 
-                       if (vlan_tag_present) {
+                       if (!vlan) {
                                key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
                                key_vlan->vlan_priority =
                                        (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
@@ -789,6 +792,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
                                        (ntohs(vlan->h_vlan_TCI) &
                                         VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                        }
+                       key_vlan->vlan_tpid = saved_vlan_tpid;
                }
 
                fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
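
The rewritten VLAN case now walks at most two 802.1Q headers: the first tag fills FLOW_DISSECTOR_KEY_VLAN, a second one fills the new FLOW_DISSECTOR_KEY_CVLAN, and each key records the TPID it was parsed under (saved_vlan_tpid). A hypothetical consumer dissecting both layers of a QinQ frame might set up its key map as below; only the key ids and the vlan_tpid field come from this series, the container layout is invented for illustration:

    struct my_flow {
            struct flow_dissector_key_vlan vlan;    /* outer tag */
            struct flow_dissector_key_vlan cvlan;   /* inner tag */
    };

    static const struct flow_dissector_key my_keys[] = {
            { FLOW_DISSECTOR_KEY_VLAN,  offsetof(struct my_flow, vlan)  },
            { FLOW_DISSECTOR_KEY_CVLAN, offsetof(struct my_flow, cvlan) },
    };

    /* After skb_flow_dissector_init(&d, my_keys, ARRAY_SIZE(my_keys)) and
     * skb_flow_dissect(skb, &d, &flow, 0): flow.vlan.vlan_tpid holds the
     * outer TPID (ETH_P_8021Q or ETH_P_8021AD) and flow.cvlan describes
     * the inner tag, matching the dissector_vlan state machine above.
     */
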
index 8e3fda9e725cba97973ed2ce85ebe6f2e926e7cf..cbe85d8d4cc21239288a09927f60a08642781c3d 100644 (file)
@@ -1148,7 +1148,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                neigh->nud_state = new;
                err = 0;
                notify = old & NUD_VALID;
-               if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
+               if (((old & (NUD_INCOMPLETE | NUD_PROBE)) ||
+                    (flags & NEIGH_UPDATE_F_ADMIN)) &&
                    (new & NUD_FAILED)) {
                        neigh_invalidate(neigh);
                        notify = 1;
index bb7e80f4ced3746dc6946cfbc3e71520db503e50..ffa1d18f2c2ce4915ed741515885dfebf6996ae3 100644 (file)
@@ -1047,13 +1047,30 @@ static ssize_t traffic_class_show(struct netdev_queue *queue,
                                  char *buf)
 {
        struct net_device *dev = queue->dev;
-       int index = get_netdev_queue_index(queue);
-       int tc = netdev_txq_to_tc(dev, index);
+       int index;
+       int tc;
 
+       if (!netif_is_multiqueue(dev))
+               return -ENOENT;
+
+       index = get_netdev_queue_index(queue);
+
+       /* If queue belongs to subordinate dev use its TC mapping */
+       dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
+
+       tc = netdev_txq_to_tc(dev, index);
        if (tc < 0)
                return -EINVAL;
 
-       return sprintf(buf, "%u\n", tc);
+       /* We can report the traffic class one of two ways:
+        * Subordinate device traffic classes are reported with the traffic
+        * class first and then the subordinate class, so for example TC0 on
+        * subordinate device 2 will be reported as "0-2". If the queue
+        * belongs to the root device it will be reported with just the
+        * traffic class, so just "0" for TC 0, for example.
+        */
+       return dev->num_tc < 0 ? sprintf(buf, "%u%d\n", tc, dev->num_tc) :
+                                sprintf(buf, "%u\n", tc);
 }
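
The "%u%d" branch leans on the convention that subordinate devices store their traffic-class offset as a negative num_tc, so the sign printf emits for the second argument doubles as the separator. A standalone demonstration, with the encoding assumed from the comment above:

    /* Userspace demonstration of the format trick: a negative num_tc
     * prints its own '-' separator.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned int tc = 0;
            int num_tc = -2;        /* queue owned by subordinate dev 2 */

            if (num_tc < 0)
                    printf("%u%d\n", tc, num_tc);   /* -> "0-2" */
            else
                    printf("%u\n", tc);
            return 0;
    }
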
 
 #ifdef CONFIG_XPS
@@ -1214,10 +1231,20 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
        cpumask_var_t mask;
        unsigned long index;
 
+       if (!netif_is_multiqueue(dev))
+               return -ENOENT;
+
        index = get_netdev_queue_index(queue);
 
        if (dev->num_tc) {
+               /* Do not allow XPS on subordinate device directly */
                num_tc = dev->num_tc;
+               if (num_tc < 0)
+                       return -EINVAL;
+
+               /* If queue belongs to subordinate dev use its map */
+               dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
+
                tc = netdev_txq_to_tc(dev, index);
                if (tc < 0)
                        return -EINVAL;
@@ -1227,13 +1254,13 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
                return -ENOMEM;
 
        rcu_read_lock();
-       dev_maps = rcu_dereference(dev->xps_maps);
+       dev_maps = rcu_dereference(dev->xps_cpus_map);
        if (dev_maps) {
                for_each_possible_cpu(cpu) {
                        int i, tci = cpu * num_tc + tc;
                        struct xps_map *map;
 
-                       map = rcu_dereference(dev_maps->cpu_map[tci]);
+                       map = rcu_dereference(dev_maps->attr_map[tci]);
                        if (!map)
                                continue;
 
@@ -1260,6 +1287,9 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue,
        cpumask_var_t mask;
        int err;
 
+       if (!netif_is_multiqueue(dev))
+               return -ENOENT;
+
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
 
@@ -1283,6 +1313,88 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue,
 
 static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
        = __ATTR_RW(xps_cpus);
+
+static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
+{
+       struct net_device *dev = queue->dev;
+       struct xps_dev_maps *dev_maps;
+       unsigned long *mask, index;
+       int j, len, num_tc = 1, tc = 0;
+
+       index = get_netdev_queue_index(queue);
+
+       if (dev->num_tc) {
+               num_tc = dev->num_tc;
+               tc = netdev_txq_to_tc(dev, index);
+               if (tc < 0)
+                       return -EINVAL;
+       }
+       mask = kcalloc(BITS_TO_LONGS(dev->num_rx_queues), sizeof(long),
+                      GFP_KERNEL);
+       if (!mask)
+               return -ENOMEM;
+
+       rcu_read_lock();
+       dev_maps = rcu_dereference(dev->xps_rxqs_map);
+       if (!dev_maps)
+               goto out_no_maps;
+
+       for (j = -1; j = netif_attrmask_next(j, NULL, dev->num_rx_queues),
+            j < dev->num_rx_queues;) {
+               int i, tci = j * num_tc + tc;
+               struct xps_map *map;
+
+               map = rcu_dereference(dev_maps->attr_map[tci]);
+               if (!map)
+                       continue;
+
+               for (i = map->len; i--;) {
+                       if (map->queues[i] == index) {
+                               set_bit(j, mask);
+                               break;
+                       }
+               }
+       }
+out_no_maps:
+       rcu_read_unlock();
+
+       len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
+       kfree(mask);
+
+       return len < PAGE_SIZE ? len : -EINVAL;
+}
+
+static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
+                             size_t len)
+{
+       struct net_device *dev = queue->dev;
+       struct net *net = dev_net(dev);
+       unsigned long *mask, index;
+       int err;
+
+       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+               return -EPERM;
+
+       mask = kcalloc(BITS_TO_LONGS(dev->num_rx_queues), sizeof(long),
+                      GFP_KERNEL);
+       if (!mask)
+               return -ENOMEM;
+
+       index = get_netdev_queue_index(queue);
+
+       err = bitmap_parse(buf, len, mask, dev->num_rx_queues);
+       if (err) {
+               kfree(mask);
+               return err;
+       }
+
+       err = __netif_set_xps_queue(dev, mask, index, true);
+       kfree(mask);
+       return err ? : len;
+}
+
+static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init
+       = __ATTR_RW(xps_rxqs);
 #endif /* CONFIG_XPS */
 
 static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
@@ -1290,6 +1402,7 @@ static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
        &queue_traffic_class.attr,
 #ifdef CONFIG_XPS
        &xps_cpus_attribute.attr,
+       &xps_rxqs_attribute.attr,
        &queue_tx_maxrate.attr,
 #endif
        NULL
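
xps_rxqs is the receive-queue analogue of xps_cpus: instead of a CPU mask, each transmit queue takes a hex bitmap of receive queues, parsed by bitmap_parse() and applied through __netif_set_xps_queue(dev, mask, index, true) as above. A userspace sketch; the device and queue names are assumptions:

    /* Pin tx-0 of an assumed device eth0 to receive queues 0-3 via the
     * new attribute; equivalent to: echo 0f > .../queues/tx-0/xps_rxqs
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/class/net/eth0/queues/tx-0/xps_rxqs",
                          O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, "0f", 2) != 2)    /* hex rx-queue bitmap */
                    perror("write");
            close(fd);
            return 0;
    }
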
index 5ef61222fdef1f305909eeca6ac278bcac88e1b0..e03258e954c8d10781c516445cfbef00fd11972e 100644 (file)
@@ -964,7 +964,8 @@ static size_t rtnl_xdp_size(void)
 {
        size_t xdp_size = nla_total_size(0) +   /* nest IFLA_XDP */
                          nla_total_size(1) +   /* XDP_ATTACHED */
-                         nla_total_size(4);    /* XDP_PROG_ID */
+                         nla_total_size(4) +   /* XDP_PROG_ID (or 1st mode) */
+                         nla_total_size(4);    /* XDP_<mode>_PROG_ID */
 
        return xdp_size;
 }
@@ -1353,27 +1354,51 @@ static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
        return 0;
 }
 
-static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
+static u32 rtnl_xdp_prog_skb(struct net_device *dev)
 {
-       const struct net_device_ops *ops = dev->netdev_ops;
        const struct bpf_prog *generic_xdp_prog;
-       struct netdev_bpf xdp;
 
        ASSERT_RTNL();
 
-       *prog_id = 0;
        generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
-       if (generic_xdp_prog) {
-               *prog_id = generic_xdp_prog->aux->id;
-               return XDP_ATTACHED_SKB;
-       }
-       if (!ops->ndo_bpf)
-               return XDP_ATTACHED_NONE;
+       if (!generic_xdp_prog)
+               return 0;
+       return generic_xdp_prog->aux->id;
+}
 
-       __dev_xdp_query(dev, ops->ndo_bpf, &xdp);
-       *prog_id = xdp.prog_id;
+static u32 rtnl_xdp_prog_drv(struct net_device *dev)
+{
+       return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf, XDP_QUERY_PROG);
+}
 
-       return xdp.prog_attached;
+static u32 rtnl_xdp_prog_hw(struct net_device *dev)
+{
+       return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf,
+                              XDP_QUERY_PROG_HW);
+}
+
+static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
+                              u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
+                              u32 (*get_prog_id)(struct net_device *dev))
+{
+       u32 curr_id;
+       int err;
+
+       curr_id = get_prog_id(dev);
+       if (!curr_id)
+               return 0;
+
+       *prog_id = curr_id;
+       err = nla_put_u32(skb, attr, curr_id);
+       if (err)
+               return err;
+
+       if (*mode != XDP_ATTACHED_NONE)
+               *mode = XDP_ATTACHED_MULTI;
+       else
+               *mode = tgt_mode;
+
+       return 0;
 }
 
 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
@@ -1381,17 +1406,29 @@ static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
        struct nlattr *xdp;
        u32 prog_id;
        int err;
+       u8 mode;
 
        xdp = nla_nest_start(skb, IFLA_XDP);
        if (!xdp)
                return -EMSGSIZE;
 
-       err = nla_put_u8(skb, IFLA_XDP_ATTACHED,
-                        rtnl_xdp_attached_mode(dev, &prog_id));
+       prog_id = 0;
+       mode = XDP_ATTACHED_NONE;
+       if (rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
+                               IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb))
+               goto err_cancel;
+       if (rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
+                               IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv))
+               goto err_cancel;
+       if (rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
+                               IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw))
+               goto err_cancel;
+
+       err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
        if (err)
                goto err_cancel;
 
-       if (prog_id) {
+       if (prog_id && mode != XDP_ATTACHED_MULTI) {
                err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
                if (err)
                        goto err_cancel;
@@ -2759,9 +2796,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
                        return err;
        }
 
-       dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
-
-       __dev_notify_flags(dev, old_flags, ~0U);
+       if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
+               __dev_notify_flags(dev, old_flags, 0U);
+       } else {
+               dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
+               __dev_notify_flags(dev, old_flags, ~0U);
+       }
        return 0;
 }
 EXPORT_SYMBOL(rtnl_configure_link);
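
rtnl_xdp_fill() now reports one program id per attachment point (IFLA_XDP_SKB_PROG_ID, IFLA_XDP_DRV_PROG_ID, IFLA_XDP_HW_PROG_ID) and flips IFLA_XDP_ATTACHED to XDP_ATTACHED_MULTI when more than one is set, in which case the legacy IFLA_XDP_PROG_ID is omitted. A sketch of a dump consumer under the new scheme, using libnl-style accessors (the library choice is an assumption):

    /* tb[] is assumed to be the already-parsed IFLA_XDP nest. */
    #include <stdio.h>
    #include <linux/if_link.h>      /* IFLA_XDP_*, XDP_ATTACHED_* */
    #include <netlink/attr.h>       /* nla_get_u8()/nla_get_u32() */

    static void report_xdp(struct nlattr *tb[IFLA_XDP_MAX + 1])
    {
            int mode = tb[IFLA_XDP_ATTACHED] ?
                       nla_get_u8(tb[IFLA_XDP_ATTACHED]) : XDP_ATTACHED_NONE;

            if (mode == XDP_ATTACHED_MULTI) {
                    if (tb[IFLA_XDP_SKB_PROG_ID])
                            printf("generic id %u\n",
                                   nla_get_u32(tb[IFLA_XDP_SKB_PROG_ID]));
                    if (tb[IFLA_XDP_DRV_PROG_ID])
                            printf("driver id %u\n",
                                   nla_get_u32(tb[IFLA_XDP_DRV_PROG_ID]));
                    if (tb[IFLA_XDP_HW_PROG_ID])
                            printf("hw id %u\n",
                                   nla_get_u32(tb[IFLA_XDP_HW_PROG_ID]));
            } else if (mode != XDP_ATTACHED_NONE && tb[IFLA_XDP_PROG_ID]) {
                    printf("mode %d id %u\n", mode,
                           nla_get_u32(tb[IFLA_XDP_PROG_ID]));
            }
    }
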
index c642304f178ce0a4e1358d59e45032a39f76fb3f..cfd6c6f35f9c585533199ee8375e6c29de452d12 100644 (file)
@@ -805,6 +805,9 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
         * It is not yet because we do not want to have a 16 bit hole
         */
        new->queue_mapping = old->queue_mapping;
+#ifdef CONFIG_TLS_DEVICE
+       new->decrypted = old->decrypted;
+#endif
 
        memcpy(&new->headers_start, &old->headers_start,
               offsetof(struct sk_buff, headers_end) -
@@ -865,6 +868,9 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
        C(head_frag);
        C(data);
        C(truesize);
+#ifdef CONFIG_TLS_DEVICE
+       C(decrypted);
+#endif
        refcount_set(&n->users, 1);
 
        atomic_inc(&(skb_shinfo(skb)->dataref));
@@ -3815,14 +3821,14 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 }
 EXPORT_SYMBOL_GPL(skb_segment);
 
-int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 {
        struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
        unsigned int offset = skb_gro_offset(skb);
        unsigned int headlen = skb_headlen(skb);
        unsigned int len = skb_gro_len(skb);
-       struct sk_buff *lp, *p = *head;
        unsigned int delta_truesize;
+       struct sk_buff *lp;
 
        if (unlikely(p->len + len >= 65536))
                return -E2BIG;
@@ -4898,7 +4904,6 @@ EXPORT_SYMBOL(skb_try_coalesce);
  */
 void skb_scrub_packet(struct sk_buff *skb, bool xnet)
 {
-       skb->tstamp = 0;
        skb->pkt_type = PACKET_HOST;
        skb->skb_iif = 0;
        skb->ignore_df = 0;
@@ -4911,8 +4916,8 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
                return;
 
        ipvs_reset(skb);
-       skb_orphan(skb);
        skb->mark = 0;
+       skb->tstamp = 0;
 }
 EXPORT_SYMBOL_GPL(skb_scrub_packet);
 
@@ -5276,8 +5281,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
                        if (npages >= 1 << order) {
                                page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
                                                   __GFP_COMP |
-                                                  __GFP_NOWARN |
-                                                  __GFP_NORETRY,
+                                                  __GFP_NOWARN,
                                                   order);
                                if (page)
                                        goto fill_page;
index bcc41829a16d50714bdd3c25c976c0b7296fab84..03fdea5b0f575945a58fd14b546226d61ccd4988 100644 (file)
@@ -91,6 +91,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <asm/unaligned.h>
 #include <linux/capability.h>
 #include <linux/errno.h>
 #include <linux/errqueue.h>
@@ -697,6 +698,7 @@ EXPORT_SYMBOL(sk_mc_loop);
 int sock_setsockopt(struct socket *sock, int level, int optname,
                    char __user *optval, unsigned int optlen)
 {
+       struct sock_txtime sk_txtime;
        struct sock *sk = sock->sk;
        int val;
        int valbool;
@@ -1070,6 +1072,26 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
                }
                break;
 
+       case SO_TXTIME:
+               if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
+                       ret = -EPERM;
+               } else if (optlen != sizeof(struct sock_txtime)) {
+                       ret = -EINVAL;
+               } else if (copy_from_user(&sk_txtime, optval,
+                          sizeof(struct sock_txtime))) {
+                       ret = -EFAULT;
+               } else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
+                       ret = -EINVAL;
+               } else {
+                       sock_valbool_flag(sk, SOCK_TXTIME, true);
+                       sk->sk_clockid = sk_txtime.clockid;
+                       sk->sk_txtime_deadline_mode =
+                               !!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
+                       sk->sk_txtime_report_errors =
+                               !!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
+               }
+               break;
+
        default:
                ret = -ENOPROTOOPT;
                break;
@@ -1115,6 +1137,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                u64 val64;
                struct linger ling;
                struct timeval tm;
+               struct sock_txtime txtime;
        } v;
 
        int lv = sizeof(int);
@@ -1403,6 +1426,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                v.val = sock_flag(sk, SOCK_ZEROCOPY);
                break;
 
+       case SO_TXTIME:
+               lv = sizeof(v.txtime);
+               v.txtime.clockid = sk->sk_clockid;
+               v.txtime.flags |= sk->sk_txtime_deadline_mode ?
+                                 SOF_TXTIME_DEADLINE_MODE : 0;
+               v.txtime.flags |= sk->sk_txtime_report_errors ?
+                                 SOF_TXTIME_REPORT_ERRORS : 0;
+               break;
+
        default:
                /* We implement the SO_SNDLOWAT etc to not be settable
                 * (1003.1g 7).
@@ -2137,6 +2169,13 @@ int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
                sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
                sockc->tsflags |= tsflags;
                break;
+       case SCM_TXTIME:
+               if (!sock_flag(sk, SOCK_TXTIME))
+                       return -EINVAL;
+               if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
+                       return -EINVAL;
+               sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
+               break;
        /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
        case SCM_RIGHTS:
        case SCM_CREDENTIALS:
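
Taken together, the SO_TXTIME pieces above let an application opt in per socket and then stamp each packet with a transmit time: setsockopt() stores the clock and flags on the sock, and the SCM_TXTIME cmsg lands in sockc->transmit_time for the output path to consume. A userspace sketch, assuming the uapi additions from this same series (struct sock_txtime and the SO_/SCM_TXTIME constants):

    /* Opt in per socket, then stamp individual packets with a transmit
     * time (u64 nanoseconds in the chosen clock) via an SCM_TXTIME cmsg.
     */
    #include <linux/net_tstamp.h>   /* struct sock_txtime, SOF_TXTIME_* */
    #include <string.h>
    #include <sys/socket.h>
    #include <time.h>

    #ifndef SO_TXTIME
    #define SO_TXTIME  61           /* asm-generic/socket.h, this series */
    #define SCM_TXTIME SO_TXTIME
    #endif

    int enable_txtime(int fd)
    {
            struct sock_txtime st = {
                    .clockid = CLOCK_TAI,
                    .flags   = SOF_TXTIME_REPORT_ERRORS,
            };

            return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &st, sizeof(st));
    }

    ssize_t send_at(int fd, const void *buf, size_t len, __u64 txtime_ns)
    {
            char cbuf[CMSG_SPACE(sizeof(__u64))] = {};
            struct iovec iov = { (void *)buf, len };
            struct msghdr msg = {
                    .msg_iov        = &iov,
                    .msg_iovlen     = 1,
                    .msg_control    = cbuf,
                    .msg_controllen = sizeof(cbuf),
            };
            struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

            cm->cmsg_level = SOL_SOCKET;
            cm->cmsg_type  = SCM_TXTIME;
            cm->cmsg_len   = CMSG_LEN(sizeof(__u64));
            memcpy(CMSG_DATA(cm), &txtime_ns, sizeof(txtime_ns));

            return sendmsg(fd, &msg, 0);
    }
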
@@ -2401,9 +2440,10 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
 {
        struct proto *prot = sk->sk_prot;
        long allocated = sk_memory_allocated_add(sk, amt);
+       bool charged = true;
 
        if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
-           !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
+           !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
                goto suppress_allocation;
 
        /* Under limit. */
@@ -2461,7 +2501,8 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
                        return 1;
        }
 
-       trace_sock_exceed_buf_limit(sk, prot, allocated);
+       if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
+               trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
 
        sk_memory_allocated_sub(sk, amt);
 
@@ -2818,6 +2859,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
        sk->sk_pacing_rate = ~0U;
        sk->sk_pacing_shift = 10;
        sk->sk_incoming_cpu = -1;
+
+       sk_rx_queue_clear(sk);
        /*
         * Before updating sk_refcnt, we must commit prior changes to memory
         * (Documentation/RCU/rculist_nulls.txt for details)
@@ -3243,7 +3286,8 @@ static int req_prot_init(const struct proto *prot)
 
        rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
                                           rsk_prot->obj_size, 0,
-                                          prot->slab_flags, NULL);
+                                          SLAB_ACCOUNT | prot->slab_flags,
+                                          NULL);
 
        if (!rsk_prot->slab) {
                pr_crit("%s: Can't create request sock SLAB cache!\n",
@@ -3258,7 +3302,8 @@ int proto_register(struct proto *prot, int alloc_slab)
        if (alloc_slab) {
                prot->slab = kmem_cache_create_usercopy(prot->name,
                                        prot->obj_size, 0,
-                                       SLAB_HWCACHE_ALIGN | prot->slab_flags,
+                                       SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
+                                       prot->slab_flags,
                                        prot->useroffset, prot->usersize,
                                        NULL);
 
@@ -3281,6 +3326,7 @@ int proto_register(struct proto *prot, int alloc_slab)
                                kmem_cache_create(prot->twsk_prot->twsk_slab_name,
                                                  prot->twsk_prot->twsk_obj_size,
                                                  0,
+                                                 SLAB_ACCOUNT |
                                                  prot->slab_flags,
                                                  NULL);
                        if (prot->twsk_prot->twsk_slab == NULL)
index 9d1f22072d5d5b887b2a0e0bcc35af936ce38778..57285383ed00c9b3ab7616f4adddfca0d525f850 100644 (file)
@@ -3,8 +3,11 @@
  * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
  * Released under terms in GPL version 2.  See COPYING.
  */
+#include <linux/bpf.h>
+#include <linux/filter.h>
 #include <linux/types.h>
 #include <linux/mm.h>
+#include <linux/netdevice.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
 #include <linux/rhashtable.h>
@@ -45,8 +48,8 @@ static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
        BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id)
                     != sizeof(u32));
 
-       /* Use cyclic increasing ID as direct hash key, see rht_bucket_index */
-       return key << RHT_HASH_RESERVED_SPACE;
+       /* Use cyclic increasing ID as direct hash key */
+       return key;
 }
 
 static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
@@ -370,3 +373,34 @@ void xdp_return_buff(struct xdp_buff *xdp)
        __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle);
 }
 EXPORT_SYMBOL_GPL(xdp_return_buff);
+
+int xdp_attachment_query(struct xdp_attachment_info *info,
+                        struct netdev_bpf *bpf)
+{
+       bpf->prog_id = info->prog ? info->prog->aux->id : 0;
+       bpf->prog_flags = info->prog ? info->flags : 0;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(xdp_attachment_query);
+
+bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
+                            struct netdev_bpf *bpf)
+{
+       if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) {
+               NL_SET_ERR_MSG(bpf->extack,
+                              "program loaded with different flags");
+               return false;
+       }
+       return true;
+}
+EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok);
+
+void xdp_attachment_setup(struct xdp_attachment_info *info,
+                         struct netdev_bpf *bpf)
+{
+       if (info->prog)
+               bpf_prog_put(info->prog);
+       info->prog = bpf->prog;
+       info->flags = bpf->flags;
+}
+EXPORT_SYMBOL_GPL(xdp_attachment_setup);
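
These three helpers factor out bookkeeping that each XDP-capable driver was duplicating. The expected usage in a driver's ndo_bpf(), sketched with a hypothetical driver; the "mydrv" names and the embedded xdp_attachment_info are assumptions:

    struct mydrv_priv {
            struct xdp_attachment_info xdp;
            /* ... */
    };

    static int mydrv_bpf(struct net_device *dev, struct netdev_bpf *bpf)
    {
            struct mydrv_priv *priv = netdev_priv(dev);

            switch (bpf->command) {
            case XDP_QUERY_PROG:
                    return xdp_attachment_query(&priv->xdp, bpf);
            case XDP_SETUP_PROG:
                    if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
                            return -EBUSY;
                    /* ... swap bpf->prog into the datapath ... */
                    xdp_attachment_setup(&priv->xdp, bpf);
                    return 0;
            default:
                    return -EINVAL;
            }
    }
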
index 8b5ba6dffac7ebc88fd21075793dc3db43a74a43..12877a1514e7b8e873cd26529e58f7ebaae99c1a 100644 (file)
@@ -600,7 +600,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
 {
        struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
-       ktime_t now = ktime_get_real();
+       ktime_t now = ktime_get();
        s64 delta = 0;
 
        switch (fbtype) {
@@ -625,15 +625,14 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
        case CCID3_FBACK_PERIODIC:
                delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
                if (delta <= 0)
-                       DCCP_BUG("delta (%ld) <= 0", (long)delta);
-               else
-                       hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
+                       delta = 1;
+               hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
                break;
        default:
                return;
        }
 
-       ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
+       ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
                       hc->rx_x_recv, hc->rx_pinv);
 
        hc->rx_tstamp_last_feedback = now;
@@ -680,7 +679,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
 static u32 ccid3_first_li(struct sock *sk)
 {
        struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
-       u32 x_recv, p, delta;
+       u32 x_recv, p;
+       s64 delta;
        u64 fval;
 
        if (hc->rx_rtt == 0) {
@@ -688,7 +688,9 @@ static u32 ccid3_first_li(struct sock *sk)
                hc->rx_rtt = DCCP_FALLBACK_RTT;
        }
 
-       delta  = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
+       delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
+       if (delta <= 0)
+               delta = 1;
        x_recv = scaled_div32(hc->rx_bytes_recv, delta);
        if (x_recv == 0) {              /* would also trigger divide-by-zero */
                DCCP_WARN("X_recv==0\n");
index 0ea2ee56ac1bee6948ee4ed37c8172b300a7f9de..f91e3816806baae37e0e0793dcef72e8b291777e 100644 (file)
@@ -316,7 +316,8 @@ int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                 int flags, int *addr_len);
 void dccp_shutdown(struct sock *sk, int how);
 int inet_dccp_listen(struct socket *sock, int backlog);
-__poll_t dccp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t dccp_poll(struct file *file, struct socket *sock,
+                      poll_table *wait);
 int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 void dccp_req_err(struct sock *sk, u64 seq);
 
index a9e478cd3787c90f3d81e3bc2f71a14f7b11e280..b08feb219b44b67eadf408a33649d8c7ec9db2d0 100644 (file)
@@ -984,7 +984,7 @@ static const struct proto_ops inet_dccp_ops = {
        .accept            = inet_accept,
        .getname           = inet_getname,
        /* FIXME: work on tcp_poll to rename it to inet_csk_poll */
-       .poll_mask         = dccp_poll_mask,
+       .poll              = dccp_poll,
        .ioctl             = inet_ioctl,
        /* FIXME: work on inet_listen to rename it to sock_common_listen */
        .listen            = inet_dccp_listen,
index 17fc4e0166ba89ed435dc65bbdd5951d9018c093..6344f1b18a6a1b30cd2f3c559987a2c9e9546f81 100644 (file)
@@ -1070,7 +1070,7 @@ static const struct proto_ops inet6_dccp_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet6_getname,
-       .poll_mask         = dccp_poll_mask,
+       .poll              = dccp_poll,
        .ioctl             = inet6_ioctl,
        .listen            = inet_dccp_listen,
        .shutdown          = inet_shutdown,
index ca21c1c76da013575d5bd0c8b3a4ac42eb2b229b..0d56e36a6db7b77dcdeb9697dd81bf62895e6e4c 100644 (file)
@@ -312,11 +312,20 @@ int dccp_disconnect(struct sock *sk, int flags)
 
 EXPORT_SYMBOL_GPL(dccp_disconnect);
 
-__poll_t dccp_poll_mask(struct socket *sock, __poll_t events)
+/*
+ *     Wait for a DCCP event.
+ *
+ *     Note that we don't need to lock the socket, as the upper poll layers
+ *     take care of normal races (between the test and the event) and we don't
+ *     go look at any of the socket buffers directly.
+ */
+__poll_t dccp_poll(struct file *file, struct socket *sock,
+                      poll_table *wait)
 {
        __poll_t mask;
        struct sock *sk = sock->sk;
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
        if (sk->sk_state == DCCP_LISTEN)
                return inet_csk_listen_poll(sk);
 
@@ -358,7 +367,7 @@ __poll_t dccp_poll_mask(struct socket *sock, __poll_t events)
        return mask;
 }
 
-EXPORT_SYMBOL_GPL(dccp_poll_mask);
+EXPORT_SYMBOL_GPL(dccp_poll);
 
 int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
index 9a686d890bfad179c09a182245a96bba5dba21ea..7d6ff983ba2cbbf7915a61ffad57e52f66f3a193 100644 (file)
@@ -1207,11 +1207,11 @@ static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int peer)
 }
 
 
-static __poll_t dn_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table  *wait)
 {
        struct sock *sk = sock->sk;
        struct dn_scp *scp = DN_SK(sk);
-       __poll_t mask = datagram_poll_mask(sock, events);
+       __poll_t mask = datagram_poll(file, sock, wait);
 
        if (!skb_queue_empty(&scp->other_receive_queue))
                mask |= EPOLLRDBAND;
@@ -2331,7 +2331,7 @@ static const struct proto_ops dn_proto_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       dn_accept,
        .getname =      dn_getname,
-       .poll_mask =    dn_poll_mask,
+       .poll =         dn_poll,
        .ioctl =        dn_ioctl,
        .listen =       dn_listen,
        .shutdown =     dn_shutdown,
index 1b2120645730577de9156215bee70c9a2b9e59e2..34aba55ed573db914231c4933429e48b24908c01 100644 (file)
@@ -491,6 +491,7 @@ static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb)
                break;
        case DN_RUN:
                sk->sk_shutdown |= SHUTDOWN_MASK;
+               /* fall through */
        case DN_CC:
                scp->state = DN_CN;
        }
index 1e3b6a6d8a40dcf69200ead186a6ab8919e63db6..71536c4351327e426a4e901bf84edfd94e07afd7 100644 (file)
@@ -900,7 +900,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
 
        switch (f->command) {
        case TC_BLOCK_BIND:
-               return tcf_block_cb_register(f->block, cb, dev, dev);
+               return tcf_block_cb_register(f->block, cb, dev, dev, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, cb, dev);
                return 0;
index ee28440f57c58f4eec29e67641a49efcbd36c8cd..fd8faa0dfa6193a186e562dae33008b4a0dc2182 100644 (file)
@@ -427,13 +427,13 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
 }
 EXPORT_SYMBOL(sysfs_format_mac);
 
-struct sk_buff **eth_gro_receive(struct sk_buff **head,
-                                struct sk_buff *skb)
+struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
-       struct sk_buff *p, **pp = NULL;
-       struct ethhdr *eh, *eh2;
-       unsigned int hlen, off_eth;
        const struct packet_offload *ptype;
+       unsigned int hlen, off_eth;
+       struct sk_buff *pp = NULL;
+       struct ethhdr *eh, *eh2;
+       struct sk_buff *p;
        __be16 type;
        int flush = 1;
 
@@ -448,7 +448,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
 
        flush = 0;
 
-       for (p = *head; p; p = p->next) {
+       list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;
 
index a0768d2759b8ecb8954dd544561b68f26d0c6510..a60658c85a9ad09b405f2d928e70acf64a9ebc4d 100644 (file)
@@ -423,7 +423,7 @@ static const struct proto_ops ieee802154_raw_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = sock_no_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = ieee802154_sock_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = sock_no_shutdown,
@@ -969,7 +969,7 @@ static const struct proto_ops ieee802154_dgram_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = sock_no_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = ieee802154_sock_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = sock_no_shutdown,
index 15e125558c76e5fa2fe466ab0d64be1d3183ebed..f2a0a3bab6b5bd8eb8a1946a17ef8a3e0cea5dab 100644 (file)
@@ -229,6 +229,7 @@ int inet_listen(struct socket *sock, int backlog)
                err = inet_csk_listen_start(sk, backlog);
                if (err)
                        goto out;
+               tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
        }
        sk->sk_max_ack_backlog = backlog;
        err = 0;
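
The new tcp_call_bpf() hook gives BPF_PROG_TYPE_SOCK_OPS programs a callback when a socket enters TCP_LISTEN. Skeleton of a program keying off the new op; section naming and loading conventions are assumptions:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>    /* SEC(); libbpf header layout assumed */

    SEC("sockops")
    int on_listen(struct bpf_sock_ops *skops)
    {
            if (skops->op == BPF_SOCK_OPS_TCP_LISTEN_CB) {
                    /* e.g. bpf_setsockopt(skops, ...) for per-listener
                     * tuning; the callback fires once per transition to
                     * TCP_LISTEN.
                     */
            }
            return 1;
    }
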
@@ -986,7 +987,7 @@ const struct proto_ops inet_stream_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet_getname,
-       .poll_mask         = tcp_poll_mask,
+       .poll              = tcp_poll,
        .ioctl             = inet_ioctl,
        .listen            = inet_listen,
        .shutdown          = inet_shutdown,
@@ -1021,7 +1022,7 @@ const struct proto_ops inet_dgram_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = inet_getname,
-       .poll_mask         = udp_poll_mask,
+       .poll              = udp_poll,
        .ioctl             = inet_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
@@ -1042,7 +1043,7 @@ EXPORT_SYMBOL(inet_dgram_ops);
 
 /*
  * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
- * udp_poll_mask
+ * udp_poll
  */
 static const struct proto_ops inet_sockraw_ops = {
        .family            = PF_INET,
@@ -1053,7 +1054,7 @@ static const struct proto_ops inet_sockraw_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = inet_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = inet_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
@@ -1384,12 +1385,12 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 }
 EXPORT_SYMBOL(inet_gso_segment);
 
-struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
        const struct net_offload *ops;
-       struct sk_buff **pp = NULL;
-       struct sk_buff *p;
+       struct sk_buff *pp = NULL;
        const struct iphdr *iph;
+       struct sk_buff *p;
        unsigned int hlen;
        unsigned int off;
        unsigned int id;
@@ -1425,7 +1426,7 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
        id >>= 16;
 
-       for (p = *head; p; p = p->next) {
+       list_for_each_entry(p, head, list) {
                struct iphdr *iph2;
                u16 flush_id;
 
@@ -1505,8 +1506,8 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(inet_gro_receive);
 
-static struct sk_buff **ipip_gro_receive(struct sk_buff **head,
-                                        struct sk_buff *skb)
+static struct sk_buff *ipip_gro_receive(struct list_head *head,
+                                       struct sk_buff *skb)
 {
        if (NAPI_GRO_CB(skb)->encap_mark) {
                NAPI_GRO_CB(skb)->flush = 1;
@@ -1882,6 +1883,7 @@ fs_initcall(ipv4_offload_init);
 static struct packet_type ip_packet_type __read_mostly = {
        .type = cpu_to_be16(ETH_P_IP),
        .func = ip_rcv,
+       .list_func = ip_list_rcv,
 };
 
 static int __init inet_init(void)
index 7cf755ef9efba3c13fcee22e1a24a320f4d9a503..bbeecd13e53477ce21c4c77f876f6e84966e1cb0 100644 (file)
@@ -28,8 +28,8 @@
 #include <linux/spinlock.h>
 #include <net/udp.h>
 
-static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
-                                        struct sk_buff *skb)
+static struct sk_buff *esp4_gro_receive(struct list_head *head,
+                                       struct sk_buff *skb)
 {
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
index 1540db65241a6fd4d96b00546f13a3e3d3cd1815..500a59906b8719eb40fc3f37a0dc535b10b3069e 100644 (file)
@@ -224,14 +224,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
        return 0;
 }
 
-static struct sk_buff **fou_gro_receive(struct sock *sk,
-                                       struct sk_buff **head,
-                                       struct sk_buff *skb)
+static struct sk_buff *fou_gro_receive(struct sock *sk,
+                                      struct list_head *head,
+                                      struct sk_buff *skb)
 {
-       const struct net_offload *ops;
-       struct sk_buff **pp = NULL;
        u8 proto = fou_from_sock(sk)->protocol;
        const struct net_offload **offloads;
+       const struct net_offload *ops;
+       struct sk_buff *pp = NULL;
 
        /* We can clear the encap_mark for FOU as we are essentially doing
         * one of two possible things.  We are either adding an L4 tunnel
@@ -305,13 +305,13 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
        return guehdr;
 }
 
-static struct sk_buff **gue_gro_receive(struct sock *sk,
-                                       struct sk_buff **head,
-                                       struct sk_buff *skb)
+static struct sk_buff *gue_gro_receive(struct sock *sk,
+                                      struct list_head *head,
+                                      struct sk_buff *skb)
 {
        const struct net_offload **offloads;
        const struct net_offload *ops;
-       struct sk_buff **pp = NULL;
+       struct sk_buff *pp = NULL;
        struct sk_buff *p;
        struct guehdr *guehdr;
        size_t len, optlen, hdrlen, off;
@@ -397,7 +397,7 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
 
        skb_gro_pull(skb, hdrlen);
 
-       for (p = *head; p; p = p->next) {
+       list_for_each_entry(p, head, list) {
                const struct guehdr *guehdr2;
 
                if (!NAPI_GRO_CB(p)->same_flow)
@@ -448,9 +448,7 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
-       skb_gro_remcsum_cleanup(skb, &grc);
-       skb->remcsum_offload = 0;
+       skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 
        return pp;
 }
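
All of the *_gro_receive conversions in this series follow the same mechanical pattern, worth stating once: the singly linked per-bucket chain threaded through skb->next (hence the old struct sk_buff **head parameters) becomes a struct list_head walked with the ordinary list iterator, and the tail bookkeeping collapses into skb_gro_flush_final() or, where remote checksum offload is involved, skb_gro_flush_final_remcsum(). Schematically:

    /* before (pointer chain):            after (list_head):
     *
     *   for (p = *head; p; p = p->next)    list_for_each_entry(p, head, list)
     *           ...                                ...
     *
     *   NAPI_GRO_CB(skb)->flush |= flush;  skb_gro_flush_final(skb, pp, flush);
     *   return pp;  (struct sk_buff **)    return pp;  (struct sk_buff *)
     */
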
index 1859c473b21a862b383edebbcf2c1656f9c58b3b..6c63524f598a9b5171bfda0692df824883faa136 100644 (file)
@@ -108,10 +108,10 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
        return segs;
 }
 
-static struct sk_buff **gre_gro_receive(struct sk_buff **head,
-                                       struct sk_buff *skb)
+static struct sk_buff *gre_gro_receive(struct list_head *head,
+                                      struct sk_buff *skb)
 {
-       struct sk_buff **pp = NULL;
+       struct sk_buff *pp = NULL;
        struct sk_buff *p;
        const struct gre_base_hdr *greh;
        unsigned int hlen, grehlen;
@@ -182,7 +182,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
                                             null_compute_pseudo);
        }
 
-       for (p = *head; p; p = p->next) {
+       list_for_each_entry(p, head, list) {
                const struct gre_base_hdr *greh2;
 
                if (!NAPI_GRO_CB(p)->same_flow)
@@ -223,7 +223,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
index 1617604c92847d5b0d058fac40dc3707f22348a5..695979b7ef6d08c2056384d90c9671efd7ae90dd 100644 (file)
@@ -429,14 +429,11 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 
        icmp_param->data.icmph.checksum = 0;
 
+       ipcm_init(&ipc);
        inet->tos = ip_hdr(skb)->tos;
        sk->sk_mark = mark;
        daddr = ipc.addr = ip_hdr(skb)->saddr;
        saddr = fib_compute_spec_dst(skb);
-       ipc.opt = NULL;
-       ipc.tx_flags = 0;
-       ipc.ttl = 0;
-       ipc.tos = -1;
 
        if (icmp_param->replyopts.opt.opt.optlen) {
                ipc.opt = &icmp_param->replyopts.opt;
@@ -710,11 +707,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        icmp_param.offset = skb_network_offset(skb_in);
        inet_sk(sk)->tos = tos;
        sk->sk_mark = mark;
+       ipcm_init(&ipc);
        ipc.addr = iph->saddr;
        ipc.opt = &icmp_param.replyopts.opt;
-       ipc.tx_flags = 0;
-       ipc.ttl = 0;
-       ipc.tos = -1;
 
        rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
                               type, code, &icmp_param);
index c9e35b81d0931df8429a33e8d03e719b87da0747..316518f87294b7d04572cea63c75dad60f9c2dc2 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
 #include <linux/slab.h>
+#include <linux/rhashtable.h>
 
 #include <net/sock.h>
 #include <net/inet_frag.h>
index 31ff46daae974645dfe73c97e6e507a0ad62dd4b..3647167c8fa313f9eb7a5c5ad34cb0cb7a7aea5e 100644 (file)
@@ -243,9 +243,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
                        bool dev_match = (sk->sk_bound_dev_if == dif ||
                                          sk->sk_bound_dev_if == sdif);
 
-                       if (exact_dif && !dev_match)
+                       if (!dev_match)
                                return -1;
-                       if (sk->sk_bound_dev_if && dev_match)
+                       if (sk->sk_bound_dev_if)
                                score += 4;
                }
                if (sk->sk_incoming_cpu == raw_smp_processor_id())
index 2d8efeecf61976f00c0700cc7f64f749b9482a73..c8ca5d8f0f75a911879bfbff4c2c38df77f56f00 100644 (file)
@@ -587,6 +587,8 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
                goto err_free_skb;
 
        key = &tun_info->key;
+       if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
+               goto err_free_rt;
        md = ip_tunnel_info_opts(tun_info);
        if (!md)
                goto err_free_rt;
index 7582713dd18f37b5c27cdc85ff62626a8ad4f435..3196cf58f4189d4d11c3b895b15f430c720e0a2d 100644 (file)
@@ -307,7 +307,8 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
        return true;
 }
 
-static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+static int ip_rcv_finish_core(struct net *net, struct sock *sk,
+                             struct sk_buff *skb)
 {
        const struct iphdr *iph = ip_hdr(skb);
        int (*edemux)(struct sk_buff *skb);
@@ -315,13 +316,6 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
        struct rtable *rt;
        int err;
 
-       /* if ingress device is enslaved to an L3 master device pass the
-        * skb to its handler for processing
-        */
-       skb = l3mdev_ip_rcv(skb);
-       if (!skb)
-               return NET_RX_SUCCESS;
-
        if (net->ipv4.sysctl_ip_early_demux &&
            !skb_dst(skb) &&
            !skb->sk &&
@@ -393,7 +387,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
                        goto drop;
        }
 
-       return dst_input(skb);
+       return NET_RX_SUCCESS;
 
 drop:
        kfree_skb(skb);
@@ -405,13 +399,29 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
        goto drop;
 }
 
+static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+       int ret;
+
+       /* if ingress device is enslaved to an L3 master device pass the
+        * skb to its handler for processing
+        */
+       skb = l3mdev_ip_rcv(skb);
+       if (!skb)
+               return NET_RX_SUCCESS;
+
+       ret = ip_rcv_finish_core(net, sk, skb);
+       if (ret != NET_RX_DROP)
+               ret = dst_input(skb);
+       return ret;
+}
+
 /*
  *     Main IP Receive routine.
  */
-int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
 {
        const struct iphdr *iph;
-       struct net *net;
        u32 len;
 
        /* When the interface is in promisc. mode, drop all the crap
@@ -421,7 +431,6 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
                goto drop;
 
 
-       net = dev_net(dev);
        __IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);
 
        skb = skb_share_check(skb, GFP_ATOMIC);
@@ -489,9 +498,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
        /* Must drop socket now because of tproxy. */
        skb_orphan(skb);
 
-       return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
-                      net, NULL, skb, dev, NULL,
-                      ip_rcv_finish);
+       return skb;
 
 csum_error:
        __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
@@ -500,5 +507,113 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 drop:
        kfree_skb(skb);
 out:
-       return NET_RX_DROP;
+       return NULL;
+}
+
+/*
+ * IP receive entry point
+ */
+int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
+          struct net_device *orig_dev)
+{
+       struct net *net = dev_net(dev);
+
+       skb = ip_rcv_core(skb, net);
+       if (skb == NULL)
+               return NET_RX_DROP;
+       return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
+                      net, NULL, skb, dev, NULL,
+                      ip_rcv_finish);
+}
+
+static void ip_sublist_rcv_finish(struct list_head *head)
+{
+       struct sk_buff *skb, *next;
+
+       list_for_each_entry_safe(skb, next, head, list) {
+               list_del(&skb->list);
+               /* Handle the ip{6}_forward case, as sch_direct_xmit has
+                * another kind of SKB-list usage (see validate_xmit_skb_list)
+                */
+               skb->next = NULL;
+               dst_input(skb);
+       }
+}
+
+static void ip_list_rcv_finish(struct net *net, struct sock *sk,
+                              struct list_head *head)
+{
+       struct dst_entry *curr_dst = NULL;
+       struct sk_buff *skb, *next;
+       struct list_head sublist;
+
+       INIT_LIST_HEAD(&sublist);
+       list_for_each_entry_safe(skb, next, head, list) {
+               struct dst_entry *dst;
+
+               list_del(&skb->list);
+               /* if ingress device is enslaved to an L3 master device pass the
+                * skb to its handler for processing
+                */
+               skb = l3mdev_ip_rcv(skb);
+               if (!skb)
+                       continue;
+               if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
+                       continue;
+
+               dst = skb_dst(skb);
+               if (curr_dst != dst) {
+                       /* dispatch old sublist */
+                       if (!list_empty(&sublist))
+                               ip_sublist_rcv_finish(&sublist);
+                       /* start new sublist */
+                       INIT_LIST_HEAD(&sublist);
+                       curr_dst = dst;
+               }
+               list_add_tail(&skb->list, &sublist);
+       }
+       /* dispatch final sublist */
+       ip_sublist_rcv_finish(&sublist);
+}
+
+static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
+                          struct net *net)
+{
+       NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
+                    head, dev, NULL, ip_rcv_finish);
+       ip_list_rcv_finish(net, NULL, head);
+}
+
+/* Receive a list of IP packets */
+void ip_list_rcv(struct list_head *head, struct packet_type *pt,
+                struct net_device *orig_dev)
+{
+       struct net_device *curr_dev = NULL;
+       struct net *curr_net = NULL;
+       struct sk_buff *skb, *next;
+       struct list_head sublist;
+
+       INIT_LIST_HEAD(&sublist);
+       list_for_each_entry_safe(skb, next, head, list) {
+               struct net_device *dev = skb->dev;
+               struct net *net = dev_net(dev);
+
+               list_del(&skb->list);
+               skb = ip_rcv_core(skb, net);
+               if (skb == NULL)
+                       continue;
+
+               if (curr_dev != dev || curr_net != net) {
+                       /* dispatch old sublist */
+                       if (!list_empty(&sublist))
+                               ip_sublist_rcv(&sublist, curr_dev, curr_net);
+                       /* start new sublist */
+                       INIT_LIST_HEAD(&sublist);
+                       curr_dev = dev;
+                       curr_net = net;
+               }
+               list_add_tail(&skb->list, &sublist);
+       }
+       /* dispatch final sublist */
+       ip_sublist_rcv(&sublist, curr_dev, curr_net);
 }
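
ip_list_rcv() and ip_list_rcv_finish() share one idea: keep the batch together as long as consecutive packets agree on a key (first net/dev for the netfilter hook, then dst for dst_input()), and flush a sublist whenever the key changes. The core pattern, reduced to a generic sketch where key() and next_stage() are placeholders:

    static void dispatch_runs(struct list_head *head)
    {
            struct sk_buff *skb, *next;
            struct list_head sublist;
            void *curr_key = NULL;

            INIT_LIST_HEAD(&sublist);
            list_for_each_entry_safe(skb, next, head, list) {
                    list_del(&skb->list);
                    if (key(skb) != curr_key) {
                            /* dispatch the run that just ended */
                            if (!list_empty(&sublist))
                                    next_stage(&sublist);
                            INIT_LIST_HEAD(&sublist);
                            curr_key = key(skb);
                    }
                    list_add_tail(&skb->list, &sublist);
            }
            /* dispatch the final run */
            if (!list_empty(&sublist))
                    next_stage(&sublist);
    }
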
index af5a830ff6ad320ae68066ab86476962db978f79..e2b6bd478afb5ea50b8182b32f87cc454a0c540e 100644 (file)
@@ -423,7 +423,8 @@ static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
 }
 
 /* Note: skb->sk can be different from sk, in case of tunnels */
-int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
+int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
+                   __u8 tos)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct net *net = sock_net(sk);
@@ -462,7 +463,7 @@ int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
                                           inet->inet_dport,
                                           inet->inet_sport,
                                           sk->sk_protocol,
-                                          RT_CONN_FLAGS(sk),
+                                          RT_CONN_FLAGS_TOS(sk, tos),
                                           sk->sk_bound_dev_if);
                if (IS_ERR(rt))
                        goto no_route;
@@ -478,7 +479,7 @@ int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
        skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
-       *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
+       *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
        if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
                iph->frag_off = htons(IP_DF);
        else
@@ -511,7 +512,7 @@ int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
        kfree_skb(skb);
        return -EHOSTUNREACH;
 }
-EXPORT_SYMBOL(ip_queue_xmit);
+EXPORT_SYMBOL(__ip_queue_xmit);
 
 static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 {
@@ -1145,13 +1146,15 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
        cork->fragsize = ip_sk_use_pmtu(sk) ?
                         dst_mtu(&rt->dst) : rt->dst.dev->mtu;
 
-       cork->gso_size = sk->sk_type == SOCK_DGRAM ? ipc->gso_size : 0;
+       cork->gso_size = ipc->gso_size;
        cork->dst = &rt->dst;
        cork->length = 0;
        cork->ttl = ipc->ttl;
        cork->tos = ipc->tos;
        cork->priority = ipc->priority;
-       cork->tx_flags = ipc->tx_flags;
+       cork->transmit_time = ipc->sockc.transmit_time;
+       cork->tx_flags = 0;
+       sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);
 
        return 0;
 }
@@ -1412,6 +1415,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
 
        skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
        skb->mark = sk->sk_mark;
+       skb->tstamp = cork->transmit_time;
        /*
         * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
         * on dst refcount
@@ -1544,11 +1548,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
        if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
                return;
 
+       ipcm_init(&ipc);
        ipc.addr = daddr;
-       ipc.opt = NULL;
-       ipc.tx_flags = 0;
-       ipc.ttl = 0;
-       ipc.tos = -1;
 
        if (replyopts.opt.opt.optlen) {
                ipc.opt = &replyopts.opt;
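
The ipcm_init() call above replaces the open-coded initialisation of the ipc fields; the same consolidation appears as ipcm_init_sk() in ping.c, raw.c and udp.c further down. The helpers themselves are outside this hunk, so the sketch below is inferred from the fields the deleted lines used to set; it is an assumption, not the actual header change. The __ip_queue_xmit() rename above likewise implies a thin ip_queue_xmit() wrapper in the headers that passes inet->tos on behalf of existing callers.

    /* Inferred sketch, not the actual helpers: reset the cookie, keep the
     * "unset" tos sentinel, and let the _sk variant pull socket defaults.
     */
    static inline void ipcm_init(struct ipcm_cookie *ipc)
    {
            *ipc = (struct ipcm_cookie) { .tos = -1 };
    }

    static inline void ipcm_init_sk(struct ipcm_cookie *ipc,
                                    const struct inet_sock *inet)
    {
            ipcm_init(ipc);
            ipc->sockc.tsflags = inet->sk.sk_tsflags;
            ipc->oif = inet->sk.sk_bound_dev_if;
            ipc->addr = inet->inet_saddr;
    }
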
index 9f79b9803a161675c5907c1016fd219ad1f33fc9..5660adcf7a042ba675026a8397759618fd2a56b3 100644 (file)
@@ -60,6 +60,7 @@
 #include <linux/netfilter_ipv4.h>
 #include <linux/compat.h>
 #include <linux/export.h>
+#include <linux/rhashtable.h>
 #include <net/ip_tunnels.h>
 #include <net/checksum.h>
 #include <net/netlink.h>
@@ -1051,7 +1052,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
        struct sk_buff *skb;
        int ret;
 
-       if (assert == IGMPMSG_WHOLEPKT)
+       if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE)
                skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
        else
                skb = alloc_skb(128, GFP_ATOMIC);
@@ -1059,7 +1060,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
        if (!skb)
                return -ENOBUFS;
 
-       if (assert == IGMPMSG_WHOLEPKT) {
+       if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE) {
                /* Ugly, but we have no choice with this interface.
                 * Duplicate old header, fix ihl, length etc.
                 * And all this only to mangle msg->im_msgtype and
@@ -1070,9 +1071,12 @@ static int ipmr_cache_report(struct mr_table *mrt,
                skb_reset_transport_header(skb);
                msg = (struct igmpmsg *)skb_network_header(skb);
                memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
-               msg->im_msgtype = IGMPMSG_WHOLEPKT;
+               msg->im_msgtype = assert;
                msg->im_mbz = 0;
-               msg->im_vif = mrt->mroute_reg_vif_num;
+               if (assert == IGMPMSG_WRVIFWHOLE)
+                       msg->im_vif = vifi;
+               else
+                       msg->im_vif = mrt->mroute_reg_vif_num;
                ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
                ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
                                             sizeof(struct iphdr));
@@ -1371,6 +1375,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
        struct mr_table *mrt;
        struct vifctl vif;
        struct mfcctl mfc;
+       bool do_wrvifwhole;
        u32 uval;
 
        /* There's one exception to the lock - MRT_DONE which needs to unlock */
@@ -1501,10 +1506,12 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
                        break;
                }
 
+               do_wrvifwhole = (val == IGMPMSG_WRVIFWHOLE);
                val = !!val;
                if (val != mrt->mroute_do_pim) {
                        mrt->mroute_do_pim = val;
                        mrt->mroute_do_assert = val;
+                       mrt->mroute_do_wrvifwhole = do_wrvifwhole;
                }
                break;
        case MRT_TABLE:
@@ -1982,6 +1989,9 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
                               MFC_ASSERT_THRESH)) {
                        c->_c.mfc_un.res.last_assert = jiffies;
                        ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
+                       if (mrt->mroute_do_wrvifwhole)
+                               ipmr_cache_report(mrt, skb, true_vifi,
+                                                 IGMPMSG_WRVIFWHOLE);
                }
                goto dont_forward;
        }
@@ -2658,7 +2668,9 @@ static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
                        mrt->mroute_reg_vif_num) ||
            nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
                       mrt->mroute_do_assert) ||
-           nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim))
+           nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim) ||
+           nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE,
+                      mrt->mroute_do_wrvifwhole))
                return false;
 
        return true;
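
With mroute_do_wrvifwhole wired up above, a PIM daemon opts in by passing IGMPMSG_WRVIFWHOLE as the MRT_PIM value; any other non-zero value still just enables PIM. Wrong-VIF events then deliver the whole packet (IGMPMSG_WRVIFWHOLE) in addition to the classic IGMPMSG_WRONGVIF notification. A userspace sketch, assuming the uapi constant lands with this series (the fallback values mirror linux/mroute.h):

    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef MRT_INIT
    #define MRT_INIT  200
    #endif
    #ifndef MRT_PIM
    #define MRT_PIM   208
    #endif
    #ifndef IGMPMSG_WRVIFWHOLE
    #define IGMPMSG_WRVIFWHOLE 4            /* assumed uapi value */
    #endif

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP); /* needs CAP_NET_ADMIN */
            int on = 1, val = IGMPMSG_WRVIFWHOLE;

            if (fd < 0 ||
                setsockopt(fd, IPPROTO_IP, MRT_INIT, &on, sizeof(on)) < 0 ||
                setsockopt(fd, IPPROTO_IP, MRT_PIM, &val, sizeof(val)) < 0)
                    perror("mroute setup");
            return 0;
    }
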
index cafb0506c8c99d57606c314863e7c05c2a81ba69..1ad9aa62a97b28e2f30c6d63bbad2afb34385a0c 100644 (file)
@@ -2,6 +2,7 @@
  * Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementation
  */
 
+#include <linux/rhashtable.h>
 #include <linux/mroute_base.h>
 
 /* Sets everything common except 'dev', since that is done under locking */
index 4388de0e5380c6423fbdfe7438727900fc297d7c..1e6f28c97d3a23bc4b17944a66ae92a93b214f69 100644 (file)
@@ -35,7 +35,7 @@ static const struct nf_loginfo default_loginfo = {
 };
 
 /* One level of recursion won't kill us */
-static void dump_ipv4_packet(struct nf_log_buf *m,
+static void dump_ipv4_packet(struct net *net, struct nf_log_buf *m,
                             const struct nf_loginfo *info,
                             const struct sk_buff *skb, unsigned int iphoff)
 {
@@ -183,7 +183,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m,
                        /* Max length: 3+maxlen */
                        if (!iphoff) { /* Only recurse once. */
                                nf_log_buf_add(m, "[");
-                               dump_ipv4_packet(m, info, skb,
+                               dump_ipv4_packet(net, m, info, skb,
                                            iphoff + ih->ihl*4+sizeof(_icmph));
                                nf_log_buf_add(m, "] ");
                        }
@@ -251,7 +251,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m,
 
        /* Max length: 15 "UID=4294967295 " */
        if ((logflags & NF_LOG_UID) && !iphoff)
-               nf_log_dump_sk_uid_gid(m, skb->sk);
+               nf_log_dump_sk_uid_gid(net, m, skb->sk);
 
        /* Max length: 16 "MARK=0xFFFFFFFF " */
        if (!iphoff && skb->mark)
@@ -333,7 +333,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf,
        if (in != NULL)
                dump_ipv4_mac_header(m, loginfo, skb);
 
-       dump_ipv4_packet(m, loginfo, skb, 0);
+       dump_ipv4_packet(net, m, loginfo, skb, 0);
 
        nf_log_buf_close(m);
 }
index 2ed64bca54e351e1ab51f7604b65ac72cbfb8e59..b54c964ad9256ca2c5ac7eff239366f2869588a9 100644 (file)
@@ -739,13 +739,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                /* no remote port */
        }
 
-       ipc.sockc.tsflags = sk->sk_tsflags;
-       ipc.addr = inet->inet_saddr;
-       ipc.opt = NULL;
-       ipc.oif = sk->sk_bound_dev_if;
-       ipc.tx_flags = 0;
-       ipc.ttl = 0;
-       ipc.tos = -1;
+       ipcm_init_sk(&ipc, inet);
 
        if (msg->msg_controllen) {
                err = ip_cmsg_send(sk, msg, &ipc, false);
@@ -769,8 +763,6 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                rcu_read_unlock();
        }
 
-       sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
-
        saddr = ipc.addr;
        ipc.addr = faddr = daddr;
 
index 77350c1256ce9bda462f5c81c91af3834e41b04a..b46e4cf9a55a1aa58e1fa344443e184053e05ffd 100644 (file)
@@ -287,6 +287,8 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPDelivered", LINUX_MIB_TCPDELIVERED),
        SNMP_MIB_ITEM("TCPDeliveredCE", LINUX_MIB_TCPDELIVEREDCE),
        SNMP_MIB_ITEM("TCPAckCompressed", LINUX_MIB_TCPACKCOMPRESSED),
+       SNMP_MIB_ITEM("TCPZeroWindowDrop", LINUX_MIB_TCPZEROWINDOWDROP),
+       SNMP_MIB_ITEM("TCPRcvQDrop", LINUX_MIB_TCPRCVQDROP),
        SNMP_MIB_SENTINEL
 };
 
index abb3c9490c551781822f0fb40ca2bafe960c1339..33df4d76db2d948d620ffc809574b364ae24ad4b 100644 (file)
@@ -381,6 +381,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
 
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
+       skb->tstamp = sockc->transmit_time;
        skb_dst_set(skb, &rt->dst);
        *rtp = NULL;
 
@@ -561,13 +562,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                daddr = inet->inet_daddr;
        }
 
-       ipc.sockc.tsflags = sk->sk_tsflags;
-       ipc.addr = inet->inet_saddr;
-       ipc.opt = NULL;
-       ipc.tx_flags = 0;
-       ipc.ttl = 0;
-       ipc.tos = -1;
-       ipc.oif = sk->sk_bound_dev_if;
+       ipcm_init_sk(&ipc, inet);
 
        if (msg->msg_controllen) {
                err = ip_cmsg_send(sk, msg, &ipc, false);
@@ -670,8 +665,6 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                                      &rt, msg->msg_flags, &ipc.sockc);
 
         else {
-               sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
-
                if (!ipc.addr)
                        ipc.addr = fl4.daddr;
                lock_sock(sk);
index d06247ba08b2667b1049329e8921af9388545c54..af0a857d8352f2bc94d4f6baa0438a7ac9b2157b 100644 (file)
@@ -265,8 +265,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
            ipv4.sysctl_tcp_fastopen);
        struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
        struct tcp_fastopen_context *ctxt;
-       int ret;
        u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
+       __le32 key[4];
+       int ret, i;
 
        tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
        if (!tbl.data)
@@ -275,11 +276,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
        rcu_read_lock();
        ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
        if (ctxt)
-               memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+               memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
        else
-               memset(user_key, 0, sizeof(user_key));
+               memset(key, 0, sizeof(key));
        rcu_read_unlock();
 
+       for (i = 0; i < ARRAY_SIZE(key); i++)
+               user_key[i] = le32_to_cpu(key[i]);
+
        snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
                user_key[0], user_key[1], user_key[2], user_key[3]);
        ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
@@ -290,13 +294,17 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
                        ret = -EINVAL;
                        goto bad_key;
                }
-               tcp_fastopen_reset_cipher(net, NULL, user_key,
+
+               for (i = 0; i < ARRAY_SIZE(user_key); i++)
+                       key[i] = cpu_to_le32(user_key[i]);
+
+               tcp_fastopen_reset_cipher(net, NULL, key,
                                          TCP_FASTOPEN_KEY_LENGTH);
        }
 
 bad_key:
        pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
-              user_key[0], user_key[1], user_key[2], user_key[3],
+               user_key[0], user_key[1], user_key[2], user_key[3],
               (char *)tbl.data, ret);
        kfree(tbl.data);
        return ret;
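
The fix above matters on big-endian machines: the fastopen key is stored as raw bytes, so memcpy()ing it into host-order words and printing them with %08x produced a different proc string than little-endian hosts show (and than what was written). Reading and writing through __le32 pins the representation. A small endian-independent userspace analog, with le32toh() playing the role of le32_to_cpu():

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* 16 key bytes as they sit in memory (TCP_FASTOPEN_KEY_LENGTH) */
            uint8_t key[16] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
                                0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 };
            uint32_t le[4], host[4];

            memcpy(le, key, sizeof(le));
            for (int i = 0; i < 4; i++)
                    host[i] = le32toh(le[i]);   /* kernel: le32_to_cpu() */

            /* same output on any endianness: 04030201-08070605-... */
            printf("%08x-%08x-%08x-%08x\n", host[0], host[1], host[2], host[3]);
            return 0;
    }
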
index 141acd92e58aeddeb9a0ba1eaacf3bd520a836a3..e3704a49164be5763151d848257ebe0ad2ec9984 100644 (file)
@@ -494,21 +494,32 @@ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
 }
 
 /*
- * Socket is not locked. We are protected from async events by poll logic and
- * correct handling of state changes made by other threads is impossible in
- * any case.
+ *     Wait for a TCP event.
+ *
+ *     Note that we don't need to lock the socket, as the upper poll layers
+ *     take care of normal races (between the test and the event) and we don't
+ *     go look at any of the socket buffers directly.
  */
-__poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
+       __poll_t mask;
        struct sock *sk = sock->sk;
        const struct tcp_sock *tp = tcp_sk(sk);
-       __poll_t mask = 0;
        int state;
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
        state = inet_sk_state_load(sk);
        if (state == TCP_LISTEN)
                return inet_csk_listen_poll(sk);
 
+       /* Socket is not locked. We are protected from async events
+        * by poll logic and correct handling of state changes
+        * made by other threads is impossible in any case.
+        */
+
+       mask = 0;
+
        /*
         * EPOLLHUP is certainly not done right. But poll() doesn't
         * have a notion of HUP in just one direction, and for a
@@ -589,7 +600,7 @@ __poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
 
        return mask;
 }
-EXPORT_SYMBOL(tcp_poll_mask);
+EXPORT_SYMBOL(tcp_poll);
 
 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
@@ -806,8 +817,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
                                 * This occurs when the user tries to read
                                 * from a never-connected socket.
                                 */
-                               if (!sock_flag(sk, SOCK_DONE))
-                                       ret = -ENOTCONN;
+                               ret = -ENOTCONN;
                                break;
                        }
                        if (!timeo) {
@@ -1230,7 +1240,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
                /* 'common' sending to sendq */
        }
 
-       sockc.tsflags = sk->sk_tsflags;
+       sockcm_init(&sockc, sk);
        if (msg->msg_controllen) {
                err = sock_cmsg_send(sk, msg, &sockc);
                if (unlikely(err)) {
@@ -1264,9 +1274,6 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
                        int linear;
 
 new_segment:
-                       /* Allocate new segment. If the interface is SG,
-                        * allocate skb fitting to single page.
-                        */
                        if (!sk_stream_memory_free(sk))
                                goto wait_for_sndbuf;
 
@@ -2031,13 +2038,10 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                                break;
 
                        if (sk->sk_state == TCP_CLOSE) {
-                               if (!sock_flag(sk, SOCK_DONE)) {
-                                       /* This occurs when user tries to read
-                                        * from never connected socket.
-                                        */
-                                       copied = -ENOTCONN;
-                                       break;
-                               }
+                               /* This occurs when the user tries to read
+                                * from a never-connected socket.
+                                */
+                               copied = -ENOTCONN;
                                break;
                        }
 
@@ -2563,6 +2567,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        sk->sk_shutdown = 0;
        sock_reset_flag(sk, SOCK_DONE);
        tp->srtt_us = 0;
+       tp->rcv_rtt_last_tsecr = 0;
        tp->write_seq += tp->max_window + 2;
        if (tp->write_seq == 0)
                tp->write_seq = 1;
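
sockcm_init() above plays the same role for struct sockcm_cookie that ipcm_init() plays for struct ipcm_cookie: one place that sets the per-message control defaults (today just the socket's timestamping flags) instead of each sendmsg() open-coding them. The helper is outside this hunk; a sketch inferred from the assignment it replaces (an assumption, not the actual header change):

    /* Inferred sketch: reset the cookie and seed it from the socket. */
    static inline void sockcm_init(struct sockcm_cookie *sockc,
                                   const struct sock *sk)
    {
            *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
    }
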
index 58e2f479ffb4d523b4ccfbb859bdd186a55ab83d..3b5f45b9e81eb14ef39311a1e71fb01eb0622980 100644 (file)
@@ -205,7 +205,11 @@ static u32 bbr_bw(const struct sock *sk)
  */
 static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
 {
-       rate *= tcp_mss_to_mtu(sk, tcp_sk(sk)->mss_cache);
+       unsigned int mss = tcp_sk(sk)->mss_cache;
+
+       if (!tcp_needs_internal_pacing(sk))
+               mss = tcp_mss_to_mtu(sk, mss);
+       rate *= mss;
        rate *= gain;
        rate >>= BBR_SCALE;
        rate *= USEC_PER_SEC;
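
The guard above exists because the two pacing backends measure different byte counts: sch_fq paces full MTU-sized frames, while the internal hrtimer pacing (see the tcp_internal_pacing() hunk further down) times skb payload lengths. Computing the rate from MTU-sized frames while timing only MSS-sized payloads would pace faster than the measured bandwidth. Worked numbers, with assumed values:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed: 10,000 pkts/s delivery rate, MSS 1448 with TCP
             * timestamps, so tcp_mss_to_mtu() yields a 1500-byte frame.
             */
            unsigned long long bw_pps = 10000;
            unsigned int mss = 1448, mtu = 1500;

            printf("internal pacing rate: %llu B/s\n", bw_pps * mss);
            printf("fq pacing rate:       %llu B/s\n", bw_pps * mtu);
            /* 14,480,000 vs 15,000,000 bytes/s: roughly a 3.6 percent
             * overshoot if the larger rate clocked MSS-sized payloads.
             */
            return 0;
    }
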
index 355d3dffd021ccad0f30891994289d916f7d276c..91dbb9afb95021eb2ca928978de556b8557b38ec 100644 (file)
@@ -78,6 +78,7 @@
 #include <linux/errqueue.h>
 #include <trace/events/tcp.h>
 #include <linux/static_key.h>
+#include <net/busy_poll.h>
 
 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 
@@ -265,7 +266,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
                 * it is probably a retransmit.
                 */
                if (tp->ecn_flags & TCP_ECN_SEEN)
-                       tcp_enter_quickack_mode(sk, 1);
+                       tcp_enter_quickack_mode(sk, 2);
                break;
        case INET_ECN_CE:
                if (tcp_ca_needs_ecn(sk))
@@ -273,7 +274,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
 
                if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
                        /* Better not delay acks, sender can have a very low cwnd */
-                       tcp_enter_quickack_mode(sk, 1);
+                       tcp_enter_quickack_mode(sk, 2);
                        tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
                }
                tp->ecn_flags |= TCP_ECN_SEEN;
@@ -582,9 +583,12 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (tp->rx_opt.rcv_tsecr &&
-           (TCP_SKB_CB(skb)->end_seq -
-            TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) {
+       if (tp->rx_opt.rcv_tsecr == tp->rcv_rtt_last_tsecr)
+               return;
+       tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr;
+
+       if (TCP_SKB_CB(skb)->end_seq -
+           TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) {
                u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
                u32 delta_us;
 
@@ -3181,6 +3185,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 
                if (tcp_is_reno(tp)) {
                        tcp_remove_reno_sacks(sk, pkts_acked);
+
+                       /* If any of the cumulatively ACKed segments was
+                        * retransmitted, the non-SACK case cannot confirm that
+                        * progress was due to the original transmissions, since
+                        * the TCPCB_SACKED_ACKED bits are absent, even if some
+                        * of the packets were never retransmitted.
+                        */
+                       if (flag & FLAG_RETRANS_DATA_ACKED)
+                               flag &= ~FLAG_ORIG_SACK_ACKED;
                } else {
                        int delta;
 
@@ -3449,7 +3462,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
 static void tcp_store_ts_recent(struct tcp_sock *tp)
 {
        tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
-       tp->rx_opt.ts_recent_stamp = get_seconds();
+       tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
 }
 
 static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
@@ -4330,6 +4343,11 @@ static bool tcp_try_coalesce(struct sock *sk,
        if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
                return false;
 
+#ifdef CONFIG_TLS_DEVICE
+       if (from->decrypted != to->decrypted)
+               return false;
+#endif
+
        if (!skb_try_coalesce(to, from, fragstolen, &delta))
                return false;
 
@@ -4608,8 +4626,10 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
        skb->data_len = data_len;
        skb->len = size;
 
-       if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
+       if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
                goto err_free;
+       }
 
        err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
        if (err)
@@ -4665,18 +4685,21 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
         *  Out of sequence packets to the out_of_order_queue.
         */
        if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
-               if (tcp_receive_window(tp) == 0)
+               if (tcp_receive_window(tp) == 0) {
+                       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
                        goto out_of_window;
+               }
 
                /* Ok. In sequence. In window. */
 queue_and_out:
                if (skb_queue_len(&sk->sk_receive_queue) == 0)
                        sk_forced_mem_schedule(sk, skb->truesize);
-               else if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
+               else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
+                       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
                        goto drop;
+               }
 
                eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
-               tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
                if (skb->len)
                        tcp_event_data_recv(sk, skb);
                if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
@@ -4732,8 +4755,10 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
                /* If window is closed, drop tail of packet. But after
                 * remembering D-SACK for its head made in previous line.
                 */
-               if (!tcp_receive_window(tp))
+               if (!tcp_receive_window(tp)) {
+                       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
                        goto out_of_window;
+               }
                goto queue_and_out;
        }
 
@@ -4851,6 +4876,9 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
                        break;
 
                memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
+#ifdef CONFIG_TLS_DEVICE
+               nskb->decrypted = skb->decrypted;
+#endif
                TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
                if (list)
                        __skb_queue_before(list, skb, nskb);
@@ -4878,6 +4906,10 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
                                    skb == tail ||
                                    (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
                                        goto end;
+#ifdef CONFIG_TLS_DEVICE
+                               if (skb->decrypted != nskb->decrypted)
+                                       goto end;
+#endif
                        }
                }
        }
@@ -5475,6 +5507,11 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
                                tcp_ack(sk, skb, 0);
                                __kfree_skb(skb);
                                tcp_data_snd_check(sk);
+                               /* When receiving a pure ACK in the fast path,
+                                * update the last ts ecr directly instead of
+                                * calling tcp_rcv_rtt_measure_ts()
+                                */
+                               tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr;
                                return;
                        } else { /* Header too small */
                                TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
@@ -5576,6 +5613,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
        if (skb) {
                icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
                security_inet_conn_established(sk, skb);
+               sk_mark_napi_id(sk, skb);
        }
 
        tcp_init_transfer(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB);
@@ -6404,6 +6442,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
        tcp_rsk(req)->snt_isn = isn;
        tcp_rsk(req)->txhash = net_tx_rndhash();
        tcp_openreq_init_rwin(req, sk, dst);
+       sk_rx_queue_set(req_to_sk(req), skb);
        if (!want_cookie) {
                tcp_reqsk_record_syn(sk, req, skb);
                fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);
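
Several hunks above share one theme: rcv_rtt_last_tsecr caches the most recently echoed timestamp so that only the first segment echoing a given tsecr contributes a receive-side RTT sample (repeated echoes would otherwise skew the estimate low), and the fast path keeps the cache warm without calling tcp_rcv_rtt_measure_ts(). The dedup pattern in isolation, as a runnable sketch with invented numbers:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t last_tsecr;

    /* Take at most one receive-RTT sample per distinct echoed timestamp,
     * mirroring the rcv_rtt_last_tsecr check added above.
     */
    static void maybe_sample(uint32_t tsecr, uint32_t now)
    {
            if (tsecr == last_tsecr)
                    return;             /* same echo as before: skip */
            last_tsecr = tsecr;
            printf("rtt sample: %u\n", now - tsecr);
    }

    int main(void)
    {
            maybe_sample(100, 130);     /* sampled: rtt 30 */
            maybe_sample(100, 135);     /* skipped: duplicate echo */
            maybe_sample(110, 150);     /* sampled: rtt 40 */
            return 0;
    }
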
index bea17f1e8302585d70c1e0108ae1c33d149230d8..dc415c66a33a15d4e5d81a8999500e4cabbf12b3 100644 (file)
@@ -155,7 +155,8 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
           and use initial timestamp retrieved from peer table.
         */
        if (tcptw->tw_ts_recent_stamp &&
-           (!twp || (reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
+           (!twp || (reuse && time_after32(ktime_get_seconds(),
+                                           tcptw->tw_ts_recent_stamp)))) {
                tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
                if (tp->write_seq == 0)
                        tp->write_seq = 1;
index 1dda1341a223937580b4efdbedb21ae50b221ff7..75ef332a7caf44de619acf030977eba01565c70a 100644 (file)
@@ -144,7 +144,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                tw->tw_substate   = TCP_TIME_WAIT;
                tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                if (tmp_opt.saw_tstamp) {
-                       tcptw->tw_ts_recent_stamp = get_seconds();
+                       tcptw->tw_ts_recent_stamp = ktime_get_seconds();
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
                }
 
@@ -189,7 +189,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 
                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
-                       tcptw->tw_ts_recent_stamp = get_seconds();
+                       tcptw->tw_ts_recent_stamp = ktime_get_seconds();
                }
 
                inet_twsk_put(tw);
@@ -449,119 +449,122 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
                                      struct sk_buff *skb)
 {
        struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
+       const struct inet_request_sock *ireq = inet_rsk(req);
+       struct tcp_request_sock *treq = tcp_rsk(req);
+       struct inet_connection_sock *newicsk;
+       struct tcp_sock *oldtp, *newtp;
 
-       if (newsk) {
-               const struct inet_request_sock *ireq = inet_rsk(req);
-               struct tcp_request_sock *treq = tcp_rsk(req);
-               struct inet_connection_sock *newicsk = inet_csk(newsk);
-               struct tcp_sock *newtp = tcp_sk(newsk);
-               struct tcp_sock *oldtp = tcp_sk(sk);
-
-               smc_check_reset_syn_req(oldtp, req, newtp);
-
-               /* Now setup tcp_sock */
-               newtp->pred_flags = 0;
-
-               newtp->rcv_wup = newtp->copied_seq =
-               newtp->rcv_nxt = treq->rcv_isn + 1;
-               newtp->segs_in = 1;
-
-               newtp->snd_sml = newtp->snd_una =
-               newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
-
-               INIT_LIST_HEAD(&newtp->tsq_node);
-               INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
-
-               tcp_init_wl(newtp, treq->rcv_isn);
-
-               newtp->srtt_us = 0;
-               newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
-               minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
-               newicsk->icsk_rto = TCP_TIMEOUT_INIT;
-               newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
-
-               newtp->packets_out = 0;
-               newtp->retrans_out = 0;
-               newtp->sacked_out = 0;
-               newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
-               newtp->tlp_high_seq = 0;
-               newtp->lsndtime = tcp_jiffies32;
-               newsk->sk_txhash = treq->txhash;
-               newtp->last_oow_ack_time = 0;
-               newtp->total_retrans = req->num_retrans;
-
-               /* So many TCP implementations out there (incorrectly) count the
-                * initial SYN frame in their delayed-ACK and congestion control
-                * algorithms that we must have the following bandaid to talk
-                * efficiently to them.  -DaveM
-                */
-               newtp->snd_cwnd = TCP_INIT_CWND;
-               newtp->snd_cwnd_cnt = 0;
-
-               /* There's a bubble in the pipe until at least the first ACK. */
-               newtp->app_limited = ~0U;
-
-               tcp_init_xmit_timers(newsk);
-               newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
-
-               newtp->rx_opt.saw_tstamp = 0;
-
-               newtp->rx_opt.dsack = 0;
-               newtp->rx_opt.num_sacks = 0;
-
-               newtp->urg_data = 0;
-
-               if (sock_flag(newsk, SOCK_KEEPOPEN))
-                       inet_csk_reset_keepalive_timer(newsk,
-                                                      keepalive_time_when(newtp));
-
-               newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
-               newtp->rx_opt.sack_ok = ireq->sack_ok;
-               newtp->window_clamp = req->rsk_window_clamp;
-               newtp->rcv_ssthresh = req->rsk_rcv_wnd;
-               newtp->rcv_wnd = req->rsk_rcv_wnd;
-               newtp->rx_opt.wscale_ok = ireq->wscale_ok;
-               if (newtp->rx_opt.wscale_ok) {
-                       newtp->rx_opt.snd_wscale = ireq->snd_wscale;
-                       newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
-               } else {
-                       newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
-                       newtp->window_clamp = min(newtp->window_clamp, 65535U);
-               }
-               newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
-                                 newtp->rx_opt.snd_wscale);
-               newtp->max_window = newtp->snd_wnd;
-
-               if (newtp->rx_opt.tstamp_ok) {
-                       newtp->rx_opt.ts_recent = req->ts_recent;
-                       newtp->rx_opt.ts_recent_stamp = get_seconds();
-                       newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
-               } else {
-                       newtp->rx_opt.ts_recent_stamp = 0;
-                       newtp->tcp_header_len = sizeof(struct tcphdr);
-               }
-               newtp->tsoffset = treq->ts_off;
+       if (!newsk)
+               return NULL;
+
+       newicsk = inet_csk(newsk);
+       newtp = tcp_sk(newsk);
+       oldtp = tcp_sk(sk);
+
+       smc_check_reset_syn_req(oldtp, req, newtp);
+
+       /* Now setup tcp_sock */
+       newtp->pred_flags = 0;
+
+       newtp->rcv_wup = newtp->copied_seq =
+       newtp->rcv_nxt = treq->rcv_isn + 1;
+       newtp->segs_in = 1;
+
+       newtp->snd_sml = newtp->snd_una =
+       newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
+
+       INIT_LIST_HEAD(&newtp->tsq_node);
+       INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
+
+       tcp_init_wl(newtp, treq->rcv_isn);
+
+       newtp->srtt_us = 0;
+       newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
+       minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
+       newicsk->icsk_rto = TCP_TIMEOUT_INIT;
+       newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
+
+       newtp->packets_out = 0;
+       newtp->retrans_out = 0;
+       newtp->sacked_out = 0;
+       newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+       newtp->tlp_high_seq = 0;
+       newtp->lsndtime = tcp_jiffies32;
+       newsk->sk_txhash = treq->txhash;
+       newtp->last_oow_ack_time = 0;
+       newtp->total_retrans = req->num_retrans;
+
+       /* So many TCP implementations out there (incorrectly) count the
+        * initial SYN frame in their delayed-ACK and congestion control
+        * algorithms that we must have the following bandaid to talk
+        * efficiently to them.  -DaveM
+        */
+       newtp->snd_cwnd = TCP_INIT_CWND;
+       newtp->snd_cwnd_cnt = 0;
+
+       /* There's a bubble in the pipe until at least the first ACK. */
+       newtp->app_limited = ~0U;
+
+       tcp_init_xmit_timers(newsk);
+       newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
+
+       newtp->rx_opt.saw_tstamp = 0;
+
+       newtp->rx_opt.dsack = 0;
+       newtp->rx_opt.num_sacks = 0;
+
+       newtp->urg_data = 0;
+
+       if (sock_flag(newsk, SOCK_KEEPOPEN))
+               inet_csk_reset_keepalive_timer(newsk,
+                                              keepalive_time_when(newtp));
+
+       newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
+       newtp->rx_opt.sack_ok = ireq->sack_ok;
+       newtp->window_clamp = req->rsk_window_clamp;
+       newtp->rcv_ssthresh = req->rsk_rcv_wnd;
+       newtp->rcv_wnd = req->rsk_rcv_wnd;
+       newtp->rx_opt.wscale_ok = ireq->wscale_ok;
+       if (newtp->rx_opt.wscale_ok) {
+               newtp->rx_opt.snd_wscale = ireq->snd_wscale;
+               newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
+       } else {
+               newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
+               newtp->window_clamp = min(newtp->window_clamp, 65535U);
+       }
+       newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
+       newtp->max_window = newtp->snd_wnd;
+
+       if (newtp->rx_opt.tstamp_ok) {
+               newtp->rx_opt.ts_recent = req->ts_recent;
+               newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
+               newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
+       } else {
+               newtp->rx_opt.ts_recent_stamp = 0;
+               newtp->tcp_header_len = sizeof(struct tcphdr);
+       }
+       newtp->tsoffset = treq->ts_off;
 #ifdef CONFIG_TCP_MD5SIG
-               newtp->md5sig_info = NULL;      /*XXX*/
-               if (newtp->af_specific->md5_lookup(sk, newsk))
-                       newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
+       newtp->md5sig_info = NULL;      /*XXX*/
+       if (newtp->af_specific->md5_lookup(sk, newsk))
+               newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
 #endif
-               if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
-                       newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
-               newtp->rx_opt.mss_clamp = req->mss;
-               tcp_ecn_openreq_child(newtp, req);
-               newtp->fastopen_req = NULL;
-               newtp->fastopen_rsk = NULL;
-               newtp->syn_data_acked = 0;
-               newtp->rack.mstamp = 0;
-               newtp->rack.advanced = 0;
-               newtp->rack.reo_wnd_steps = 1;
-               newtp->rack.last_delivered = 0;
-               newtp->rack.reo_wnd_persist = 0;
-               newtp->rack.dsack_seen = 0;
-
-               __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
-       }
+       if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
+               newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
+       newtp->rx_opt.mss_clamp = req->mss;
+       tcp_ecn_openreq_child(newtp, req);
+       newtp->fastopen_req = NULL;
+       newtp->fastopen_rsk = NULL;
+       newtp->syn_data_acked = 0;
+       newtp->rack.mstamp = 0;
+       newtp->rack.advanced = 0;
+       newtp->rack.reo_wnd_steps = 1;
+       newtp->rack.last_delivered = 0;
+       newtp->rack.reo_wnd_persist = 0;
+       newtp->rack.dsack_seen = 0;
+
+       __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
+
        return newsk;
 }
 EXPORT_SYMBOL(tcp_create_openreq_child);
@@ -600,7 +603,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                         * it can be estimated (approximately)
                         * from other data.
                         */
-                       tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
+                       tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }
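
The large hunk above is a pure control-flow rework: the "if (newsk) { ... }" wrapper around roughly a hundred lines of setup becomes an early "if (!newsk) return NULL;", pulling the body back one indentation level with no behavioural change. The shape of the transformation, on a toy allocator:

    #include <stdlib.h>

    struct conn { int state; };

    /* Early-return style: bail out once up front and keep the main path
     * at the top indentation level, as in the rework above.
     */
    static struct conn *conn_clone(void)
    {
            struct conn *c = malloc(sizeof(*c));

            if (!c)
                    return NULL;    /* was: if (c) { ...everything... } */
            c->state = 1;           /* the long setup sequence lives here */
            return c;
    }

    int main(void)
    {
            free(conn_clone());     /* free(NULL) is a no-op */
            return 0;
    }
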
index 8cc7c348733052a8ef4bc06d09149171d8277006..870b0a3350616a87580882cbc06382f5e415aef5 100644 (file)
@@ -180,9 +180,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
        return segs;
 }
 
-struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
-       struct sk_buff **pp = NULL;
+       struct sk_buff *pp = NULL;
        struct sk_buff *p;
        struct tcphdr *th;
        struct tcphdr *th2;
@@ -220,7 +220,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
        len = skb_gro_len(skb);
        flags = tcp_flag_word(th);
 
-       for (; (p = *head); head = &p->next) {
+       list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;
 
@@ -233,7 +233,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
                goto found;
        }
-
+       p = NULL;
        goto out_check_final;
 
 found:
@@ -262,8 +262,11 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
        flush |= (len - 1) >= mss;
        flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
+#ifdef CONFIG_TLS_DEVICE
+       flush |= p->decrypted ^ skb->decrypted;
+#endif
 
-       if (flush || skb_gro_receive(head, skb)) {
+       if (flush || skb_gro_receive(p, skb)) {
                mss = 1;
                goto out_check_final;
        }
@@ -277,7 +280,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
                                        TCP_FLAG_FIN));
 
        if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
-               pp = head;
+               pp = p;
 
 out:
        NAPI_GRO_CB(skb)->flush |= (flush != 0);
@@ -302,7 +305,7 @@ int tcp_gro_complete(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_gro_complete);
 
-static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+static struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
        /* Don't bother verifying checksum if we're going to flush anyway. */
        if (!NAPI_GRO_CB(skb)->flush &&
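
tcp_gro_receive() now walks the held packets with list_for_each_entry() instead of chasing a hand-rolled struct sk_buff ** chain, and reports a match by returning the skb itself rather than a pointer into the chain (hence the explicit "p = NULL" before out_check_final). A stripped-down userspace analog of the traversal, with a minimal list standing in for linux/list.h (GNU C, for typeof):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_for_each_entry(pos, head, member)                          \
            for (pos = container_of((head)->next, typeof(*pos), member);    \
                 &pos->member != (head);                                    \
                 pos = container_of(pos->member.next, typeof(*pos), member))

    struct pkt { int flow; struct list_head list; };

    static struct pkt *find_same_flow(struct list_head *head, int flow)
    {
            struct pkt *p;

            list_for_each_entry(p, head, list)
                    if (p->flow == flow)
                            return p;   /* the kernel code jumps to "found:" */
            return NULL;
    }

    int main(void)
    {
            struct list_head head = { &head, &head };
            struct pkt a = { .flow = 7 };

            a.list.next = &head; a.list.prev = &head;
            head.next = &a.list; head.prev = &a.list;

            printf("%s\n", find_same_flow(&head, 7) ? "found" : "miss");
            return 0;
    }
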
index 8e08b409c71e1f8e69422f1756d48b5bc55411c3..f8f6129160ddbdcd0419b5953906499270fe7ff0 100644 (file)
@@ -973,17 +973,6 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
-/* BBR congestion control needs pacing.
- * Same remark for SO_MAX_PACING_RATE.
- * sch_fq packet scheduler is efficiently handling pacing,
- * but is not always installed/used.
- * Return true if TCP stack should pace packets itself.
- */
-static bool tcp_needs_internal_pacing(const struct sock *sk)
-{
-       return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
-}
-
 static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
 {
        u64 len_ns;
@@ -995,9 +984,6 @@ static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
        if (!rate || rate == ~0U)
                return;
 
-       /* Should account for header sizes as sch_fq does,
-        * but lets make things simple.
-        */
        len_ns = (u64)skb->len * NSEC_PER_SEC;
        do_div(len_ns, rate);
        hrtimer_start(&tcp_sk(sk)->pacing_timer,
index c61240e43923d6dd6a5d6215074e2da2c2bc71f4..4dff40dad4dc5ccc372f5108b0d6ba38497ab81f 100644 (file)
@@ -146,6 +146,10 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
                                    rs->prior_mstamp); /* ack phase */
        rs->interval_us = max(snd_us, ack_us);
 
+       /* Record both segment send and ack receive intervals */
+       rs->snd_interval_us = snd_us;
+       rs->rcv_interval_us = ack_us;
+
        /* Normally we expect interval_us >= min-rtt.
         * Note that rate may still be over-estimated when a spuriously
         * retransmitted skb was first (s)acked because "interval_us"
index 9bb27df4dac5ec5f133b15e972f384bdc1d165b1..060e841dde400fd1e1c02992fa448dc60305cec3 100644 (file)
@@ -926,11 +926,6 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
                return -EOPNOTSUPP;
 
-       ipc.opt = NULL;
-       ipc.tx_flags = 0;
-       ipc.ttl = 0;
-       ipc.tos = -1;
-
        getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
 
        fl4 = &inet->cork.fl.u.ip4;
@@ -977,9 +972,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                connected = 1;
        }
 
-       ipc.sockc.tsflags = sk->sk_tsflags;
-       ipc.addr = inet->inet_saddr;
-       ipc.oif = sk->sk_bound_dev_if;
+       ipcm_init_sk(&ipc, inet);
        ipc.gso_size = up->gso_size;
 
        if (msg->msg_controllen) {
@@ -1027,8 +1020,6 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        saddr = ipc.addr;
        ipc.addr = faddr = daddr;
 
-       sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
-
        if (ipc.opt && ipc.opt->opt.srr) {
                if (!daddr) {
                        err = -EINVAL;
@@ -2591,7 +2582,7 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname,
  *     udp_poll - wait for a UDP event.
  *     @file - file struct
  *     @sock - socket
- *     @events - events to wait for
+ *     @wait - poll table
  *
 *     This is the same as datagram poll, except for the special case of
 *     blocking sockets. If the application is using a blocking fd
@@ -2600,23 +2591,23 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname,
  *     but then block when reading it. Add special case code
  *     to work around these arguably broken applications.
  */
-__poll_t udp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
-       __poll_t mask = datagram_poll_mask(sock, events);
+       __poll_t mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
 
        if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Check for false positives due to checksum errors */
-       if ((mask & EPOLLRDNORM) && !(sock->file->f_flags & O_NONBLOCK) &&
+       if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
            !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
                mask &= ~(EPOLLIN | EPOLLRDNORM);
 
        return mask;
 
 }
-EXPORT_SYMBOL(udp_poll_mask);
+EXPORT_SYMBOL(udp_poll);
 
 int udp_abort(struct sock *sk, int err)
 {
index 92dc9e5a7ff3d0a7509bfa2a66e9189c8341a5fa..0c0522b79b43f09785ce8fd5f0dc9461a93f0e98 100644 (file)
@@ -343,10 +343,11 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
        return segs;
 }
 
-struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
-                                struct udphdr *uh, udp_lookup_t lookup)
+struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+                               struct udphdr *uh, udp_lookup_t lookup)
 {
-       struct sk_buff *p, **pp = NULL;
+       struct sk_buff *pp = NULL;
+       struct sk_buff *p;
        struct udphdr *uh2;
        unsigned int off = skb_gro_offset(skb);
        int flush = 1;
@@ -371,7 +372,7 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
 unflush:
        flush = 0;
 
-       for (p = *head; p; p = p->next) {
+       list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;
 
@@ -394,13 +395,13 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
        return pp;
 }
 EXPORT_SYMBOL(udp_gro_receive);
 
-static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
-                                        struct sk_buff *skb)
+static struct sk_buff *udp4_gro_receive(struct list_head *head,
+                                       struct sk_buff *skb)
 {
        struct udphdr *uh = udp_gro_udphdr(skb);
 
index c134286d6a4179516709570ad534d1ae26fd0bce..1659a6b3cf4220f59048561239e379b4d234ba1e 100644 (file)
@@ -385,8 +385,6 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
 
        if (ndev->cnf.stable_secret.initialized)
                ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
-       else
-               ndev->cnf.addr_gen_mode = ipv6_devconf_dflt.addr_gen_mode;
 
        ndev->cnf.mtu6 = dev->mtu;
        ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
@@ -4528,6 +4526,7 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
                               unsigned long expires, u32 flags)
 {
        struct fib6_info *f6i;
+       u32 prio;
 
        f6i = addrconf_get_prefix_route(&ifp->addr,
                                        ifp->prefix_len,
@@ -4536,13 +4535,15 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
        if (!f6i)
                return -ENOENT;
 
-       if (f6i->fib6_metric != ifp->rt_priority) {
+       prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
+       if (f6i->fib6_metric != prio) {
+               /* delete old one */
+               ip6_del_rt(dev_net(ifp->idev->dev), f6i);
+
                /* add new one */
                addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
                                      ifp->rt_priority, ifp->idev->dev,
                                      expires, flags, GFP_KERNEL);
-               /* delete old one */
-               ip6_del_rt(dev_net(ifp->idev->dev), f6i);
        } else {
                if (!expires)
                        fib6_clean_expires(f6i);
@@ -5207,7 +5208,9 @@ static inline size_t inet6_ifla6_size(void)
             + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
             + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
             + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
-            + nla_total_size(sizeof(struct in6_addr)); /* IFLA_INET6_TOKEN */
+            + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */
+            + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */
+            + 0;
 }
 
 static inline size_t inet6_if_nlmsg_size(void)
@@ -5889,32 +5892,31 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
                                         loff_t *ppos)
 {
        int ret = 0;
-       int new_val;
+       u32 new_val;
        struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
        struct net *net = (struct net *)ctl->extra2;
+       struct ctl_table tmp = {
+               .data = &new_val,
+               .maxlen = sizeof(new_val),
+               .mode = ctl->mode,
+       };
 
        if (!rtnl_trylock())
                return restart_syscall();
 
-       ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+       new_val = *((u32 *)ctl->data);
 
-       if (write) {
-               new_val = *((int *)ctl->data);
+       ret = proc_douintvec(&tmp, write, buffer, lenp, ppos);
+       if (ret != 0)
+               goto out;
 
+       if (write) {
                if (check_addr_gen_mode(new_val) < 0) {
                        ret = -EINVAL;
                        goto out;
                }
 
-               /* request for default */
-               if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) {
-                       ipv6_devconf_dflt.addr_gen_mode = new_val;
-
-               /* request for individual net device */
-               } else {
-                       if (!idev)
-                               goto out;
-
+               if (idev) {
                        if (check_stable_privacy(idev, net, new_val) < 0) {
                                ret = -EINVAL;
                                goto out;
@@ -5924,7 +5926,21 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
                                idev->cnf.addr_gen_mode = new_val;
                                addrconf_dev_config(idev->dev);
                        }
+               } else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
+                       struct net_device *dev;
+
+                       net->ipv6.devconf_dflt->addr_gen_mode = new_val;
+                       for_each_netdev(net, dev) {
+                               idev = __in6_dev_get(dev);
+                               if (idev &&
+                                   idev->cnf.addr_gen_mode != new_val) {
+                                       idev->cnf.addr_gen_mode = new_val;
+                                       addrconf_dev_config(idev->dev);
+                               }
+                       }
                }
+
+               *((u32 *)ctl->data) = new_val;
        }
 
 out:
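
The rewritten handler parses into a local u32 through a temporary ctl_table, validates, and only then commits, so a rejected write can no longer leave a half-updated value behind; a write to the "all" devconf now also fans the mode out to the default and to every existing device. The validate-then-commit shape, as a userspace sketch with invented names:

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned int addr_gen_mode;   /* stand-in for ctl->data */

    /* Parse into a temporary, validate, then commit: the pattern the
     * addrconf_sysctl_addr_gen_mode() rewrite switches to.
     */
    static int write_mode(const char *buf)
    {
            unsigned int new_val = strtoul(buf, NULL, 0);

            if (new_val > 3)             /* check_addr_gen_mode() analog */
                    return -1;           /* stored value stays untouched */
            addr_gen_mode = new_val;     /* commit only after validation */
            return 0;
    }

    int main(void)
    {
            printf("%d %u\n", write_mode("9"), addr_gen_mode); /* -1 0 */
            printf("%d %u\n", write_mode("2"), addr_gen_mode); /*  0 2 */
            return 0;
    }
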
index 74f2a261e8df4dc78a3baddb31609cdc70ba6035..c9535354149fd3149f4f9d491c2aa14562fb2c46 100644 (file)
@@ -570,7 +570,7 @@ const struct proto_ops inet6_stream_ops = {
        .socketpair        = sock_no_socketpair,        /* a do nothing */
        .accept            = inet_accept,               /* ok           */
        .getname           = inet6_getname,
-       .poll_mask         = tcp_poll_mask,             /* ok           */
+       .poll              = tcp_poll,                  /* ok           */
        .ioctl             = inet6_ioctl,               /* must change  */
        .listen            = inet_listen,               /* ok           */
        .shutdown          = inet_shutdown,             /* ok           */
@@ -603,7 +603,7 @@ const struct proto_ops inet6_dgram_ops = {
        .socketpair        = sock_no_socketpair,        /* a do nothing */
        .accept            = sock_no_accept,            /* a do nothing */
        .getname           = inet6_getname,
-       .poll_mask         = udp_poll_mask,             /* ok           */
+       .poll              = udp_poll,                  /* ok           */
        .ioctl             = inet6_ioctl,               /* must change  */
        .listen            = sock_no_listen,            /* ok           */
        .shutdown          = inet_shutdown,             /* ok           */
@@ -764,6 +764,7 @@ EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
 static struct packet_type ipv6_packet_type __read_mostly = {
        .type = cpu_to_be16(ETH_P_IPV6),
        .func = ipv6_rcv,
+       .list_func = ipv6_list_rcv,
 };
 
 static int __init ipv6_packet_init(void)
index 2ee08b6a86a4881210f5a0c81206a64a562e5a56..201306b9b5ea062b47c8be8335c7a6ae66b59ff2 100644 (file)
@@ -736,7 +736,7 @@ EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);
 
 int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
                          struct msghdr *msg, struct flowi6 *fl6,
-                         struct ipcm6_cookie *ipc6, struct sockcm_cookie *sockc)
+                         struct ipcm6_cookie *ipc6)
 {
        struct in6_pktinfo *src_info;
        struct cmsghdr *cmsg;
@@ -755,7 +755,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
                }
 
                if (cmsg->cmsg_level == SOL_SOCKET) {
-                       err = __sock_cmsg_send(sk, msg, cmsg, sockc);
+                       err = __sock_cmsg_send(sk, msg, cmsg, &ipc6->sockc);
                        if (err)
                                return err;
                        continue;
index 27f59b61f70f59f6b4a4502727a161b5f1b91ef1..ddfa533a84e501dfb5b888b0d558dd59af43ec49 100644 (file)
@@ -49,8 +49,8 @@ static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
        return 0;
 }
 
-static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
-                                        struct sk_buff *skb)
+static struct sk_buff *esp6_gro_receive(struct list_head *head,
+                                       struct sk_buff *skb)
 {
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
index be491bf6ab6e9ff4d1a9d84bc78c4582f4fe8e01..24611c8b056230f1c80bfc739f6383089e9388e9 100644 (file)
@@ -430,7 +430,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
        struct icmp6hdr tmp_hdr;
        struct flowi6 fl6;
        struct icmpv6_msg msg;
-       struct sockcm_cookie sockc_unused = {0};
        struct ipcm6_cookie ipc6;
        int iif = 0;
        int addr_type = 0;
@@ -545,7 +544,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
        else if (!fl6.flowi6_oif)
                fl6.flowi6_oif = np->ucast_oif;
 
-       ipc6.tclass = np->tclass;
+       ipcm6_init_sk(&ipc6, np);
        fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
 
        dst = icmpv6_route_lookup(net, skb, sk, &fl6);
@@ -553,8 +552,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
                goto out;
 
        ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
-       ipc6.dontfrag = np->dontfrag;
-       ipc6.opt = NULL;
 
        msg.skb = skb;
        msg.offset = skb_network_offset(skb);
@@ -575,7 +572,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
                            len + sizeof(struct icmp6hdr),
                            sizeof(struct icmp6hdr),
                            &ipc6, &fl6, (struct rt6_info *)dst,
-                           MSG_DONTWAIT, &sockc_unused)) {
+                           MSG_DONTWAIT)) {
                ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
                ip6_flush_pending_frames(sk);
        } else {
@@ -679,7 +676,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        struct dst_entry *dst;
        struct ipcm6_cookie ipc6;
        u32 mark = IP6_REPLY_MARK(net, skb->mark);
-       struct sockcm_cookie sockc_unused = {0};
 
        saddr = &ipv6_hdr(skb)->daddr;
 
@@ -726,16 +722,14 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        msg.offset = 0;
        msg.type = ICMPV6_ECHO_REPLY;
 
+       ipcm6_init_sk(&ipc6, np);
        ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
        ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb));
-       ipc6.dontfrag = np->dontfrag;
-       ipc6.opt = NULL;
 
        if (ip6_append_data(sk, icmpv6_getfrag, &msg,
                            skb->len + sizeof(struct icmp6hdr),
                            sizeof(struct icmp6hdr), &ipc6, &fl6,
-                           (struct rt6_info *)dst, MSG_DONTWAIT,
-                           &sockc_unused)) {
+                           (struct rt6_info *)dst, MSG_DONTWAIT)) {
                __ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
                ip6_flush_pending_frames(sk);
        } else {
index 4b32e5921e5ced56de7101c816c3d0c40b3df2cb..b7739aba6e684bc68bc76f38087e449ee10383ba 100644 (file)
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_IPV6_ILA) += ila.o
 
-ila-objs := ila_common.o ila_lwt.o ila_xlat.o
+ila-objs := ila_main.o ila_common.o ila_lwt.o ila_xlat.o
index 3c7a11b62334da87b9e7e7f1b06cab47e0b5f044..1f747bcbec295303c8e08307a87c6c55221194d1 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/skbuff.h>
 #include <linux/types.h>
 #include <net/checksum.h>
+#include <net/genetlink.h>
 #include <net/ip.h>
 #include <net/protocol.h>
 #include <uapi/linux/ila.h>
@@ -104,9 +105,31 @@ void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p,
 
 void ila_init_saved_csum(struct ila_params *p);
 
+struct ila_net {
+       struct {
+               struct rhashtable rhash_table;
+               spinlock_t *locks; /* Bucket locks for entry manipulation */
+               unsigned int locks_mask;
+               bool hooks_registered;
+       } xlat;
+};
+
 int ila_lwt_init(void);
 void ila_lwt_fini(void);
-int ila_xlat_init(void);
-void ila_xlat_fini(void);
+
+int ila_xlat_init_net(struct net *net);
+void ila_xlat_exit_net(struct net *net);
+
+int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info);
+int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info);
+int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info);
+int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info);
+int ila_xlat_nl_dump_start(struct netlink_callback *cb);
+int ila_xlat_nl_dump_done(struct netlink_callback *cb);
+int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb);
+
+extern unsigned int ila_net_id;
+
+extern struct genl_family ila_nl_family;
 
 #endif /* __ILA_H */
index 8c88ecf29b93529acd8014dabdd618362a140922..579310466eace8a3f3981e89cdc240249065c0a8 100644 (file)
@@ -154,33 +154,3 @@ void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p,
        iaddr->loc = p->locator;
 }
 
-static int __init ila_init(void)
-{
-       int ret;
-
-       ret = ila_lwt_init();
-
-       if (ret)
-               goto fail_lwt;
-
-       ret = ila_xlat_init();
-       if (ret)
-               goto fail_xlat;
-
-       return 0;
-fail_xlat:
-       ila_lwt_fini();
-fail_lwt:
-       return ret;
-}
-
-static void __exit ila_fini(void)
-{
-       ila_xlat_fini();
-       ila_lwt_fini();
-}
-
-module_init(ila_init);
-module_exit(ila_fini);
-MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>");
-MODULE_LICENSE("GPL");
diff --git a/net/ipv6/ila/ila_main.c b/net/ipv6/ila/ila_main.c
new file mode 100644 (file)
index 0000000..18fac76
--- /dev/null
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <net/genetlink.h>
+#include <net/ila.h>
+#include <net/netns/generic.h>
+#include <uapi/linux/genetlink.h>
+#include "ila.h"
+
+static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
+       [ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
+       [ILA_ATTR_LOCATOR_MATCH] = { .type = NLA_U64, },
+       [ILA_ATTR_IFINDEX] = { .type = NLA_U32, },
+       [ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, },
+       [ILA_ATTR_IDENT_TYPE] = { .type = NLA_U8, },
+};
+
+static const struct genl_ops ila_nl_ops[] = {
+       {
+               .cmd = ILA_CMD_ADD,
+               .doit = ila_xlat_nl_cmd_add_mapping,
+               .policy = ila_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+       },
+       {
+               .cmd = ILA_CMD_DEL,
+               .doit = ila_xlat_nl_cmd_del_mapping,
+               .policy = ila_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+       },
+       {
+               .cmd = ILA_CMD_FLUSH,
+               .doit = ila_xlat_nl_cmd_flush,
+               .policy = ila_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+       },
+       {
+               .cmd = ILA_CMD_GET,
+               .doit = ila_xlat_nl_cmd_get_mapping,
+               .start = ila_xlat_nl_dump_start,
+               .dumpit = ila_xlat_nl_dump,
+               .done = ila_xlat_nl_dump_done,
+               .policy = ila_nl_policy,
+       },
+};
+
+unsigned int ila_net_id;
+
+struct genl_family ila_nl_family __ro_after_init = {
+       .hdrsize        = 0,
+       .name           = ILA_GENL_NAME,
+       .version        = ILA_GENL_VERSION,
+       .maxattr        = ILA_ATTR_MAX,
+       .netnsok        = true,
+       .parallel_ops   = true,
+       .module         = THIS_MODULE,
+       .ops            = ila_nl_ops,
+       .n_ops          = ARRAY_SIZE(ila_nl_ops),
+};
+
+static __net_init int ila_init_net(struct net *net)
+{
+       int err;
+
+       err = ila_xlat_init_net(net);
+       if (err)
+               goto ila_xlat_init_fail;
+
+       return 0;
+
+ila_xlat_init_fail:
+       return err;
+}
+
+static __net_exit void ila_exit_net(struct net *net)
+{
+       ila_xlat_exit_net(net);
+}
+
+static struct pernet_operations ila_net_ops = {
+       .init = ila_init_net,
+       .exit = ila_exit_net,
+       .id   = &ila_net_id,
+       .size = sizeof(struct ila_net),
+};
+
+static int __init ila_init(void)
+{
+       int ret;
+
+       ret = register_pernet_device(&ila_net_ops);
+       if (ret)
+               goto register_device_fail;
+
+       ret = genl_register_family(&ila_nl_family);
+       if (ret)
+               goto register_family_fail;
+
+       ret = ila_lwt_init();
+       if (ret)
+               goto fail_lwt;
+
+       return 0;
+
+fail_lwt:
+       genl_unregister_family(&ila_nl_family);
+register_family_fail:
+       unregister_pernet_device(&ila_net_ops);
+register_device_fail:
+       return ret;
+}
+
+static void __exit ila_fini(void)
+{
+       ila_lwt_fini();
+       genl_unregister_family(&ila_nl_family);
+       unregister_pernet_device(&ila_net_ops);
+}
+
+module_init(ila_init);
+module_exit(ila_fini);
+MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>");
+MODULE_LICENSE("GPL");
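
The new ila_main.c concentrates module setup in one place, registering in a fixed order (pernet device ops, then the genetlink family, then the LWT infrastructure) and unwinding completed steps in reverse on failure, with one goto label per step, each named for the step that failed. A minimal, self-contained sketch of that unwind idiom; reg_a/reg_b/reg_c below are placeholders, not kernel APIs.

#include <stdio.h>

/* Placeholders standing in for the pernet ops, the genl family and
 * the LWT hooks registered in ila_init() above. */
static int  reg_a(void)   { puts("A registered");   return 0; }
static void unreg_a(void) { puts("A unregistered"); }
static int  reg_b(void)   { puts("B registered");   return 0; }
static void unreg_b(void) { puts("B unregistered"); }
static int  reg_c(void)   { puts("C failed");       return -1; }

static int init_all(void)
{
        int ret;

        ret = reg_a();
        if (ret)
                goto fail_a;    /* nothing to undo yet */

        ret = reg_b();
        if (ret)
                goto fail_b;    /* undo A only */

        ret = reg_c();
        if (ret)
                goto fail_c;    /* undo B, then A */

        return 0;

fail_c:
        unreg_b();
fail_b:
        unreg_a();
fail_a:
        return ret;
}

int main(void)
{
        /* With reg_c() failing, teardown runs in reverse order of
         * setup, mirroring ila_init()/ila_fini() above. */
        return init_all() ? 1 : 0;
}
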
index 10ae13560b407e28643fe2ed772de868ef41804f..51a15ce50a6444c9f0f77079bea70d95ff2a80d2 100644 (file)
@@ -22,36 +22,14 @@ struct ila_map {
        struct rcu_head rcu;
 };
 
-static unsigned int ila_net_id;
-
-struct ila_net {
-       struct rhashtable rhash_table;
-       spinlock_t *locks; /* Bucket locks for entry manipulation */
-       unsigned int locks_mask;
-       bool hooks_registered;
-};
-
+#define MAX_LOCKS 1024
 #define        LOCKS_PER_CPU 10
 
 static int alloc_ila_locks(struct ila_net *ilan)
 {
-       unsigned int i, size;
-       unsigned int nr_pcpus = num_possible_cpus();
-
-       nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
-       size = roundup_pow_of_two(nr_pcpus * LOCKS_PER_CPU);
-
-       if (sizeof(spinlock_t) != 0) {
-               ilan->locks = kvmalloc_array(size, sizeof(spinlock_t),
-                                            GFP_KERNEL);
-               if (!ilan->locks)
-                       return -ENOMEM;
-               for (i = 0; i < size; i++)
-                       spin_lock_init(&ilan->locks[i]);
-       }
-       ilan->locks_mask = size - 1;
-
-       return 0;
+       return alloc_bucket_spinlocks(&ilan->xlat.locks, &ilan->xlat.locks_mask,
+                                     MAX_LOCKS, LOCKS_PER_CPU,
+                                     GFP_KERNEL);
 }
 
 static u32 hashrnd __read_mostly;
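
alloc_bucket_spinlocks() replaces the open-coded version deleted above, but the shape is unchanged: size the lock array from the CPU count, cap it, round up to a power of two, and pick a lock by masking a hash. A self-contained userspace approximation with pthread mutexes; the kernel helper's arguments are visible in the + lines, everything else here is illustrative.

#include <pthread.h>
#include <stdlib.h>

/* Round up to the next power of two (32-bit), as the deleted
 * roundup_pow_of_two() call did. */
static unsigned int round_up_pow2(unsigned int v)
{
        v--;
        v |= v >> 1; v |= v >> 2; v |= v >> 4;
        v |= v >> 8; v |= v >> 16;
        return v + 1;
}

struct bucket_locks {
        pthread_mutex_t *locks;
        unsigned int mask;      /* size - 1, so (hash & mask) indexes */
};

/* max_locks and locks_per_cpu mirror the MAX_LOCKS / LOCKS_PER_CPU
 * arguments passed to alloc_bucket_spinlocks() above. */
static int alloc_bucket_locks(struct bucket_locks *bl, unsigned int ncpus,
                              unsigned int locks_per_cpu,
                              unsigned int max_locks)
{
        unsigned int i, size = round_up_pow2(ncpus * locks_per_cpu);

        if (size > max_locks)       /* max_locks: a power of two */
                size = max_locks;

        bl->locks = malloc(size * sizeof(*bl->locks));
        if (!bl->locks)
                return -1;
        for (i = 0; i < size; i++)
                pthread_mutex_init(&bl->locks[i], NULL);
        bl->mask = size - 1;
        return 0;
}

/* Equivalent of ila_get_lock(): hash the key, mask into the array. */
static pthread_mutex_t *get_lock(struct bucket_locks *bl, unsigned int hash)
{
        return &bl->locks[hash & bl->mask];
}

int main(void)
{
        struct bucket_locks bl;

        if (alloc_bucket_locks(&bl, 4, 10, 1024))
                return 1;
        pthread_mutex_lock(get_lock(&bl, 0xdeadbeef));
        pthread_mutex_unlock(get_lock(&bl, 0xdeadbeef));
        free(bl.locks);
        return 0;
}
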
@@ -71,7 +49,7 @@ static inline u32 ila_locator_hash(struct ila_locator loc)
 static inline spinlock_t *ila_get_lock(struct ila_net *ilan,
                                       struct ila_locator loc)
 {
-       return &ilan->locks[ila_locator_hash(loc) & ilan->locks_mask];
+       return &ilan->xlat.locks[ila_locator_hash(loc) & ilan->xlat.locks_mask];
 }
 
 static inline int ila_cmp_wildcards(struct ila_map *ila,
@@ -115,16 +93,6 @@ static const struct rhashtable_params rht_params = {
        .obj_cmpfn = ila_cmpfn,
 };
 
-static struct genl_family ila_nl_family;
-
-static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
-       [ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
-       [ILA_ATTR_LOCATOR_MATCH] = { .type = NLA_U64, },
-       [ILA_ATTR_IFINDEX] = { .type = NLA_U32, },
-       [ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, },
-       [ILA_ATTR_IDENT_TYPE] = { .type = NLA_U8, },
-};
-
 static int parse_nl_config(struct genl_info *info,
                           struct ila_xlat_params *xp)
 {
@@ -162,7 +130,7 @@ static inline struct ila_map *ila_lookup_wildcards(struct ila_addr *iaddr,
 {
        struct ila_map *ila;
 
-       ila = rhashtable_lookup_fast(&ilan->rhash_table, &iaddr->loc,
+       ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &iaddr->loc,
                                     rht_params);
        while (ila) {
                if (!ila_cmp_wildcards(ila, iaddr, ifindex))
@@ -179,7 +147,7 @@ static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *xp,
 {
        struct ila_map *ila;
 
-       ila = rhashtable_lookup_fast(&ilan->rhash_table,
+       ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
                                     &xp->ip.locator_match,
                                     rht_params);
        while (ila) {
@@ -196,9 +164,9 @@ static inline void ila_release(struct ila_map *ila)
        kfree_rcu(ila, rcu);
 }
 
-static void ila_free_cb(void *ptr, void *arg)
+static void ila_free_node(struct ila_map *ila)
 {
-       struct ila_map *ila = (struct ila_map *)ptr, *next;
+       struct ila_map *next;
 
        /* Assume rcu_readlock held */
        while (ila) {
@@ -208,6 +176,11 @@ static void ila_free_cb(void *ptr, void *arg)
        }
 }
 
+static void ila_free_cb(void *ptr, void *arg)
+{
+       ila_free_node((struct ila_map *)ptr);
+}
+
 static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila);
 
 static unsigned int
@@ -235,7 +208,7 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
        spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
        int err = 0, order;
 
-       if (!ilan->hooks_registered) {
+       if (!ilan->xlat.hooks_registered) {
                /* We defer registering net hooks in the namespace until the
                 * first mapping is added.
                 */
@@ -244,7 +217,7 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
                if (err)
                        return err;
 
-               ilan->hooks_registered = true;
+               ilan->xlat.hooks_registered = true;
        }
 
        ila = kzalloc(sizeof(*ila), GFP_KERNEL);
@@ -259,12 +232,12 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
 
        spin_lock(lock);
 
-       head = rhashtable_lookup_fast(&ilan->rhash_table,
+       head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
                                      &xp->ip.locator_match,
                                      rht_params);
        if (!head) {
                /* New entry for the rhash_table */
-               err = rhashtable_lookup_insert_fast(&ilan->rhash_table,
+               err = rhashtable_lookup_insert_fast(&ilan->xlat.rhash_table,
                                                    &ila->node, rht_params);
        } else {
                struct ila_map *tila = head, *prev = NULL;
@@ -290,7 +263,7 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
                } else {
                        /* Make this ila new head */
                        RCU_INIT_POINTER(ila->next, head);
-                       err = rhashtable_replace_fast(&ilan->rhash_table,
+                       err = rhashtable_replace_fast(&ilan->xlat.rhash_table,
                                                      &head->node,
                                                      &ila->node, rht_params);
                        if (err)
@@ -316,7 +289,7 @@ static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp)
 
        spin_lock(lock);
 
-       head = rhashtable_lookup_fast(&ilan->rhash_table,
+       head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
                                      &xp->ip.locator_match, rht_params);
        ila = head;
 
@@ -346,15 +319,15 @@ static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp)
                                 * table
                                 */
                                err = rhashtable_replace_fast(
-                                       &ilan->rhash_table, &ila->node,
+                                       &ilan->xlat.rhash_table, &ila->node,
                                        &head->node, rht_params);
                                if (err)
                                        goto out;
                        } else {
                                /* Entry no longer used */
-                               err = rhashtable_remove_fast(&ilan->rhash_table,
-                                                            &ila->node,
-                                                            rht_params);
+                               err = rhashtable_remove_fast(
+                                               &ilan->xlat.rhash_table,
+                                               &ila->node, rht_params);
                        }
                }
 
@@ -369,7 +342,7 @@ static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp)
        return err;
 }
 
-static int ila_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info)
+int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info)
 {
        struct net *net = genl_info_net(info);
        struct ila_xlat_params p;
@@ -382,7 +355,7 @@ static int ila_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info)
        return ila_add_mapping(net, &p);
 }
 
-static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
+int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
 {
        struct net *net = genl_info_net(info);
        struct ila_xlat_params xp;
@@ -397,6 +370,59 @@ static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
        return 0;
 }
 
+static inline spinlock_t *lock_from_ila_map(struct ila_net *ilan,
+                                           struct ila_map *ila)
+{
+       return ila_get_lock(ilan, ila->xp.ip.locator_match);
+}
+
+int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
+{
+       struct net *net = genl_info_net(info);
+       struct ila_net *ilan = net_generic(net, ila_net_id);
+       struct rhashtable_iter iter;
+       struct ila_map *ila;
+       spinlock_t *lock;
+       int ret;
+
+       ret = rhashtable_walk_init(&ilan->xlat.rhash_table, &iter, GFP_KERNEL);
+       if (ret)
+               goto done;
+
+       rhashtable_walk_start(&iter);
+
+       for (;;) {
+               ila = rhashtable_walk_next(&iter);
+
+               if (IS_ERR(ila)) {
+                       if (PTR_ERR(ila) == -EAGAIN)
+                               continue;
+                       ret = PTR_ERR(ila);
+                       goto done;
+               } else if (!ila) {
+                       break;
+               }
+
+               lock = lock_from_ila_map(ilan, ila);
+
+               spin_lock(lock);
+
+               ret = rhashtable_remove_fast(&ilan->xlat.rhash_table,
+                                            &ila->node, rht_params);
+               if (!ret)
+                       ila_free_node(ila);
+
+               spin_unlock(lock);
+
+               if (ret)
+                       break;
+       }
+
+done:
+       rhashtable_walk_stop(&iter);
+       return ret;
+}
+
 static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg)
 {
        if (nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR,
@@ -434,7 +460,7 @@ static int ila_dump_info(struct ila_map *ila,
        return -EMSGSIZE;
 }
 
-static int ila_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
+int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
 {
        struct net *net = genl_info_net(info);
        struct ila_net *ilan = net_generic(net, ila_net_id);
@@ -475,27 +501,34 @@ static int ila_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
 
 struct ila_dump_iter {
        struct rhashtable_iter rhiter;
+       int skip;
 };
 
-static int ila_nl_dump_start(struct netlink_callback *cb)
+int ila_xlat_nl_dump_start(struct netlink_callback *cb)
 {
        struct net *net = sock_net(cb->skb->sk);
        struct ila_net *ilan = net_generic(net, ila_net_id);
-       struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
+       struct ila_dump_iter *iter;
+       int ret;
 
-       if (!iter) {
-               iter = kmalloc(sizeof(*iter), GFP_KERNEL);
-               if (!iter)
-                       return -ENOMEM;
+       iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+       if (!iter)
+               return -ENOMEM;
 
-               cb->args[0] = (long)iter;
+       ret = rhashtable_walk_init(&ilan->xlat.rhash_table, &iter->rhiter,
+                                  GFP_KERNEL);
+       if (ret) {
+               kfree(iter);
+               return ret;
        }
 
-       return rhashtable_walk_init(&ilan->rhash_table, &iter->rhiter,
-                                   GFP_KERNEL);
+       iter->skip = 0;
+       cb->args[0] = (long)iter;
+
+       return ret;
 }
 
-static int ila_nl_dump_done(struct netlink_callback *cb)
+int ila_xlat_nl_dump_done(struct netlink_callback *cb)
 {
        struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
 
@@ -506,24 +539,49 @@ static int ila_nl_dump_done(struct netlink_callback *cb)
        return 0;
 }
 
-static int ila_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
+int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
        struct rhashtable_iter *rhiter = &iter->rhiter;
+       int skip = iter->skip;
        struct ila_map *ila;
        int ret;
 
        rhashtable_walk_start(rhiter);
 
-       for (;;) {
-               ila = rhashtable_walk_next(rhiter);
+       /* Get first entry */
+       ila = rhashtable_walk_peek(rhiter);
+
+       if (ila && !IS_ERR(ila) && skip) {
+               /* Skip over visited entries */
 
+               while (ila && skip) {
+                       /* Skip over any ila entries in this list that we
+                        * have already dumped.
+                        */
+                       ila = rcu_access_pointer(ila->next);
+                       skip--;
+               }
+       }
+
+       skip = 0;
+
+       for (;;) {
                if (IS_ERR(ila)) {
-                       if (PTR_ERR(ila) == -EAGAIN)
-                               continue;
                        ret = PTR_ERR(ila);
-                       goto done;
+                       if (ret == -EAGAIN) {
+                               /* Table has changed and iter has reset. Return
+                                * -EAGAIN to the application even if we have
+                                * written data to the skb. The application
+                                * needs to deal with this.
+                                */
+
+                               goto out_ret;
+                       } else {
+                               break;
+                       }
                } else if (!ila) {
+                       ret = 0;
                        break;
                }
 
@@ -532,90 +590,54 @@ static int ila_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                             cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                             skb, ILA_CMD_GET);
                        if (ret)
-                               goto done;
+                               goto out;
 
+                       skip++;
                        ila = rcu_access_pointer(ila->next);
                }
+
+               skip = 0;
+               ila = rhashtable_walk_next(rhiter);
        }
 
-       ret = skb->len;
+out:
+       iter->skip = skip;
+       ret = (skb->len ? : ret);
 
-done:
+out_ret:
        rhashtable_walk_stop(rhiter);
        return ret;
 }
 
-static const struct genl_ops ila_nl_ops[] = {
-       {
-               .cmd = ILA_CMD_ADD,
-               .doit = ila_nl_cmd_add_mapping,
-               .policy = ila_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-       },
-       {
-               .cmd = ILA_CMD_DEL,
-               .doit = ila_nl_cmd_del_mapping,
-               .policy = ila_nl_policy,
-               .flags = GENL_ADMIN_PERM,
-       },
-       {
-               .cmd = ILA_CMD_GET,
-               .doit = ila_nl_cmd_get_mapping,
-               .start = ila_nl_dump_start,
-               .dumpit = ila_nl_dump,
-               .done = ila_nl_dump_done,
-               .policy = ila_nl_policy,
-       },
-};
-
-static struct genl_family ila_nl_family __ro_after_init = {
-       .hdrsize        = 0,
-       .name           = ILA_GENL_NAME,
-       .version        = ILA_GENL_VERSION,
-       .maxattr        = ILA_ATTR_MAX,
-       .netnsok        = true,
-       .parallel_ops   = true,
-       .module         = THIS_MODULE,
-       .ops            = ila_nl_ops,
-       .n_ops          = ARRAY_SIZE(ila_nl_ops),
-};
-
 #define ILA_HASH_TABLE_SIZE 1024
 
-static __net_init int ila_init_net(struct net *net)
+int ila_xlat_init_net(struct net *net)
 {
-       int err;
        struct ila_net *ilan = net_generic(net, ila_net_id);
+       int err;
 
        err = alloc_ila_locks(ilan);
        if (err)
                return err;
 
-       rhashtable_init(&ilan->rhash_table, &rht_params);
+       rhashtable_init(&ilan->xlat.rhash_table, &rht_params);
 
        return 0;
 }
 
-static __net_exit void ila_exit_net(struct net *net)
+void ila_xlat_exit_net(struct net *net)
 {
        struct ila_net *ilan = net_generic(net, ila_net_id);
 
-       rhashtable_free_and_destroy(&ilan->rhash_table, ila_free_cb, NULL);
+       rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);
 
-       kvfree(ilan->locks);
+       free_bucket_spinlocks(ilan->xlat.locks);
 
-       if (ilan->hooks_registered)
+       if (ilan->xlat.hooks_registered)
                nf_unregister_net_hooks(net, ila_nf_hook_ops,
                                        ARRAY_SIZE(ila_nf_hook_ops));
 }
 
-static struct pernet_operations ila_net_ops = {
-       .init = ila_init_net,
-       .exit = ila_exit_net,
-       .id   = &ila_net_id,
-       .size = sizeof(struct ila_net),
-};
-
 static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
 {
        struct ila_map *ila;
@@ -642,28 +664,3 @@ static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
        return 0;
 }
 
-int __init ila_xlat_init(void)
-{
-       int ret;
-
-       ret = register_pernet_device(&ila_net_ops);
-       if (ret)
-               goto exit;
-
-       ret = genl_register_family(&ila_nl_family);
-       if (ret < 0)
-               goto unregister;
-
-       return 0;
-
-unregister:
-       unregister_pernet_device(&ila_net_ops);
-exit:
-       return ret;
-}
-
-void ila_xlat_fini(void)
-{
-       genl_unregister_family(&ila_nl_family);
-       unregister_pernet_device(&ila_net_ops);
-}
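
The rewritten ila_xlat_nl_dump() above makes a dump resumable in the middle of a chain of ila_map entries sharing one locator: iter->skip counts how many chained entries were already emitted, rhashtable_walk_peek() revisits the current node without advancing, and a resize mid-walk is reported to the application as -EAGAIN rather than silently restarting. A runnable sketch of the same resume-by-skip bookkeeping over chained buckets (plain C, no kernel APIs; the resize/-EAGAIN case is left out).

#include <stdio.h>

/* Each slot holds a chain of entries sharing a key, like the ila_map
 * lists hanging off one rhashtable node above. */
struct entry { int val; struct entry *next; };

struct dump_state {
        int bucket;     /* which chain we are on (the rhiter's role) */
        int skip;       /* entries already emitted from that chain */
};

/* Emit up to 'budget' values per call, like one netlink skb's worth.
 * Returns the number emitted; state records where to resume. */
static int dump(struct entry **tbl, int nbuckets,
                struct dump_state *st, int budget)
{
        int emitted = 0;

        while (st->bucket < nbuckets) {
                struct entry *e = tbl[st->bucket];
                int pos = 0;

                /* Skip entries already dumped on a previous call. */
                while (e && pos < st->skip) {
                        e = e->next;
                        pos++;
                }
                for (; e; e = e->next, pos++) {
                        if (emitted == budget) {
                                st->skip = pos;   /* resume point */
                                return emitted;
                        }
                        printf("%d ", e->val);
                        emitted++;
                }
                st->bucket++;
                st->skip = 0;   /* next chain starts from its head */
        }
        return emitted;
}

int main(void)
{
        struct entry c2 = { 3, 0 }, c1 = { 2, &c2 }, c0 = { 1, &c1 };
        struct entry d0 = { 4, 0 };
        struct entry *tbl[] = { &c0, &d0 };
        struct dump_state st = { 0, 0 };

        /* A budget of 2 forces a mid-chain stop and a resume, the
         * case the iter->skip field above exists for. */
        while (dump(tbl, 2, &st, 2) == 2)
                printf("| ");
        printf("\n");   /* prints: 1 2 | 3 4 | */
        return 0;
}
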
index 2febe26de6a150155e269da0c38e5cb1122aca8d..595ad408dba09184eb814eee1870e04c17b79f77 100644 (file)
@@ -113,9 +113,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
                        bool dev_match = (sk->sk_bound_dev_if == dif ||
                                          sk->sk_bound_dev_if == sdif);
 
-                       if (exact_dif && !dev_match)
+                       if (!dev_match)
                                return -1;
-                       if (sk->sk_bound_dev_if && dev_match)
+                       if (sk->sk_bound_dev_if)
                                score++;
                }
                if (sk->sk_incoming_cpu == raw_smp_processor_id())
index 39d1d487eca25faceacbc3619fc6c4c38088d62a..1fb2f3118d60c73433e09f3a71abdbf9a8d92227 100644 (file)
@@ -167,8 +167,9 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags)
        return f6i;
 }
 
-void fib6_info_destroy(struct fib6_info *f6i)
+void fib6_info_destroy_rcu(struct rcu_head *head)
 {
+       struct fib6_info *f6i = container_of(head, struct fib6_info, rcu);
        struct rt6_exception_bucket *bucket;
        struct dst_metrics *m;
 
@@ -206,7 +207,7 @@ void fib6_info_destroy(struct fib6_info *f6i)
 
        kfree(f6i);
 }
-EXPORT_SYMBOL_GPL(fib6_info_destroy);
+EXPORT_SYMBOL_GPL(fib6_info_destroy_rcu);
 
 static struct fib6_node *node_alloc(struct net *net)
 {
index 3eee7637bdfe6cb24addab46b63073c04bdf88bb..cb54a8a3c2735221ec0ee1feaa63c28d3383b5cf 100644 (file)
@@ -373,7 +373,6 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
        if (olen > 0) {
                struct msghdr msg;
                struct flowi6 flowi6;
-               struct sockcm_cookie sockc_junk;
                struct ipcm6_cookie ipc6;
 
                err = -ENOMEM;
@@ -392,7 +391,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
                memset(&flowi6, 0, sizeof(flowi6));
 
                ipc6.opt = fl->opt;
-               err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6, &sockc_junk);
+               err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6);
                if (err)
                        goto done;
                err = -EINVAL;
index c8cf2fdbb13b88cc1bf6b494a75407cdc16977eb..367177786e342e64912d75e1c751378179348914 100644 (file)
@@ -990,6 +990,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
 
                dsfield = key->tos;
+               if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
+                       goto tx_err;
                md = ip_tunnel_info_opts(tun_info);
                if (!md)
                        goto tx_err;
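
The added TUNNEL_ERSPAN_OPT check makes the collect_md transmit path verify that the attached tunnel metadata actually carries ERSPAN options before ip_tunnel_info_opts() is consulted, instead of reading option fields that were never supplied.
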
index f08d34491eceeea4192bdb0482a61394da057476..6242682be876fd1193c8c3dac2b156265830076a 100644 (file)
 #include <net/inet_ecn.h>
 #include <net/dst_metadata.h>
 
-int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
+                               struct sk_buff *skb)
 {
        void (*edemux)(struct sk_buff *skb);
 
-       /* if ingress device is enslaved to an L3 master device pass the
-        * skb to its handler for processing
-        */
-       skb = l3mdev_ip6_rcv(skb);
-       if (!skb)
-               return NET_RX_SUCCESS;
-
        if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
                const struct inet6_protocol *ipprot;
 
@@ -67,20 +61,73 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
        }
        if (!skb_valid_dst(skb))
                ip6_route_input(skb);
+}
+
+int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+       /* if ingress device is enslaved to an L3 master device pass the
+        * skb to its handler for processing
+        */
+       skb = l3mdev_ip6_rcv(skb);
+       if (!skb)
+               return NET_RX_SUCCESS;
+       ip6_rcv_finish_core(net, sk, skb);
 
        return dst_input(skb);
 }
 
-int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+static void ip6_sublist_rcv_finish(struct list_head *head)
+{
+       struct sk_buff *skb, *next;
+
+       list_for_each_entry_safe(skb, next, head, list)
+               dst_input(skb);
+}
+
+static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
+                               struct list_head *head)
+{
+       struct dst_entry *curr_dst = NULL;
+       struct sk_buff *skb, *next;
+       struct list_head sublist;
+
+       INIT_LIST_HEAD(&sublist);
+       list_for_each_entry_safe(skb, next, head, list) {
+               struct dst_entry *dst;
+
+               list_del(&skb->list);
+               /* if ingress device is enslaved to an L3 master device pass the
+                * skb to its handler for processing
+                */
+               skb = l3mdev_ip6_rcv(skb);
+               if (!skb)
+                       continue;
+               ip6_rcv_finish_core(net, sk, skb);
+               dst = skb_dst(skb);
+               if (curr_dst != dst) {
+                       /* dispatch old sublist */
+                       if (!list_empty(&sublist))
+                               ip6_sublist_rcv_finish(&sublist);
+                       /* start new sublist */
+                       INIT_LIST_HEAD(&sublist);
+                       curr_dst = dst;
+               }
+               list_add_tail(&skb->list, &sublist);
+       }
+       /* dispatch final sublist */
+       ip6_sublist_rcv_finish(&sublist);
+}
+
+static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
+                                   struct net *net)
 {
        const struct ipv6hdr *hdr;
        u32 pkt_len;
        struct inet6_dev *idev;
-       struct net *net = dev_net(skb->dev);
 
        if (skb->pkt_type == PACKET_OTHERHOST) {
                kfree_skb(skb);
-               return NET_RX_DROP;
+               return NULL;
        }
 
        rcu_read_lock();
@@ -196,7 +243,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
                if (ipv6_parse_hopopts(skb) < 0) {
                        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
                        rcu_read_unlock();
-                       return NET_RX_DROP;
+                       return NULL;
                }
        }
 
@@ -205,15 +252,67 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
        /* Must drop socket now because of tproxy. */
        skb_orphan(skb);
 
-       return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
-                      net, NULL, skb, dev, NULL,
-                      ip6_rcv_finish);
+       return skb;
 err:
        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
 drop:
        rcu_read_unlock();
        kfree_skb(skb);
-       return NET_RX_DROP;
+       return NULL;
+}
+
+int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+{
+       struct net *net = dev_net(skb->dev);
+
+       skb = ip6_rcv_core(skb, dev, net);
+       if (skb == NULL)
+               return NET_RX_DROP;
+       return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
+                      net, NULL, skb, dev, NULL,
+                      ip6_rcv_finish);
+}
+
+static void ip6_sublist_rcv(struct list_head *head, struct net_device *dev,
+                           struct net *net)
+{
+       NF_HOOK_LIST(NFPROTO_IPV6, NF_INET_PRE_ROUTING, net, NULL,
+                    head, dev, NULL, ip6_rcv_finish);
+       ip6_list_rcv_finish(net, NULL, head);
+}
+
+/* Receive a list of IPv6 packets */
+void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
+                  struct net_device *orig_dev)
+{
+       struct net_device *curr_dev = NULL;
+       struct net *curr_net = NULL;
+       struct sk_buff *skb, *next;
+       struct list_head sublist;
+
+       INIT_LIST_HEAD(&sublist);
+       list_for_each_entry_safe(skb, next, head, list) {
+               struct net_device *dev = skb->dev;
+               struct net *net = dev_net(dev);
+
+               list_del(&skb->list);
+               skb = ip6_rcv_core(skb, dev, net);
+               if (skb == NULL)
+                       continue;
+
+               if (curr_dev != dev || curr_net != net) {
+                       /* dispatch old sublist */
+                       if (!list_empty(&sublist))
+                               ip6_sublist_rcv(&sublist, curr_dev, curr_net);
+                       /* start new sublist */
+                       INIT_LIST_HEAD(&sublist);
+                       curr_dev = dev;
+                       curr_net = net;
+               }
+               list_add_tail(&skb->list, &sublist);
+       }
+       /* dispatch final sublist */
+       ip6_sublist_rcv(&sublist, curr_dev, curr_net);
 }
 
 /*
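
ipv6_list_rcv() and ip6_list_rcv_finish() above apply the same batching trick at two levels: walk the incoming list and, whenever the grouping key changes (device plus netns at the hook level, dst_entry at the finish level), flush the accumulated sublist so each downstream stage sees the longest possible same-key run. A minimal runnable sketch of that run-splitting, with an int key standing in for the (dev, net) pair or the dst pointer.

#include <stdio.h>

struct pkt { int key; struct pkt *next; };  /* key ~ (dev, net) or dst */

static void dispatch(struct pkt *run, int n)
{
        printf("run of %d pkts with key %d\n", n, run->key);
}

/* Split a list into consecutive same-key runs and dispatch each run,
 * as ipv6_list_rcv() does before handing sublists to the next stage. */
static void list_rcv(struct pkt *head)
{
        struct pkt *run = NULL, *p;
        int n = 0, cur = 0;

        for (p = head; p; p = p->next) {
                if (!run || p->key != cur) {
                        if (run)
                                dispatch(run, n);   /* flush old sublist */
                        run = p;                    /* start new sublist */
                        cur = p->key;
                        n = 0;
                }
                n++;
        }
        if (run)
                dispatch(run, n);                   /* final sublist */
}

int main(void)
{
        struct pkt d = { 7, 0 }, c = { 7, &d }, b = { 5, &c }, a = { 5, &b };

        list_rcv(&a);   /* two runs: key 5 twice, then key 7 twice */
        return 0;
}
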
index 5b3f2f89ef41c3276ef4b478683bd9ab04a1d3da..37ff4805b20c73fd3c404a0904985bae68f21f23 100644 (file)
@@ -163,11 +163,11 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph,
        return len;
 }
 
-static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
-                                        struct sk_buff *skb)
+static struct sk_buff *ipv6_gro_receive(struct list_head *head,
+                                       struct sk_buff *skb)
 {
        const struct net_offload *ops;
-       struct sk_buff **pp = NULL;
+       struct sk_buff *pp = NULL;
        struct sk_buff *p;
        struct ipv6hdr *iph;
        unsigned int nlen;
@@ -214,7 +214,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
        flush--;
        nlen = skb_network_header_len(skb);
 
-       for (p = *head; p; p = p->next) {
+       list_for_each_entry(p, head, list) {
                const struct ipv6hdr *iph2;
                __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */
 
@@ -263,8 +263,8 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
        return pp;
 }
 
-static struct sk_buff **sit_ip6ip6_gro_receive(struct sk_buff **head,
-                                              struct sk_buff *skb)
+static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head,
+                                             struct sk_buff *skb)
 {
        /* Common GRO receive for SIT and IP6IP6 */
 
@@ -278,8 +278,8 @@ static struct sk_buff **sit_ip6ip6_gro_receive(struct sk_buff **head,
        return ipv6_gro_receive(head, skb);
 }
 
-static struct sk_buff **ip4ip6_gro_receive(struct sk_buff **head,
-                                          struct sk_buff *skb)
+static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
+                                         struct sk_buff *skb)
 {
        /* Common GRO receive for SIT and IP6IP6 */
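
The *_gro_receive handlers throughout this merge move from the old open-coded struct sk_buff ** chains, walked as for (p = *head; p; p = p->next), to struct list_head with list_for_each_entry(), returning a plain struct sk_buff *. The conversion leans on the intrusive-list-plus-container_of idiom; below is a self-contained restatement of just that idiom with a minimal list implementation, not the kernel's list.h.

#include <stddef.h>
#include <stdio.h>

/* Minimal intrusive doubly linked list, shaped like the kernel's. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
}

struct pkt {
        int id;
        struct list_head list;  /* like the sk_buff list linkage above */
};

int main(void)
{
        struct list_head head = { &head, &head };
        struct pkt a = { 1, { 0, 0 } }, b = { 2, { 0, 0 } };
        struct list_head *pos;

        list_add_tail(&a.list, &head);
        list_add_tail(&b.list, &head);

        /* The expansion of list_for_each_entry(p, &head, list): walk
         * the nodes and recover each containing pkt via container_of,
         * replacing the old p = p->next pointer chain. */
        for (pos = head.next; pos != &head; pos = pos->next) {
                struct pkt *p = container_of(pos, struct pkt, list);
                printf("pkt %d\n", p->id);
        }
        return 0;
}
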
 
index 021e5aef6ba31b7a9face6eb363a6409761385a7..8047fd41ba887fdb01607908c57034fdc8a5553d 100644 (file)
@@ -1219,12 +1219,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
        if (mtu < IPV6_MIN_MTU)
                return -EINVAL;
        cork->base.fragsize = mtu;
-       cork->base.gso_size = sk->sk_type == SOCK_DGRAM ? ipc6->gso_size : 0;
+       cork->base.gso_size = ipc6->gso_size;
+       cork->base.tx_flags = 0;
+       sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags);
 
        if (dst_allfrag(xfrm_dst_path(&rt->dst)))
                cork->base.flags |= IPCORK_ALLFRAG;
        cork->base.length = 0;
 
+       cork->base.transmit_time = ipc6->sockc.transmit_time;
+
        return 0;
 }
 
@@ -1237,8 +1241,7 @@ static int __ip6_append_data(struct sock *sk,
                             int getfrag(void *from, char *to, int offset,
                                         int len, int odd, struct sk_buff *skb),
                             void *from, int length, int transhdrlen,
-                            unsigned int flags, struct ipcm6_cookie *ipc6,
-                            const struct sockcm_cookie *sockc)
+                            unsigned int flags, struct ipcm6_cookie *ipc6)
 {
        struct sk_buff *skb, *skb_prev = NULL;
        unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
@@ -1248,7 +1251,6 @@ static int __ip6_append_data(struct sock *sk,
        int copy;
        int err;
        int offset = 0;
-       __u8 tx_flags = 0;
        u32 tskey = 0;
        struct rt6_info *rt = (struct rt6_info *)cork->dst;
        struct ipv6_txoptions *opt = v6_cork->opt;
@@ -1267,6 +1269,10 @@ static int __ip6_append_data(struct sock *sk,
        mtu = cork->gso_size ? IP6_MAX_MTU : cork->fragsize;
        orig_mtu = mtu;
 
+       if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
+           sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
+               tskey = sk->sk_tskey++;
+
        hh_len = LL_RESERVED_SPACE(rt->dst.dev);
 
        fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
@@ -1316,13 +1322,6 @@ static int __ip6_append_data(struct sock *sk,
            rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
                csummode = CHECKSUM_PARTIAL;
 
-       if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
-               sock_tx_timestamp(sk, sockc->tsflags, &tx_flags);
-               if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
-                   sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
-                       tskey = sk->sk_tskey++;
-       }
-
        /*
         * Let's try using as much space as possible.
         * Use MTU if total length of the message fits into the MTU.
@@ -1441,8 +1440,8 @@ static int __ip6_append_data(struct sock *sk,
                                    dst_exthdrlen);
 
                        /* Only the initial fragment is time stamped */
-                       skb_shinfo(skb)->tx_flags = tx_flags;
-                       tx_flags = 0;
+                       skb_shinfo(skb)->tx_flags = cork->tx_flags;
+                       cork->tx_flags = 0;
                        skb_shinfo(skb)->tskey = tskey;
                        tskey = 0;
 
@@ -1559,8 +1558,7 @@ int ip6_append_data(struct sock *sk,
                                int odd, struct sk_buff *skb),
                    void *from, int length, int transhdrlen,
                    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
-                   struct rt6_info *rt, unsigned int flags,
-                   const struct sockcm_cookie *sockc)
+                   struct rt6_info *rt, unsigned int flags)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
@@ -1588,7 +1586,7 @@ int ip6_append_data(struct sock *sk,
 
        return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
                                 &np->cork, sk_page_frag(sk), getfrag,
-                                from, length, transhdrlen, flags, ipc6, sockc);
+                                from, length, transhdrlen, flags, ipc6);
 }
 EXPORT_SYMBOL_GPL(ip6_append_data);
 
@@ -1672,6 +1670,8 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
 
+       skb->tstamp = cork->base.transmit_time;
+
        skb_dst_set(skb, dst_clone(&rt->dst));
        IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
        if (proto == IPPROTO_ICMPV6) {
@@ -1746,8 +1746,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
                             void *from, int length, int transhdrlen,
                             struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
                             struct rt6_info *rt, unsigned int flags,
-                            struct inet_cork_full *cork,
-                            const struct sockcm_cookie *sockc)
+                            struct inet_cork_full *cork)
 {
        struct inet6_cork v6_cork;
        struct sk_buff_head queue;
@@ -1775,7 +1774,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
        err = __ip6_append_data(sk, fl6, &queue, &cork->base, &v6_cork,
                                &current->task_frag, getfrag, from,
                                length + exthdrlen, transhdrlen + exthdrlen,
-                               flags, ipc6, sockc);
+                               flags, ipc6);
        if (err) {
                __ip6_flush_pending_frames(sk, &queue, cork, &v6_cork);
                return ERR_PTR(err);
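
The ip6_output.c changes hoist timestamp setup out of the append path: ip6_setup_cork() now derives tx_flags via sock_tx_timestamp() and records sockc.transmit_time once per cork, and __ip6_append_data() hands the flags to the first fragment only, zeroing them afterwards, which is what the "Only the initial fragment is time stamped" comment refers to. A trivial runnable sketch of that consume-once handoff:

#include <stdio.h>

struct cork { unsigned int tx_flags; };

/* Stamp only the first buffer built from this cork: copy the flags,
 * then clear them so later fragments see zero, as __ip6_append_data()
 * does with cork->tx_flags above. */
static unsigned int take_tx_flags(struct cork *c)
{
        unsigned int f = c->tx_flags;

        c->tx_flags = 0;
        return f;
}

int main(void)
{
        struct cork c = { 0x3 };
        int i;

        for (i = 0; i < 3; i++)
                printf("fragment %d tx_flags=%#x\n", i, take_tx_flags(&c));
        return 0;   /* only fragment 0 reports 0x3 */
}
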
index 0d0f0053bb1151db200c2fdf403009fea84f4ee3..d0b7e0249c133619fbb081881e054cab393ebb49 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/compat.h>
+#include <linux/rhashtable.h>
 #include <net/protocol.h>
 #include <linux/skbuff.h>
 #include <net/raw.h>
index 4d780c7f013060732dda2db760d7ba0474c812e3..fabe3ba1bddcb0ed92f9f0172ddff946786796e6 100644 (file)
@@ -489,7 +489,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                struct ipv6_txoptions *opt = NULL;
                struct msghdr msg;
                struct flowi6 fl6;
-               struct sockcm_cookie sockc_junk;
                struct ipcm6_cookie ipc6;
 
                memset(&fl6, 0, sizeof(fl6));
@@ -522,7 +521,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                msg.msg_control = (void *)(opt+1);
                ipc6.opt = opt;
 
-               retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, &ipc6, &sockc_junk);
+               retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, &ipc6);
                if (retv)
                        goto done;
 update:
index 975021df7c1cf2eae6897e3dd57ea20998f4ea90..c0c74088f2afa841de20ed3382f7c6c6087691c3 100644 (file)
@@ -2082,7 +2082,8 @@ void ipv6_mc_dad_complete(struct inet6_dev *idev)
                mld_send_initial_cr(idev);
                idev->mc_dad_count--;
                if (idev->mc_dad_count)
-                       mld_dad_start_timer(idev, idev->mc_maxdelay);
+                       mld_dad_start_timer(idev,
+                                           unsolicited_report_interval(idev));
        }
 }
 
@@ -2094,7 +2095,8 @@ static void mld_dad_timer_expire(struct timer_list *t)
        if (idev->mc_dad_count) {
                idev->mc_dad_count--;
                if (idev->mc_dad_count)
-                       mld_dad_start_timer(idev, idev->mc_maxdelay);
+                       mld_dad_start_timer(idev,
+                                           unsolicited_report_interval(idev));
        }
        in6_dev_put(idev);
 }
@@ -2452,7 +2454,8 @@ static void mld_ifc_timer_expire(struct timer_list *t)
        if (idev->mc_ifc_count) {
                idev->mc_ifc_count--;
                if (idev->mc_ifc_count)
-                       mld_ifc_start_timer(idev, idev->mc_maxdelay);
+                       mld_ifc_start_timer(idev,
+                                           unsolicited_report_interval(idev));
        }
        in6_dev_put(idev);
 }
index 5e0332014c1738999e680c1853829f384e880284..a452d99c9f5281b5e5d7e6f0162611deeb82212d 100644 (file)
@@ -107,7 +107,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
        if (hdr == NULL)
                goto err_reg;
 
-       net->nf_frag.sysctl.frags_hdr = hdr;
+       net->nf_frag_frags_hdr = hdr;
        return 0;
 
 err_reg:
@@ -121,8 +121,8 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
 {
        struct ctl_table *table;
 
-       table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg;
-       unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr);
+       table = net->nf_frag_frags_hdr->ctl_table_arg;
+       unregister_net_sysctl_table(net->nf_frag_frags_hdr);
        if (!net_eq(net, &init_net))
                kfree(table);
 }
index b397a8fe88b9391e462146391901a360969547c0..c6bf580d0f331d8e22df15365c7084d9a90899ec 100644 (file)
@@ -36,7 +36,7 @@ static const struct nf_loginfo default_loginfo = {
 };
 
 /* One level of recursion won't kill us */
-static void dump_ipv6_packet(struct nf_log_buf *m,
+static void dump_ipv6_packet(struct net *net, struct nf_log_buf *m,
                             const struct nf_loginfo *info,
                             const struct sk_buff *skb, unsigned int ip6hoff,
                             int recurse)
@@ -258,7 +258,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,
                        /* Max length: 3+maxlen */
                        if (recurse) {
                                nf_log_buf_add(m, "[");
-                               dump_ipv6_packet(m, info, skb,
+                               dump_ipv6_packet(net, m, info, skb,
                                                 ptr + sizeof(_icmp6h), 0);
                                nf_log_buf_add(m, "] ");
                        }
@@ -278,7 +278,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,
 
        /* Max length: 15 "UID=4294967295 " */
        if ((logflags & NF_LOG_UID) && recurse)
-               nf_log_dump_sk_uid_gid(m, skb->sk);
+               nf_log_dump_sk_uid_gid(net, m, skb->sk);
 
        /* Max length: 16 "MARK=0xFFFFFFFF " */
        if (recurse && skb->mark)
@@ -365,7 +365,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
        if (in != NULL)
                dump_ipv6_mac_header(m, loginfo, skb);
 
-       dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1);
+       dump_ipv6_packet(net, m, loginfo, skb, skb_network_offset(skb), 1);
 
        nf_log_buf_close(m);
 }
index 96f56bf49a30533ae693f11de286ff310a9fe356..4c04bccc74171058760b528f30ae917326e3b301 100644 (file)
@@ -62,7 +62,6 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        struct dst_entry *dst;
        struct rt6_info *rt;
        struct pingfakehdr pfh;
-       struct sockcm_cookie junk = {0};
        struct ipcm6_cookie ipc6;
 
        pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
@@ -119,7 +118,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        fl6.fl6_icmp_code = user_icmph.icmp6_code;
        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-       ipc6.tclass = np->tclass;
+       ipcm6_init_sk(&ipc6, np);
        fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
 
        dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, false);
@@ -142,13 +141,11 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        pfh.family = AF_INET6;
 
        ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
-       ipc6.dontfrag = np->dontfrag;
-       ipc6.opt = NULL;
 
        lock_sock(sk);
        err = ip6_append_data(sk, ping_getfrag, &pfh, len,
                              0, &ipc6, &fl6, rt,
-                             MSG_DONTWAIT, &junk);
+                             MSG_DONTWAIT);
 
        if (err) {
                ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev,
index ce6f0d15b5dd5d8a9531a8316a932d3d30a3491b..413d98bf24f4c9f9644b79590369b9188713926e 100644 (file)
@@ -620,7 +620,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
 
 static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
                        struct flowi6 *fl6, struct dst_entry **dstp,
-                       unsigned int flags)
+                       unsigned int flags, const struct sockcm_cookie *sockc)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct net *net = sock_net(sk);
@@ -650,6 +650,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
        skb->protocol = htons(ETH_P_IPV6);
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
+       skb->tstamp = sockc->transmit_time;
        skb_dst_set(skb, &rt->dst);
        *dstp = NULL;
 
@@ -766,7 +767,6 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        struct dst_entry *dst = NULL;
        struct raw6_frag_vec rfv;
        struct flowi6 fl6;
-       struct sockcm_cookie sockc;
        struct ipcm6_cookie ipc6;
        int addr_len = msg->msg_namelen;
        u16 proto;
@@ -790,10 +790,8 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        fl6.flowi6_mark = sk->sk_mark;
        fl6.flowi6_uid = sk->sk_uid;
 
-       ipc6.hlimit = -1;
-       ipc6.tclass = -1;
-       ipc6.dontfrag = -1;
-       ipc6.opt = NULL;
+       ipcm6_init(&ipc6);
+       ipc6.sockc.tsflags = sk->sk_tsflags;
 
        if (sin6) {
                if (addr_len < SIN6_LEN_RFC2133)
@@ -847,14 +845,13 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        if (fl6.flowi6_oif == 0)
                fl6.flowi6_oif = sk->sk_bound_dev_if;
 
-       sockc.tsflags = sk->sk_tsflags;
        if (msg->msg_controllen) {
                opt = &opt_space;
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(struct ipv6_txoptions);
                ipc6.opt = opt;
 
-               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6, &sockc);
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
@@ -921,13 +918,14 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
 back_from_confirm:
        if (inet->hdrincl)
-               err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags);
+               err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst,
+                                       msg->msg_flags, &ipc6.sockc);
        else {
                ipc6.opt = opt;
                lock_sock(sk);
                err = ip6_append_data(sk, raw6_getfrag, &rfv,
                        len, 0, &ipc6, &fl6, (struct rt6_info *)dst,
-                       msg->msg_flags, &sockc);
+                       msg->msg_flags);
 
                if (err)
                        ip6_flush_pending_frames(sk);
@@ -1334,7 +1332,7 @@ void raw6_proc_exit(void)
 }
 #endif /* CONFIG_PROC_FS */
 
-/* Same as inet6_dgram_ops, sans udp_poll_mask.  */
+/* Same as inet6_dgram_ops, sans udp_poll.  */
 const struct proto_ops inet6_sockraw_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
@@ -1344,7 +1342,7 @@ const struct proto_ops inet6_sockraw_ops = {
        .socketpair        = sock_no_socketpair,        /* a do nothing */
        .accept            = sock_no_accept,            /* a do nothing */
        .getname           = inet6_getname,
-       .poll_mask         = datagram_poll_mask,        /* ok           */
+       .poll              = datagram_poll,             /* ok           */
        .ioctl             = inet6_ioctl,               /* must change  */
        .listen            = sock_no_listen,            /* ok           */
        .shutdown          = inet_shutdown,             /* ok           */
index 0fdf2a55e746c9b66c018c2e357b473759afd83e..8d0ba757a46ce52bf85d5e88d43567b5bd96678d 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/net.h>
 #include <linux/in6.h>
 #include <linux/slab.h>
+#include <linux/rhashtable.h>
 
 #include <net/ipv6.h>
 #include <net/protocol.h>
index 33fb35cbfac132b1a85cd2c9ce62b4344cbe8afe..8546f94f30d47a203f3d2f4d73f3ece462bb4e4e 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/icmpv6.h>
 #include <linux/mroute6.h>
 #include <linux/slab.h>
+#include <linux/rhashtable.h>
 
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
@@ -373,7 +374,7 @@ static int seg6_hmac_init_algo(void)
                        return -ENOMEM;
 
                for_each_possible_cpu(cpu) {
-                       tfm = crypto_alloc_shash(algo->name, 0, GFP_KERNEL);
+                       tfm = crypto_alloc_shash(algo->name, 0, 0);
                        if (IS_ERR(tfm))
                                return PTR_ERR(tfm);
                        p_tfm = per_cpu_ptr(algo->tfms, cpu);
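
A note on the seg6_hmac change above: the third argument of crypto_alloc_shash() is a mask over CRYPTO_ALG_* type bits, not an allocation-flags parameter, so passing GFP_KERNEL there set unrelated bits in the type mask; 0 simply requests the default algorithm selection.
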
index 278e49cd67d4e2c7b0ab9138fabe84753d628b5a..e72947c99454e54fefee30efa8aeea9bc13908b5 100644 (file)
@@ -15,8 +15,8 @@
 #include <net/ip6_checksum.h>
 #include "ip6_offload.h"
 
-static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
-                                        struct sk_buff *skb)
+static struct sk_buff *tcp6_gro_receive(struct list_head *head,
+                                       struct sk_buff *skb)
 {
        /* Don't bother verifying checksum if we're going to flush anyway. */
        if (!NAPI_GRO_CB(skb)->flush &&
index e6645cae403ed81004404049a1b95927921bfff6..f6b96956a8edf4b9160425c89b28348af0b1bda7 100644 (file)
@@ -1141,13 +1141,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        int err;
        int is_udplite = IS_UDPLITE(sk);
        int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
-       struct sockcm_cookie sockc;
 
-       ipc6.hlimit = -1;
-       ipc6.tclass = -1;
-       ipc6.dontfrag = -1;
+       ipcm6_init(&ipc6);
        ipc6.gso_size = up->gso_size;
-       sockc.tsflags = sk->sk_tsflags;
+       ipc6.sockc.tsflags = sk->sk_tsflags;
 
        /* destination address check */
        if (sin6) {
@@ -1282,7 +1279,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
                if (err > 0)
                        err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
-                                                   &ipc6, &sockc);
+                                                   &ipc6);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
@@ -1376,7 +1373,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                skb = ip6_make_skb(sk, getfrag, msg, ulen,
                                   sizeof(struct udphdr), &ipc6,
                                   &fl6, (struct rt6_info *)dst,
-                                  msg->msg_flags, &cork, &sockc);
+                                  msg->msg_flags, &cork);
                err = PTR_ERR(skb);
                if (!IS_ERR_OR_NULL(skb))
                        err = udp_v6_send_skb(skb, &fl6, &cork.base);
@@ -1402,7 +1399,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        up->len += ulen;
        err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
                              &ipc6, &fl6, (struct rt6_info *)dst,
-                             corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, &sockc);
+                             corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
        if (err)
                udp_v6_flush_pending_frames(sk);
        else if (!corkreq)
index 03a2ff3fe1e697e752e2aa9f13703b6feaff0453..95dee9ca8d22186486b09ef7514ec69e0985ff3a 100644 (file)
@@ -114,8 +114,8 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
        return segs;
 }
 
-static struct sk_buff **udp6_gro_receive(struct sk_buff **head,
-                                        struct sk_buff *skb)
+static struct sk_buff *udp6_gro_receive(struct list_head *head,
+                                       struct sk_buff *skb)
 {
        struct udphdr *uh = udp_gro_udphdr(skb);
 
index 68e86257a549988b5f87098b24c8e3d0bd1dc1ce..893a022f962081416fa1b9e5f96416a8c2e92e5c 100644 (file)
@@ -1488,11 +1488,14 @@ static inline __poll_t iucv_accept_poll(struct sock *parent)
        return 0;
 }
 
-static __poll_t iucv_sock_poll_mask(struct socket *sock, __poll_t events)
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
+                           poll_table *wait)
 {
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);
 
@@ -2385,7 +2388,7 @@ static const struct proto_ops iucv_sock_ops = {
        .getname        = iucv_sock_getname,
        .sendmsg        = iucv_sock_sendmsg,
        .recvmsg        = iucv_sock_recvmsg,
-       .poll_mask      = iucv_sock_poll_mask,
+       .poll           = iucv_sock_poll,
        .ioctl          = sock_no_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
index 84b7d5c6fec81a7c62ed4744d48726dee8c7e426..d3601d421571b9825ff0a6cea9b75cb52fd51dea 100644 (file)
@@ -1336,9 +1336,9 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
        struct list_head *head;
        int index = 0;
 
-       /* For SOCK_SEQPACKET sock type, datagram_poll_mask checks the sk_state,
-        * so  we set sk_state, otherwise epoll_wait always returns right away
-        * with EPOLLHUP
+       /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
+        * we set sk_state, otherwise epoll_wait always returns right away with
+        * EPOLLHUP
         */
        kcm->sk.sk_state = TCP_ESTABLISHED;
 
@@ -1903,7 +1903,7 @@ static const struct proto_ops kcm_dgram_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      sock_no_getname,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        kcm_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
@@ -1924,7 +1924,7 @@ static const struct proto_ops kcm_seqpacket_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      sock_no_getname,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        kcm_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
index 8bdc1cbe490a4ae819db32851ea6a8184b0727b0..5e1d2946ffbf2a2cf4e65db44658c7f374e72e25 100644 (file)
@@ -3751,7 +3751,7 @@ static const struct proto_ops pfkey_ops = {
 
        /* Now the operations that really occur. */
        .release        =       pfkey_release,
-       .poll_mask      =       datagram_poll_mask,
+       .poll           =       datagram_poll,
        .sendmsg        =       pfkey_sendmsg,
        .recvmsg        =       pfkey_recvmsg,
 };
index 40261cb68e83686c73a3567062fbf28f4a3d3146..1ea285bad84b1664959bfbf44c8ca5f62e142d06 100644 (file)
@@ -322,8 +322,7 @@ int l2tp_session_register(struct l2tp_session *session,
 
        if (tunnel->version == L2TP_HDR_VER_3) {
                pn = l2tp_pernet(tunnel->l2tp_net);
-               g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
-                                               session->session_id);
+               g_head = l2tp_session_id_hash_2(pn, session->session_id);
 
                spin_lock_bh(&pn->l2tp_session_hlist_lock);
 
@@ -783,7 +782,7 @@ EXPORT_SYMBOL(l2tp_recv_common);
 
 /* Drop skbs from the session's reorder_q
  */
-int l2tp_session_queue_purge(struct l2tp_session *session)
+static int l2tp_session_queue_purge(struct l2tp_session *session)
 {
        struct sk_buff *skb = NULL;
        BUG_ON(!session);
@@ -794,7 +793,6 @@ int l2tp_session_queue_purge(struct l2tp_session *session)
        }
        return 0;
 }
-EXPORT_SYMBOL_GPL(l2tp_session_queue_purge);
 
 /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
  * here. The skb is not on a list when we get here.
@@ -1009,8 +1007,8 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
        return bufp - optr;
 }
 
-static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
-                         struct flowi *fl, size_t data_len)
+static void l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
+                          struct flowi *fl, size_t data_len)
 {
        struct l2tp_tunnel *tunnel = session->tunnel;
        unsigned int len = skb->len;
@@ -1052,8 +1050,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
                atomic_long_inc(&tunnel->stats.tx_errors);
                atomic_long_inc(&session->stats.tx_errors);
        }
-
-       return 0;
 }
 
 /* If caller requires the skb to have a ppp header, the header must be
@@ -1193,7 +1189,7 @@ static void l2tp_tunnel_destruct(struct sock *sk)
 
 /* When the tunnel is closed, all the attached sessions need to go too.
  */
-void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
+static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
 {
        int hash;
        struct hlist_node *walk;
@@ -1242,7 +1238,6 @@ void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
        }
        write_unlock_bh(&tunnel->hlist_lock);
 }
-EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
 
 /* Tunnel socket destroy hook for UDP encapsulation */
 static void l2tp_udp_encap_destroy(struct sock *sk)
index c199020f8a8a315590a53f89aaf9370debcb09db..a5c09d3a569877dbd945e105d3ad4c3d8520a514 100644 (file)
@@ -180,9 +180,6 @@ struct l2tp_tunnel {
        struct net              *l2tp_net;      /* the net we belong to */
 
        refcount_t              ref_count;
-#ifdef CONFIG_DEBUG_FS
-       void (*show)(struct seq_file *m, void *arg);
-#endif
        int (*recv_payload_hook)(struct sk_buff *skb);
        void (*old_sk_destruct)(struct sock *);
        struct sock             *sock;          /* Parent socket */
@@ -190,8 +187,6 @@ struct l2tp_tunnel {
                                                 * was created by userspace */
 
        struct work_struct      del_work;
-
-       uint8_t                 priv[0];        /* private data */
 };
 
 struct l2tp_nl_cmd_ops {
@@ -201,11 +196,6 @@ struct l2tp_nl_cmd_ops {
        int (*session_delete)(struct l2tp_session *session);
 };
 
-static inline void *l2tp_tunnel_priv(struct l2tp_tunnel *tunnel)
-{
-       return &tunnel->priv[0];
-}
-
 static inline void *l2tp_session_priv(struct l2tp_session *session)
 {
        return &session->priv[0];
@@ -229,7 +219,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
 int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
                         struct l2tp_tunnel_cfg *cfg);
 
-void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
 void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
 struct l2tp_session *l2tp_session_create(int priv_size,
                                         struct l2tp_tunnel *tunnel,
@@ -244,7 +233,6 @@ void l2tp_session_free(struct l2tp_session *session);
 void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
                      int length, int (*payload_hook)(struct sk_buff *skb));
-int l2tp_session_queue_purge(struct l2tp_session *session);
 int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
 void l2tp_session_set_header_len(struct l2tp_session *session, int version);
 
index e87686f7d63ca7d98f9bcb60e72d36a684324d21..b5d7dde003ef9f2fa6c39bf5940ee523cbc7ea53 100644 (file)
@@ -177,9 +177,6 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
                   atomic_long_read(&tunnel->stats.rx_packets),
                   atomic_long_read(&tunnel->stats.rx_bytes),
                   atomic_long_read(&tunnel->stats.rx_errors));
-
-       if (tunnel->show != NULL)
-               tunnel->show(m, tunnel);
 }
 
 static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
index 181073bf69251392c3a7fd23197a278f37dd67f0..a9c05b2bc1b0bc3471bbf62dc3b7c11e971a7f08 100644 (file)
@@ -613,7 +613,7 @@ static const struct proto_ops l2tp_ip_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = l2tp_ip_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = inet_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
index 336e4c00abbcdaef7385c90e24d2088131efe095..672e5b753738978893a4f15404fec4b70c3fabde 100644 (file)
@@ -500,7 +500,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        struct ip6_flowlabel *flowlabel = NULL;
        struct dst_entry *dst = NULL;
        struct flowi6 fl6;
-       struct sockcm_cookie sockc_unused = {0};
        struct ipcm6_cookie ipc6;
        int addr_len = msg->msg_namelen;
        int transhdrlen = 4; /* zero session-id */
@@ -525,9 +524,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        fl6.flowi6_mark = sk->sk_mark;
        fl6.flowi6_uid = sk->sk_uid;
 
-       ipc6.hlimit = -1;
-       ipc6.tclass = -1;
-       ipc6.dontfrag = -1;
+       ipcm6_init(&ipc6);
 
        if (lsa) {
                if (addr_len < SIN6_LEN_RFC2133)
@@ -575,8 +572,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                opt->tot_len = sizeof(struct ipv6_txoptions);
                ipc6.opt = opt;
 
-               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6,
-                                           &sockc_unused);
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
@@ -641,7 +637,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        err = ip6_append_data(sk, ip_generic_getfrag, msg,
                              ulen, transhdrlen, &ipc6,
                              &fl6, (struct rt6_info *)dst,
-                             msg->msg_flags, &sockc_unused);
+                             msg->msg_flags);
        if (err)
                ip6_flush_pending_frames(sk);
        else if (!(msg->msg_flags & MSG_MORE))
@@ -754,7 +750,7 @@ static const struct proto_ops l2tp_ip6_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = l2tp_ip6_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = inet6_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
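
Two related cleanups meet in the l2tp_ip6_sendmsg() hunks above: the sockcm cookie no longer has to be threaded through ip6_datagram_send_ctl() and ip6_append_data(), and the open-coded cookie defaults are replaced by ipcm6_init(). Judging purely by the three initializers deleted here, the helper is assumed to reduce to:

    /* Assumed shape of ipcm6_init(), inferred from the fields it replaces;
     * -1 means "fall back to the socket's default" for each field.
     */
    static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
    {
            *ipc6 = (struct ipcm6_cookie) {
                    .hlimit   = -1,
                    .tclass   = -1,
                    .dontfrag = -1,
            };
    }
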
index 55188382845c310c98eb86cdfc3b78e1d03e8e0f..9ac02c93df98cf6a7fd5b5136c17824a40d9884a 100644 (file)
@@ -424,12 +424,6 @@ static void pppol2tp_put_sk(struct rcu_head *head)
        sock_put(ps->__sk);
 }
 
-/* Called by l2tp_core when a session socket is being closed.
- */
-static void pppol2tp_session_close(struct l2tp_session *session)
-{
-}
-
 /* Really kill the session socket. (Called from sock_put() if
  * refcnt == 0.)
  */
@@ -573,7 +567,6 @@ static void pppol2tp_session_init(struct l2tp_session *session)
        struct dst_entry *dst;
 
        session->recv_skb = pppol2tp_recv;
-       session->session_close = pppol2tp_session_close;
 #if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
        session->show = pppol2tp_show;
 #endif
@@ -595,40 +588,113 @@ static void pppol2tp_session_init(struct l2tp_session *session)
        }
 }
 
+struct l2tp_connect_info {
+       u8 version;
+       int fd;
+       u32 tunnel_id;
+       u32 peer_tunnel_id;
+       u32 session_id;
+       u32 peer_session_id;
+};
+
+static int pppol2tp_sockaddr_get_info(const void *sa, int sa_len,
+                                     struct l2tp_connect_info *info)
+{
+       switch (sa_len) {
+       case sizeof(struct sockaddr_pppol2tp):
+       {
+               const struct sockaddr_pppol2tp *sa_v2in4 = sa;
+
+               if (sa_v2in4->sa_protocol != PX_PROTO_OL2TP)
+                       return -EINVAL;
+
+               info->version = 2;
+               info->fd = sa_v2in4->pppol2tp.fd;
+               info->tunnel_id = sa_v2in4->pppol2tp.s_tunnel;
+               info->peer_tunnel_id = sa_v2in4->pppol2tp.d_tunnel;
+               info->session_id = sa_v2in4->pppol2tp.s_session;
+               info->peer_session_id = sa_v2in4->pppol2tp.d_session;
+
+               break;
+       }
+       case sizeof(struct sockaddr_pppol2tpv3):
+       {
+               const struct sockaddr_pppol2tpv3 *sa_v3in4 = sa;
+
+               if (sa_v3in4->sa_protocol != PX_PROTO_OL2TP)
+                       return -EINVAL;
+
+               info->version = 3;
+               info->fd = sa_v3in4->pppol2tp.fd;
+               info->tunnel_id = sa_v3in4->pppol2tp.s_tunnel;
+               info->peer_tunnel_id = sa_v3in4->pppol2tp.d_tunnel;
+               info->session_id = sa_v3in4->pppol2tp.s_session;
+               info->peer_session_id = sa_v3in4->pppol2tp.d_session;
+
+               break;
+       }
+       case sizeof(struct sockaddr_pppol2tpin6):
+       {
+               const struct sockaddr_pppol2tpin6 *sa_v2in6 = sa;
+
+               if (sa_v2in6->sa_protocol != PX_PROTO_OL2TP)
+                       return -EINVAL;
+
+               info->version = 2;
+               info->fd = sa_v2in6->pppol2tp.fd;
+               info->tunnel_id = sa_v2in6->pppol2tp.s_tunnel;
+               info->peer_tunnel_id = sa_v2in6->pppol2tp.d_tunnel;
+               info->session_id = sa_v2in6->pppol2tp.s_session;
+               info->peer_session_id = sa_v2in6->pppol2tp.d_session;
+
+               break;
+       }
+       case sizeof(struct sockaddr_pppol2tpv3in6):
+       {
+               const struct sockaddr_pppol2tpv3in6 *sa_v3in6 = sa;
+
+               if (sa_v3in6->sa_protocol != PX_PROTO_OL2TP)
+                       return -EINVAL;
+
+               info->version = 3;
+               info->fd = sa_v3in6->pppol2tp.fd;
+               info->tunnel_id = sa_v3in6->pppol2tp.s_tunnel;
+               info->peer_tunnel_id = sa_v3in6->pppol2tp.d_tunnel;
+               info->session_id = sa_v3in6->pppol2tp.s_session;
+               info->peer_session_id = sa_v3in6->pppol2tp.d_session;
+
+               break;
+       }
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 /* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
  */
 static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
                            int sockaddr_len, int flags)
 {
        struct sock *sk = sock->sk;
-       struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
        struct pppox_sock *po = pppox_sk(sk);
        struct l2tp_session *session = NULL;
+       struct l2tp_connect_info info;
        struct l2tp_tunnel *tunnel;
        struct pppol2tp_session *ps;
        struct l2tp_session_cfg cfg = { 0, };
-       int error = 0;
-       u32 tunnel_id, peer_tunnel_id;
-       u32 session_id, peer_session_id;
        bool drop_refcnt = false;
        bool drop_tunnel = false;
        bool new_session = false;
        bool new_tunnel = false;
-       int ver = 2;
-       int fd;
-
-       lock_sock(sk);
-
-       error = -EINVAL;
+       int error;
 
-       if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
-           sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
-           sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
-           sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
-               goto end;
+       error = pppol2tp_sockaddr_get_info(uservaddr, sockaddr_len, &info);
+       if (error < 0)
+               return error;
 
-       if (sp->sa_protocol != PX_PROTO_OL2TP)
-               goto end;
+       lock_sock(sk);
 
        /* Check for already bound sockets */
        error = -EBUSY;
@@ -640,56 +706,12 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
        if (sk->sk_user_data)
                goto end; /* socket is already attached */
 
-       /* Get params from socket address. Handle L2TPv2 and L2TPv3.
-        * This is nasty because there are different sockaddr_pppol2tp
-        * structs for L2TPv2, L2TPv3, over IPv4 and IPv6. We use
-        * the sockaddr size to determine which structure the caller
-        * is using.
-        */
-       peer_tunnel_id = 0;
-       if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) {
-               fd = sp->pppol2tp.fd;
-               tunnel_id = sp->pppol2tp.s_tunnel;
-               peer_tunnel_id = sp->pppol2tp.d_tunnel;
-               session_id = sp->pppol2tp.s_session;
-               peer_session_id = sp->pppol2tp.d_session;
-       } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) {
-               struct sockaddr_pppol2tpv3 *sp3 =
-                       (struct sockaddr_pppol2tpv3 *) sp;
-               ver = 3;
-               fd = sp3->pppol2tp.fd;
-               tunnel_id = sp3->pppol2tp.s_tunnel;
-               peer_tunnel_id = sp3->pppol2tp.d_tunnel;
-               session_id = sp3->pppol2tp.s_session;
-               peer_session_id = sp3->pppol2tp.d_session;
-       } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpin6)) {
-               struct sockaddr_pppol2tpin6 *sp6 =
-                       (struct sockaddr_pppol2tpin6 *) sp;
-               fd = sp6->pppol2tp.fd;
-               tunnel_id = sp6->pppol2tp.s_tunnel;
-               peer_tunnel_id = sp6->pppol2tp.d_tunnel;
-               session_id = sp6->pppol2tp.s_session;
-               peer_session_id = sp6->pppol2tp.d_session;
-       } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3in6)) {
-               struct sockaddr_pppol2tpv3in6 *sp6 =
-                       (struct sockaddr_pppol2tpv3in6 *) sp;
-               ver = 3;
-               fd = sp6->pppol2tp.fd;
-               tunnel_id = sp6->pppol2tp.s_tunnel;
-               peer_tunnel_id = sp6->pppol2tp.d_tunnel;
-               session_id = sp6->pppol2tp.s_session;
-               peer_session_id = sp6->pppol2tp.d_session;
-       } else {
-               error = -EINVAL;
-               goto end; /* bad socket address */
-       }
-
        /* Don't bind if tunnel_id is 0 */
        error = -EINVAL;
-       if (tunnel_id == 0)
+       if (!info.tunnel_id)
                goto end;
 
-       tunnel = l2tp_tunnel_get(sock_net(sk), tunnel_id);
+       tunnel = l2tp_tunnel_get(sock_net(sk), info.tunnel_id);
        if (tunnel)
                drop_tunnel = true;
 
@@ -697,7 +719,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
         * peer_session_id is 0. Otherwise look up tunnel using supplied
         * tunnel id.
         */
-       if ((session_id == 0) && (peer_session_id == 0)) {
+       if (!info.session_id && !info.peer_session_id) {
                if (tunnel == NULL) {
                        struct l2tp_tunnel_cfg tcfg = {
                                .encap = L2TP_ENCAPTYPE_UDP,
@@ -707,12 +729,16 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
                        /* Prevent l2tp_tunnel_register() from trying to set up
                         * a kernel socket.
                         */
-                       if (fd < 0) {
+                       if (info.fd < 0) {
                                error = -EBADF;
                                goto end;
                        }
 
-                       error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel);
+                       error = l2tp_tunnel_create(sock_net(sk), info.fd,
+                                                  info.version,
+                                                  info.tunnel_id,
+                                                  info.peer_tunnel_id, &tcfg,
+                                                  &tunnel);
                        if (error < 0)
                                goto end;
 
@@ -741,9 +767,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
                tunnel->recv_payload_hook = pppol2tp_recv_payload_hook;
 
        if (tunnel->peer_tunnel_id == 0)
-               tunnel->peer_tunnel_id = peer_tunnel_id;
+               tunnel->peer_tunnel_id = info.peer_tunnel_id;
 
-       session = l2tp_session_get(sock_net(sk), tunnel, session_id);
+       session = l2tp_session_get(sock_net(sk), tunnel, info.session_id);
        if (session) {
                drop_refcnt = true;
 
@@ -772,8 +798,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
                cfg.pw_type = L2TP_PWTYPE_PPP;
 
                session = l2tp_session_create(sizeof(struct pppol2tp_session),
-                                             tunnel, session_id,
-                                             peer_session_id, &cfg);
+                                             tunnel, info.session_id,
+                                             info.peer_session_id, &cfg);
                if (IS_ERR(session)) {
                        error = PTR_ERR(session);
                        goto end;
@@ -1818,7 +1844,7 @@ static const struct proto_ops pppol2tp_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = pppol2tp_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = pppol2tp_setsockopt,
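
The connect() refactor above replaces an if/else ladder with pppol2tp_sockaddr_get_info(), which keys off the sockaddr length to select among the four address layouts (L2TPv2/L2TPv3 over IPv4/IPv6). The userspace side is unchanged; for the v2-over-IPv4 case it still looks roughly like the sketch below, where tunnel_fd, ppp_fd and the id variables stand in for values the caller already holds and the peer address fields are elided:

    struct sockaddr_pppol2tp sax = {
            .sa_family          = AF_PPPOX,
            .sa_protocol        = PX_PROTO_OL2TP,
            .pppol2tp.fd        = tunnel_fd,      /* UDP socket of the tunnel */
            .pppol2tp.s_tunnel  = tunnel_id,
            .pppol2tp.d_tunnel  = peer_tunnel_id,
            .pppol2tp.s_session = session_id,
            .pppol2tp.d_session = peer_session_id,
    };

    /* sizeof(sax) is what steers the kernel to the v2/IPv4 branch */
    if (connect(ppp_fd, (struct sockaddr *)&sax, sizeof(sax)) < 0)
            perror("connect");
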
index 804de84901868a4cffd2ec5d6c9e979af937cb59..1beeea9549fa6ec1f7b0e5f9af8ff3250a316f59 100644 (file)
@@ -1192,7 +1192,7 @@ static const struct proto_ops llc_ui_ops = {
        .socketpair  = sock_no_socketpair,
        .accept      = llc_ui_accept,
        .getname     = llc_ui_getname,
-       .poll_mask   = datagram_poll_mask,
+       .poll        = datagram_poll,
        .ioctl       = llc_ui_ioctl,
        .listen      = llc_ui_listen,
        .shutdown    = llc_ui_shutdown,
index e3589ade62e073a3ae91503c2cec03a3e1705f29..bb707789ef2bb21d942f44aacf3d87972abdc23a 100644 (file)
@@ -12,6 +12,7 @@ mac80211-y := \
        scan.o offchannel.o \
        ht.o agg-tx.o agg-rx.o \
        vht.o \
+       he.o \
        ibss.o \
        iface.o \
        rate.o \
index e83c19d4c292e46fce243f60fdf77557096c2af6..6a4f154c99f6b27bd5d41fb502a939c82c654a2b 100644 (file)
@@ -245,6 +245,7 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
        };
        int i, ret = -EOPNOTSUPP;
        u16 status = WLAN_STATUS_REQUEST_DECLINED;
+       u16 max_buf_size;
 
        if (tid >= IEEE80211_FIRST_TSPEC_TSID) {
                ht_dbg(sta->sdata,
@@ -268,13 +269,18 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
                goto end;
        }
 
+       if (sta->sta.he_cap.has_he)
+               max_buf_size = IEEE80211_MAX_AMPDU_BUF;
+       else
+               max_buf_size = IEEE80211_MAX_AMPDU_BUF_HT;
+
        /* sanity check for incoming parameters:
         * check if configuration can support the BA policy
         * and if buffer size does not exceed max value */
        /* XXX: check own ht delayed BA capability?? */
        if (((ba_policy != 1) &&
             (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) ||
-           (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
+           (buf_size > max_buf_size)) {
                status = WLAN_STATUS_INVALID_QOS_PARAM;
                ht_dbg_ratelimited(sta->sdata,
                                   "AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
@@ -283,7 +289,7 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
        }
        /* determine default buffer size */
        if (buf_size == 0)
-               buf_size = IEEE80211_MAX_AMPDU_BUF;
+               buf_size = max_buf_size;
 
        /* make sure the size doesn't exceed the maximum supported by the hw */
        if (buf_size > sta->sta.max_rx_aggregation_subframes)
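
The IEEE80211_MAX_AMPDU_BUF_HT / IEEE80211_MAX_AMPDU_BUF split exists because HE enlarges the block-ack window beyond what HT/VHT permit. Assuming the definitions this series adds to include/linux/ieee80211.h, the two ceilings are:

    #define IEEE80211_MAX_AMPDU_BUF_HT  0x40    /* 64 subframes for HT/VHT */
    #define IEEE80211_MAX_AMPDU_BUF     0x100   /* 256 subframes with HE */

so an HE peer may negotiate a 256-frame reorder buffer while non-HE peers stay capped at 64, which is also why the hw defaults in the main.c hunk below drop to the _HT value unless the driver raises them.
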
index ac4295296514365ad1972ddc22754be1cdb8384a..69e831bc317beb666e2a836facb572e59e727a9c 100644 (file)
@@ -463,6 +463,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
                .timeout = 0,
        };
        int ret;
+       u16 buf_size;
 
        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
@@ -511,11 +512,22 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
        sta->ampdu_mlme.addba_req_num[tid]++;
        spin_unlock_bh(&sta->lock);
 
+       if (sta->sta.he_cap.has_he) {
+               buf_size = local->hw.max_tx_aggregation_subframes;
+       } else {
+               /*
+                * We really should use what the driver told us it will
+                * transmit as the maximum, but certain APs (e.g. the
+                * LinkSys WRT120N with FW v1.0.07 build 002 Jun 18 2012)
+                * will crash when we use a lower number.
+                */
+               buf_size = IEEE80211_MAX_AMPDU_BUF_HT;
+       }
+
        /* send AddBA request */
        ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
                                     tid_tx->dialog_token, params.ssn,
-                                    IEEE80211_MAX_AMPDU_BUF,
-                                    tid_tx->timeout);
+                                    buf_size, tid_tx->timeout);
 }
 
 /*
@@ -905,8 +917,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 {
        struct tid_ampdu_tx *tid_tx;
        struct ieee80211_txq *txq;
-       u16 capab, tid;
-       u8 buf_size;
+       u16 capab, tid, buf_size;
        bool amsdu;
 
        capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
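
Widening buf_size from u8 to u16 in ieee80211_process_addba_resp() matters because the buffer size is carried in the top 10 bits of the ADDBA capability word, and with HE a value of 256 no longer fits in a byte. The decode this feeds, sketched with the standard field masks (bit 0 A-MSDU, bit 1 policy, bits 2-5 TID, bits 6-15 buffer size):

    amsdu    = !!(capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK);
    tid      = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
    buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; /* 0..1023 */
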
index bdf6fa78d0d2b101a448fe10e925f5e381486224..02f3672e7b5e2ed89046db9b0a6b65069e3940c7 100644 (file)
@@ -1412,6 +1412,11 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
                                                    params->vht_capa, sta);
 
+       if (params->he_capa)
+               ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
+                                                 (void *)params->he_capa,
+                                                 params->he_capa_len, sta);
+
        if (params->opmode_notif_used) {
                /* returned value is only needed for rc update, but the
                 * rc isn't initialized here yet, so ignore it
@@ -3486,7 +3491,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
        }
 
        local_bh_disable();
-       ieee80211_xmit(sdata, sta, skb);
+       ieee80211_xmit(sdata, sta, skb, 0);
        local_bh_enable();
 
        ret = 0;
index 690c142a7a440b0b31b27fb436942162692e9787..5ac743816b59eb416f2153887e3608c72ca89b68 100644 (file)
@@ -116,16 +116,16 @@ static void ieee80211_get_stats(struct net_device *dev,
                data[i++] = sta->sta_state;
 
 
-               if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE))
+               if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))
                        data[i] = 100000ULL *
                                cfg80211_calculate_bitrate(&sinfo.txrate);
                i++;
-               if (sinfo.filled & BIT(NL80211_STA_INFO_RX_BITRATE))
+               if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))
                        data[i] = 100000ULL *
                                cfg80211_calculate_bitrate(&sinfo.rxrate);
                i++;
 
-               if (sinfo.filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))
+               if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))
                        data[i] = (u8)sinfo.signal_avg;
                i++;
        } else {
diff --git a/net/mac80211/he.c b/net/mac80211/he.c
new file mode 100644 (file)
index 0000000..769078e
--- /dev/null
+++ b/net/mac80211/he.c
@@ -0,0 +1,55 @@
+/*
+ * HE handling
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "ieee80211_i.h"
+
+void
+ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
+                                 struct ieee80211_supported_band *sband,
+                                 const u8 *he_cap_ie, u8 he_cap_len,
+                                 struct sta_info *sta)
+{
+       struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap;
+       struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie;
+       u8 he_ppe_size;
+       u8 mcs_nss_size;
+       u8 he_total_size;
+
+       memset(he_cap, 0, sizeof(*he_cap));
+
+       if (!he_cap_ie || !ieee80211_get_he_sta_cap(sband))
+               return;
+
+       /* Make sure size is OK */
+       mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem);
+       he_ppe_size =
+               ieee80211_he_ppe_size(he_cap_ie[sizeof(he_cap->he_cap_elem) +
+                                               mcs_nss_size],
+                                     he_cap_ie_elem->phy_cap_info);
+       he_total_size = sizeof(he_cap->he_cap_elem) + mcs_nss_size +
+                       he_ppe_size;
+       if (he_cap_len < he_total_size)
+               return;
+
+       memcpy(&he_cap->he_cap_elem, he_cap_ie, sizeof(he_cap->he_cap_elem));
+
+       /* HE Tx/Rx HE MCS NSS Support Field */
+       memcpy(&he_cap->he_mcs_nss_supp,
+              &he_cap_ie[sizeof(he_cap->he_cap_elem)], mcs_nss_size);
+
+       /* Check if there are (optional) PPE Thresholds */
+       if (he_cap->he_cap_elem.phy_cap_info[6] &
+           IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
+               memcpy(he_cap->ppe_thres,
+                      &he_cap_ie[sizeof(he_cap->he_cap_elem) + mcs_nss_size],
+                      he_ppe_size);
+
+       he_cap->has_he = true;
+}
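
The parsing order in ieee80211_he_cap_ie_to_sta_he_cap() mirrors the wire layout of the (still draft, hence the EXTENSION placement noted elsewhere in this series) HE capabilities element, which is why the length must be validated before each copy. Schematically, as assumed by the size checks above:

    /*
     * he_cap_ie, he_cap_len bytes in total:
     *
     *   struct ieee80211_he_cap_elem   fixed MAC + PHY capability block
     *   Tx/Rx HE MCS NSS set           ieee80211_he_mcs_nss_size() bytes,
     *                                  growing with 160 MHz / 80+80 support
     *   PPE thresholds (optional)      present iff phy_cap_info[6] has
     *                                  IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
     *                                  ieee80211_he_ppe_size() bytes
     *
     * Anything shorter than the sum of the three parts is discarded.
     */
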
index 26a7ba3b698f1e94598350d3e03dd319d6ef10e2..f849ea814993f34d34e9485f6681659c4408d6c9 100644 (file)
@@ -352,7 +352,7 @@ void ieee80211_ba_session_work(struct work_struct *work)
                    test_and_clear_bit(tid,
                                       sta->ampdu_mlme.tid_rx_manage_offl))
                        ___ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid,
-                                                        IEEE80211_MAX_AMPDU_BUF,
+                                                        IEEE80211_MAX_AMPDU_BUF_HT,
                                                         false, true);
 
                if (test_and_clear_bit(tid + IEEE80211_NUM_TIDS,
index d1978aa1c15ddf4d946bae7458343f9875861451..172aeae21ae9d00bc9f43c4effc0008ff624ee6d 100644 (file)
@@ -165,6 +165,7 @@ typedef unsigned __bitwise ieee80211_tx_result;
 #define TX_DROP                ((__force ieee80211_tx_result) 1u)
 #define TX_QUEUED      ((__force ieee80211_tx_result) 2u)
 
+#define IEEE80211_TX_NO_SEQNO          BIT(0)
 #define IEEE80211_TX_UNICAST           BIT(1)
 #define IEEE80211_TX_PS_BUFFERED       BIT(2)
 
@@ -364,6 +365,7 @@ enum ieee80211_sta_flags {
        IEEE80211_STA_DISABLE_160MHZ    = BIT(13),
        IEEE80211_STA_DISABLE_WMM       = BIT(14),
        IEEE80211_STA_ENABLE_RRM        = BIT(15),
+       IEEE80211_STA_DISABLE_HE        = BIT(16),
 };
 
 struct ieee80211_mgd_auth_data {
@@ -1453,6 +1455,10 @@ struct ieee802_11_elems {
        const struct ieee80211_vht_cap *vht_cap_elem;
        const struct ieee80211_vht_operation *vht_operation;
        const struct ieee80211_meshconf_ie *mesh_config;
+       const u8 *he_cap;
+       const struct ieee80211_he_operation *he_operation;
+       const struct ieee80211_mu_edca_param_set *mu_edca_param_set;
+       const u8 *uora_element;
        const u8 *mesh_id;
        const u8 *peering;
        const __le16 *awake_window;
@@ -1482,6 +1488,7 @@ struct ieee802_11_elems {
        u8 ext_supp_rates_len;
        u8 wmm_info_len;
        u8 wmm_param_len;
+       u8 he_cap_len;
        u8 mesh_id_len;
        u8 peering_len;
        u8 preq_len;
@@ -1824,6 +1831,13 @@ void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
 enum nl80211_chan_width
 ieee80211_sta_rx_bw_to_chan_width(struct sta_info *sta);
 
+/* HE */
+void
+ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
+                                 struct ieee80211_supported_band *sband,
+                                 const u8 *he_cap_ie, u8 he_cap_len,
+                                 struct sta_info *sta);
+
 /* Spectrum management */
 void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
                                       struct ieee80211_mgmt *mgmt,
@@ -1880,19 +1894,20 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
 void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
                               bool bss_notify, bool enable_qos);
 void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
-                   struct sta_info *sta, struct sk_buff *skb);
+                   struct sta_info *sta, struct sk_buff *skb,
+                   u32 txdata_flags);
 
 void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
                                 struct sk_buff *skb, int tid,
-                                enum nl80211_band band);
+                                enum nl80211_band band, u32 txdata_flags);
 
 static inline void
 ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
                          struct sk_buff *skb, int tid,
-                         enum nl80211_band band)
+                         enum nl80211_band band, u32 txdata_flags)
 {
        rcu_read_lock();
-       __ieee80211_tx_skb_tid_band(sdata, skb, tid, band);
+       __ieee80211_tx_skb_tid_band(sdata, skb, tid, band, txdata_flags);
        rcu_read_unlock();
 }
 
@@ -1910,7 +1925,7 @@ static inline void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
        }
 
        __ieee80211_tx_skb_tid_band(sdata, skb, tid,
-                                   chanctx_conf->def.chan->band);
+                                   chanctx_conf->def.chan->band, 0);
        rcu_read_unlock();
 }
 
@@ -2031,26 +2046,27 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
 void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
                                    const u8 *bssid, u16 stype, u16 reason,
                                    bool send_frame, u8 *frame_buf);
+
+enum {
+       IEEE80211_PROBE_FLAG_DIRECTED           = BIT(0),
+       IEEE80211_PROBE_FLAG_MIN_CONTENT        = BIT(1),
+       IEEE80211_PROBE_FLAG_RANDOM_SN          = BIT(2),
+};
+
 int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                             size_t buffer_len,
                             struct ieee80211_scan_ies *ie_desc,
                             const u8 *ie, size_t ie_len,
                             u8 bands_used, u32 *rate_masks,
-                            struct cfg80211_chan_def *chandef);
+                            struct cfg80211_chan_def *chandef,
+                            u32 flags);
 struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
                                          const u8 *src, const u8 *dst,
                                          u32 ratemask,
                                          struct ieee80211_channel *chan,
                                          const u8 *ssid, size_t ssid_len,
                                          const u8 *ie, size_t ie_len,
-                                         bool directed);
-void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata,
-                             const u8 *src, const u8 *dst,
-                             const u8 *ssid, size_t ssid_len,
-                             const u8 *ie, size_t ie_len,
-                             u32 ratemask, bool directed, u32 tx_flags,
-                             struct ieee80211_channel *channel, bool scan);
-
+                                         u32 flags);
 u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
                            struct ieee802_11_elems *elems,
                            enum nl80211_band band, u32 *basic_rates);
@@ -2073,6 +2089,9 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
                               u32 cap);
 u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
                                const struct cfg80211_chan_def *chandef);
+u8 *ieee80211_ie_build_he_cap(u8 *pos,
+                             const struct ieee80211_sta_he_cap *he_cap,
+                             u8 *end);
 int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
                             const struct ieee80211_supported_band *sband,
                             const u8 *srates, int srates_len, u32 *rates);
index 555e389b7dfa34ebf494c9f2432fb6409eff74a9..5e6cf2cee965264dd45cda775b370b6dcb022413 100644 (file)
@@ -1130,7 +1130,7 @@ static void ieee80211_uninit(struct net_device *dev)
 
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
                                         struct sk_buff *skb,
-                                        void *accel_priv,
+                                        struct net_device *sb_dev,
                                         select_queue_fallback_t fallback)
 {
        return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
@@ -1176,7 +1176,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
                                          struct sk_buff *skb,
-                                         void *accel_priv,
+                                         struct net_device *sb_dev,
                                          select_queue_fallback_t fallback)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
index fb73451ed85ec65cd0b4b5cc3808d51d40a8dd39..4fb2709cb52796c752f052a746bd5c420d6caf08 100644 (file)
@@ -3,6 +3,7 @@
  * Copyright 2005-2006, Devicescape Software, Inc.
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright (C) 2017     Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -557,10 +558,19 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
        wiphy_ext_feature_set(wiphy,
                              NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211);
 
-       if (!ops->hw_scan)
+       if (!ops->hw_scan) {
                wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
                                   NL80211_FEATURE_AP_SCAN;
-
+               /*
+                * if the driver behaves correctly using the probe request
+                * (template) from mac80211, then both of these should be
+                * supported even with hw scan - but let drivers opt in.
+                */
+               wiphy_ext_feature_set(wiphy,
+                                     NL80211_EXT_FEATURE_SCAN_RANDOM_SN);
+               wiphy_ext_feature_set(wiphy,
+                                     NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT);
+       }
 
        if (!ops->set_key)
                wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -588,8 +598,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
        local->hw.queues = 1;
        local->hw.max_rates = 1;
        local->hw.max_report_rates = 0;
-       local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
-       local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+       local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT;
+       local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT;
        local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE;
        local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
        local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
@@ -816,7 +826,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
        int result, i;
        enum nl80211_band band;
        int channels, max_bitrates;
-       bool supp_ht, supp_vht;
+       bool supp_ht, supp_vht, supp_he;
        netdev_features_t feature_whitelist;
        struct cfg80211_chan_def dflt_chandef = {};
 
@@ -896,6 +906,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
        max_bitrates = 0;
        supp_ht = false;
        supp_vht = false;
+       supp_he = false;
        for (band = 0; band < NUM_NL80211_BANDS; band++) {
                struct ieee80211_supported_band *sband;
 
@@ -922,6 +933,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                supp_ht = supp_ht || sband->ht_cap.ht_supported;
                supp_vht = supp_vht || sband->vht_cap.vht_supported;
 
+               if (!supp_he)
+                       supp_he = !!ieee80211_get_he_sta_cap(sband);
+
                if (!sband->ht_cap.ht_supported)
                        continue;
 
@@ -1011,6 +1025,18 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                local->scan_ies_len +=
                        2 + sizeof(struct ieee80211_vht_cap);
 
+       /* HE cap element is variable in size - set len to allow max size */
+       /*
+        * TODO: 1 is added at the end of the calculation to accommodate
+        *      the temporary placing of the HE capabilities IE under EXT.
+        *      Remove it once it is placed in the final place.
+        */
+       if (supp_he)
+               local->scan_ies_len +=
+                       2 + sizeof(struct ieee80211_he_cap_elem) +
+                       sizeof(struct ieee80211_he_mcs_nss_supp) +
+                       IEEE80211_HE_PPE_THRES_MAX_LEN + 1;
+
        if (!local->ops->hw_scan) {
                /* For hw_scan, driver needs to set these up. */
                local->hw.wiphy->max_scan_ssids = 4;
index a59187c016e08193e50078becbd169820f2efe9a..7fb9957359a3c1be557e577ba5b76cc4c1177105 100644 (file)
@@ -149,6 +149,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
                             struct ieee80211_channel *channel,
                             const struct ieee80211_ht_operation *ht_oper,
                             const struct ieee80211_vht_operation *vht_oper,
+                            const struct ieee80211_he_operation *he_oper,
                             struct cfg80211_chan_def *chandef, bool tracking)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -207,7 +208,27 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
        }
 
        vht_chandef = *chandef;
-       if (!ieee80211_chandef_vht_oper(vht_oper, &vht_chandef)) {
+       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && he_oper &&
+           (le32_to_cpu(he_oper->he_oper_params) &
+            IEEE80211_HE_OPERATION_VHT_OPER_INFO)) {
+               struct ieee80211_vht_operation he_oper_vht_cap;
+
+               /*
+                * Set only first 3 bytes (other 2 aren't used in
+                * ieee80211_chandef_vht_oper() anyway)
+                */
+               memcpy(&he_oper_vht_cap, he_oper->optional, 3);
+               he_oper_vht_cap.basic_mcs_set = cpu_to_le16(0);
+
+               if (!ieee80211_chandef_vht_oper(&he_oper_vht_cap,
+                                               &vht_chandef)) {
+                       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
+                               sdata_info(sdata,
+                                          "HE AP VHT information is invalid, disable HE\n");
+                       ret = IEEE80211_STA_DISABLE_HE;
+                       goto out;
+               }
+       } else if (!ieee80211_chandef_vht_oper(vht_oper, &vht_chandef)) {
                if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
                        sdata_info(sdata,
                                   "AP VHT information is invalid, disable VHT\n");
@@ -300,12 +321,14 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
                               const struct ieee80211_ht_cap *ht_cap,
                               const struct ieee80211_ht_operation *ht_oper,
                               const struct ieee80211_vht_operation *vht_oper,
+                              const struct ieee80211_he_operation *he_oper,
                               const u8 *bssid, u32 *changed)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       struct ieee80211_supported_band *sband;
-       struct ieee80211_channel *chan;
+       struct ieee80211_channel *chan = sdata->vif.bss_conf.chandef.chan;
+       struct ieee80211_supported_band *sband =
+               local->hw.wiphy->bands[chan->band];
        struct cfg80211_chan_def chandef;
        u16 ht_opmode;
        u32 flags;
@@ -320,6 +343,11 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
        if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT)
                vht_oper = NULL;
 
+       /* don't check HE if we associated as non-HE station */
+       if (ifmgd->flags & IEEE80211_STA_DISABLE_HE ||
+           !ieee80211_get_he_sta_cap(sband))
+               he_oper = NULL;
+
        if (WARN_ON_ONCE(!sta))
                return -EINVAL;
 
@@ -333,12 +361,9 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
                sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
        }
 
-       chan = sdata->vif.bss_conf.chandef.chan;
-       sband = local->hw.wiphy->bands[chan->band];
-
-       /* calculate new channel (type) based on HT/VHT operation IEs */
+       /* calculate new channel (type) based on HT/VHT/HE operation IEs */
        flags = ieee80211_determine_chantype(sdata, sband, chan,
-                                            ht_oper, vht_oper,
+                                            ht_oper, vht_oper, he_oper,
                                             &chandef, true);
 
        /*
@@ -582,6 +607,34 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
        ieee80211_ie_build_vht_cap(pos, &vht_cap, cap);
 }
 
+/* This function determines HE capability flags for the association
+ * and builds the IE.
+ */
+static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
+                               struct sk_buff *skb,
+                               struct ieee80211_supported_band *sband)
+{
+       u8 *pos;
+       const struct ieee80211_sta_he_cap *he_cap = NULL;
+       u8 he_cap_size;
+
+       he_cap = ieee80211_get_he_sta_cap(sband);
+       if (!he_cap)
+               return;
+
+       /*
+        * TODO: the 1 added is because this is temporarily under the EXTENSION
+        * IE. Get rid of it when it moves.
+        */
+       he_cap_size =
+               2 + 1 + sizeof(he_cap->he_cap_elem) +
+               ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) +
+               ieee80211_he_ppe_size(he_cap->ppe_thres[0],
+                                     he_cap->he_cap_elem.phy_cap_info);
+       pos = skb_put(skb, he_cap_size);
+       ieee80211_ie_build_he_cap(pos, he_cap, pos + he_cap_size);
+}
+
 static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
@@ -643,6 +696,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                        2 + 2 * sband->n_channels + /* supported channels */
                        2 + sizeof(struct ieee80211_ht_cap) + /* HT */
                        2 + sizeof(struct ieee80211_vht_cap) + /* VHT */
+                       2 + 1 + sizeof(struct ieee80211_he_cap_elem) + /* HE */
+                               sizeof(struct ieee80211_he_mcs_nss_supp) +
+                               IEEE80211_HE_PPE_THRES_MAX_LEN +
                        assoc_data->ie_len + /* extra IEs */
                        (assoc_data->fils_kek_len ? 16 /* AES-SIV */ : 0) +
                        9, /* WMM */
@@ -827,11 +883,41 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                offset = noffset;
        }
 
+       /* if present, add any custom IEs that go before HE */
+       if (assoc_data->ie_len) {
+               static const u8 before_he[] = {
+                       /*
+                        * no need to list the ones split off before VHT
+                        * or generated here
+                        */
+                       WLAN_EID_OPMODE_NOTIF,
+                       WLAN_EID_EXTENSION, WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE,
+                       /* 11ai elements */
+                       WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_SESSION,
+                       WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_PUBLIC_KEY,
+                       WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_KEY_CONFIRM,
+                       WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_HLP_CONTAINER,
+                       WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN,
+                       /* TODO: add 11ah/11aj/11ak elements */
+               };
+
+               /* RIC already taken above, so no need to handle here anymore */
+               noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len,
+                                            before_he, ARRAY_SIZE(before_he),
+                                            offset);
+               pos = skb_put(skb, noffset - offset);
+               memcpy(pos, assoc_data->ie + offset, noffset - offset);
+               offset = noffset;
+       }
+
        if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
                ieee80211_add_vht_ie(sdata, skb, sband,
                                     &assoc_data->ap_vht_cap);
 
-       /* if present, add any custom non-vendor IEs that go after HT */
+       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
+               ieee80211_add_he_ie(sdata, skb, sband);
+
+       /* if present, add any custom non-vendor IEs that go after HE */
        if (assoc_data->ie_len) {
                noffset = ieee80211_ie_split_vendor(assoc_data->ie,
                                                    assoc_data->ie_len,
@@ -898,6 +984,11 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
        struct ieee80211_hdr_3addr *nullfunc;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
+       /* Don't send NDPs when the STA is connected in HE */
+       if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+           !(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
+               return;
+
        skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif,
                !ieee80211_hw_check(&local->hw, DOESNT_SUPPORT_QOS_NDP));
        if (!skb)
@@ -929,6 +1020,10 @@ static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
        if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
                return;
 
+       /* Don't send NDPs when connected in HE */
+       if (!(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
+               return;
+
        skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
        if (!skb)
                return;
@@ -1700,9 +1795,11 @@ static void ieee80211_sta_handle_tspec_ac_params_wk(struct work_struct *work)
 }
 
 /* MLME */
-static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
-                                    struct ieee80211_sub_if_data *sdata,
-                                    const u8 *wmm_param, size_t wmm_param_len)
+static bool
+ieee80211_sta_wmm_params(struct ieee80211_local *local,
+                        struct ieee80211_sub_if_data *sdata,
+                        const u8 *wmm_param, size_t wmm_param_len,
+                        const struct ieee80211_mu_edca_param_set *mu_edca)
 {
        struct ieee80211_tx_queue_params params[IEEE80211_NUM_ACS];
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -1749,6 +1846,9 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
                                sdata->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
                        if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
                                uapsd = true;
+                       params[ac].mu_edca = !!mu_edca;
+                       if (mu_edca)
+                               params[ac].mu_edca_param_rec = mu_edca->ac_bk;
                        break;
                case 2: /* AC_VI */
                        ac = IEEE80211_AC_VI;
@@ -1756,6 +1856,9 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
                                sdata->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
                        if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
                                uapsd = true;
+                       params[ac].mu_edca = !!mu_edca;
+                       if (mu_edca)
+                               params[ac].mu_edca_param_rec = mu_edca->ac_vi;
                        break;
                case 3: /* AC_VO */
                        ac = IEEE80211_AC_VO;
@@ -1763,6 +1866,9 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
                                sdata->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
                        if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
                                uapsd = true;
+                       params[ac].mu_edca = !!mu_edca;
+                       if (mu_edca)
+                               params[ac].mu_edca_param_rec = mu_edca->ac_vo;
                        break;
                case 0: /* AC_BE */
                default:
@@ -1771,6 +1877,9 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
                                sdata->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
                        if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
                                uapsd = true;
+                       params[ac].mu_edca = !!mu_edca;
+                       if (mu_edca)
+                               params[ac].mu_edca_param_rec = mu_edca->ac_be;
                        break;
                }
 
@@ -2219,6 +2328,20 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
                ieee80211_sta_reset_conn_monitor(sdata);
 }
 
+static void ieee80211_mlme_send_probe_req(struct ieee80211_sub_if_data *sdata,
+                                         const u8 *src, const u8 *dst,
+                                         const u8 *ssid, size_t ssid_len,
+                                         struct ieee80211_channel *channel)
+{
+       struct sk_buff *skb;
+
+       skb = ieee80211_build_probe_req(sdata, src, dst, (u32)-1, channel,
+                                       ssid, ssid_len, NULL, 0,
+                                       IEEE80211_PROBE_FLAG_DIRECTED);
+       if (skb)
+               ieee80211_tx_skb(sdata, skb);
+}
+
 static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -2265,10 +2388,9 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
                else
                        ssid_len = ssid[1];
 
-               ieee80211_send_probe_req(sdata, sdata->vif.addr, dst,
-                                        ssid + 2, ssid_len, NULL,
-                                        0, (u32) -1, true, 0,
-                                        ifmgd->associated->channel, false);
+               ieee80211_mlme_send_probe_req(sdata, sdata->vif.addr, dst,
+                                             ssid + 2, ssid_len,
+                                             ifmgd->associated->channel);
                rcu_read_unlock();
        }
 
@@ -2370,7 +2492,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
        skb = ieee80211_build_probe_req(sdata, sdata->vif.addr, cbss->bssid,
                                        (u32) -1, cbss->channel,
                                        ssid + 2, ssid_len,
-                                       NULL, 0, true);
+                                       NULL, 0, IEEE80211_PROBE_FLAG_DIRECTED);
        rcu_read_unlock();
 
        return skb;
@@ -3008,6 +3130,25 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
                goto out;
        }
 
+       /*
+        * If AP doesn't support HT, or it doesn't have HE mandatory IEs, mark
+        * HE as disabled. If on the 5GHz band, make sure it supports VHT.
+        */
+       if (ifmgd->flags & IEEE80211_STA_DISABLE_HT ||
+           (sband->band == NL80211_BAND_5GHZ &&
+            ifmgd->flags & IEEE80211_STA_DISABLE_VHT) ||
+           (!elems.he_cap && !elems.he_operation))
+               ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+
+       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) &&
+           (!elems.he_cap || !elems.he_operation)) {
+               mutex_unlock(&sdata->local->sta_mtx);
+               sdata_info(sdata,
+                          "HE AP is missing HE capability/operation\n");
+               ret = false;
+               goto out;
+       }
+
        /* Set up internal HT/VHT capabilities */
        if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
                ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
@@ -3017,6 +3158,48 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
                ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
                                                    elems.vht_cap_elem, sta);
 
+       if (elems.he_operation && !(ifmgd->flags & IEEE80211_STA_DISABLE_HE) &&
+           elems.he_cap) {
+               ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
+                                                 elems.he_cap,
+                                                 elems.he_cap_len,
+                                                 sta);
+
+               bss_conf->he_support = sta->sta.he_cap.has_he;
+       } else {
+               bss_conf->he_support = false;
+       }
+
+       if (bss_conf->he_support) {
+               u32 he_oper_params =
+                       le32_to_cpu(elems.he_operation->he_oper_params);
+
+               bss_conf->bss_color = he_oper_params &
+                                     IEEE80211_HE_OPERATION_BSS_COLOR_MASK;
+               bss_conf->htc_trig_based_pkt_ext =
+                       (he_oper_params &
+                        IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK) <<
+                       IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET;
+               bss_conf->frame_time_rts_th =
+                       (he_oper_params &
+                        IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK) <<
+                       IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET;
+
+               bss_conf->multi_sta_back_32bit =
+                       sta->sta.he_cap.he_cap_elem.mac_cap_info[2] &
+                       IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP;
+
+               bss_conf->ack_enabled =
+                       sta->sta.he_cap.he_cap_elem.mac_cap_info[2] &
+                       IEEE80211_HE_MAC_CAP2_ACK_EN;
+
+               bss_conf->uora_exists = !!elems.uora_element;
+               if (elems.uora_element)
+                       bss_conf->uora_ocw_range = elems.uora_element[0];
+
+               /* TODO: OPEN: what happens if BSS color disable is set? */
+       }
+
        /*
         * Some APs, e.g. Netgear WNDR3700, report invalid HT operation data
         * in their association response, so ignore that data for our own
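
The he_oper_params handling in the hunk above is plain mask-and-shift extraction from a packed little-endian 32-bit word. A minimal standalone sketch of the pattern follows; the masks are invented for illustration and are not the real IEEE80211_HE_OPERATION_* defines.

    #include <stdint.h>
    #include <stdio.h>

    #define EX_BSS_COLOR_MASK    0x0000003fu /* bits 0-5, hypothetical */
    #define EX_PE_DURATION_MASK  0x000001c0u /* bits 6-8, hypothetical */
    #define EX_PE_DURATION_SHIFT 6

    int main(void)
    {
        /* the kernel converts the on-air word with le32_to_cpu() before
         * masking; assume that has already happened here */
        uint32_t he_oper_params = 0x000001e5;

        unsigned bss_color = he_oper_params & EX_BSS_COLOR_MASK;
        unsigned pe_dur = (he_oper_params & EX_PE_DURATION_MASK) >>
                          EX_PE_DURATION_SHIFT;

        printf("bss_color=%u pe_duration=%u\n", bss_color, pe_dur);
        return 0;
    }
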
@@ -3076,7 +3259,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
        if (ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
                ieee80211_set_wmm_default(sdata, false, false);
        } else if (!ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
-                                            elems.wmm_param_len)) {
+                                            elems.wmm_param_len,
+                                            elems.mu_edca_param_set)) {
                /* still enable QoS since we might have HT/VHT */
                ieee80211_set_wmm_default(sdata, false, true);
                /* set the disable-WMM flag in this case to disable
@@ -3590,7 +3774,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 
        if (!(ifmgd->flags & IEEE80211_STA_DISABLE_WMM) &&
            ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
-                                    elems.wmm_param_len))
+                                    elems.wmm_param_len,
+                                    elems.mu_edca_param_set))
                changed |= BSS_CHANGED_QOS;
 
        /*
@@ -3629,7 +3814,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 
        if (ieee80211_config_bw(sdata, sta,
                                elems.ht_cap_elem, elems.ht_operation,
-                               elems.vht_operation, bssid, &changed)) {
+                               elems.vht_operation, elems.he_operation,
+                               bssid, &changed)) {
                mutex_unlock(&local->sta_mtx);
                sdata_info(sdata,
                           "failed to follow AP %pM bandwidth change, disconnect\n",
@@ -4266,6 +4452,68 @@ static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
        return chains;
 }
 
+static bool
+ieee80211_verify_sta_he_mcs_support(struct ieee80211_supported_band *sband,
+                                   const struct ieee80211_he_operation *he_op)
+{
+       const struct ieee80211_sta_he_cap *sta_he_cap =
+               ieee80211_get_he_sta_cap(sband);
+       u16 ap_min_req_set;
+       int i;
+
+       if (!sta_he_cap || !he_op)
+               return false;
+
+       ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
+
+       /* Need to check each of 80 MHz, 160 MHz and 80+80 MHz */
+       for (i = 0; i < 3; i++) {
+               const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp =
+                       &sta_he_cap->he_mcs_nss_supp;
+               u16 sta_mcs_map_rx =
+                       le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i]);
+               u16 sta_mcs_map_tx =
+                       le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i + 1]);
+               u8 nss;
+               bool verified = true;
+
+               /*
+                        * For each bandwidth there is a maximum of 8 spatial
+                        * streams possible. Each of the sta_mcs_map_* is a
+                        * 16-bit struct built of 2 bits per NSS (1-8), with
+                        * values defined in enum ieee80211_he_mcs_support.
+                        * Need to make sure the STA's TX and RX capabilities
+                        * aren't below the AP's per-stream minimum for this HE
+                        * BSS; it is enough for one bandwidth to meet them.
+                */
+               for (nss = 8; nss > 0; nss--) {
+                       u8 sta_rx_val = (sta_mcs_map_rx >> (2 * (nss - 1))) & 3;
+                       u8 sta_tx_val = (sta_mcs_map_tx >> (2 * (nss - 1))) & 3;
+                       u8 ap_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
+
+                       if (ap_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
+                               continue;
+
+                       /*
+                        * Make sure the HE AP doesn't require MCSs that aren't
+                        * supported by the client
+                        */
+                       if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+                           sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+                           (ap_val > sta_rx_val) || (ap_val > sta_tx_val)) {
+                               verified = false;
+                               break;
+                       }
+               }
+
+               if (verified)
+                       return true;
+       }
+
+       /* If here, STA doesn't meet AP's HE min requirements */
+       return false;
+}
+
 static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
                                  struct cfg80211_bss *cbss)
 {
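
ieee80211_verify_sta_he_mcs_support() above walks three 16-bit MCS maps (80 MHz, 160 MHz, 80+80 MHz), each holding one 2-bit support value per spatial stream. Below is a self-contained sketch of the per-map comparison; the value 3 for "not supported" matches IEEE80211_HE_MCS_NOT_SUPPORTED, everything else is illustrative.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MCS_NOT_SUPPORTED 3u /* IEEE80211_HE_MCS_NOT_SUPPORTED */

    /* one bandwidth's worth of the check; the kernel repeats this for
     * all three maps and accepts if any single one passes */
    static bool sta_meets_ap_mcs_reqs(uint16_t sta_rx, uint16_t sta_tx,
                                      uint16_t ap_min)
    {
        for (int nss = 1; nss <= 8; nss++) {
            unsigned shift = 2u * (nss - 1);
            unsigned ap = (ap_min >> shift) & 3u;

            if (ap == MCS_NOT_SUPPORTED)
                continue; /* AP imposes nothing for this stream count */

            unsigned rx = (sta_rx >> shift) & 3u;
            unsigned tx = (sta_tx >> shift) & 3u;

            /* 0 = MCS 0-7, 1 = MCS 0-9, 2 = MCS 0-11; higher includes
             * lower, so a simple numeric compare works */
            if (rx == MCS_NOT_SUPPORTED || tx == MCS_NOT_SUPPORTED ||
                ap > rx || ap > tx)
                return false;
        }
        return true;
    }

    int main(void)
    {
        /* AP requires MCS 0-9 on one stream; STA supports MCS 0-11 */
        printf("%d\n", sta_meets_ap_mcs_reqs(0xfffe, 0xfffe, 0xfffd));
        return 0;
    }
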
@@ -4274,6 +4522,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
        const struct ieee80211_ht_cap *ht_cap = NULL;
        const struct ieee80211_ht_operation *ht_oper = NULL;
        const struct ieee80211_vht_operation *vht_oper = NULL;
+       const struct ieee80211_he_operation *he_oper = NULL;
        struct ieee80211_supported_band *sband;
        struct cfg80211_chan_def chandef;
        int ret;
@@ -4329,6 +4578,24 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
                }
        }
 
+       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) &&
+           ieee80211_get_he_sta_cap(sband)) {
+               const struct cfg80211_bss_ies *ies;
+               const u8 *he_oper_ie;
+
+               ies = rcu_dereference(cbss->ies);
+               he_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION,
+                                                 ies->data, ies->len);
+               if (he_oper_ie &&
+                   he_oper_ie[1] == ieee80211_he_oper_size(&he_oper_ie[3]))
+                       he_oper = (void *)(he_oper_ie + 3);
+               else
+                       he_oper = NULL;
+
+               if (!ieee80211_verify_sta_he_mcs_support(sband, he_oper))
+                       ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+       }
+
        /* Allow VHT if at least one channel on the sband supports 80 MHz */
        have_80mhz = false;
        for (i = 0; i < sband->n_channels; i++) {
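
The HE Operation lookup above relies on the Element ID Extension framing: one octet of element ID 255, one octet of length, one octet of extension ID, then the body, so the payload starts at offset 3 and the element length already counts the extension ID octet. A simplified userspace re-implementation of the cfg80211_find_ext_ie() idea, with an arbitrary extension ID for the demo:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EID_EXTENSION 255u

    /* return a pointer to the body after the extension ID, or NULL */
    static const uint8_t *find_ext_ie(uint8_t ext_id, const uint8_t *ies,
                                      size_t len)
    {
        while (len >= 2) {
            uint8_t id = ies[0], elen = ies[1];

            if (len < (size_t)elen + 2)
                return NULL; /* truncated element, stop parsing */
            if (id == EID_EXTENSION && elen >= 1 && ies[2] == ext_id)
                return ies + 3; /* body after the extension ID octet */
            ies += elen + 2;
            len -= elen + 2;
        }
        return NULL;
    }

    int main(void)
    {
        const uint8_t ies[] = { 0, 4, 't', 'e', 's', 't',  /* SSID */
                                255, 2, 36, 0x42 };        /* ext elem */
        const uint8_t *body = find_ext_ie(36, ies, sizeof(ies));

        printf("%s\n", body && body[0] == 0x42 ? "found" : "missing");
        return 0;
    }
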
@@ -4345,7 +4612,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
 
        ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
                                                     cbss->channel,
-                                                    ht_oper, vht_oper,
+                                                    ht_oper, vht_oper, he_oper,
                                                     &chandef, false);
 
        sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
@@ -4751,8 +5018,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                    req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
                        ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
                        ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+                       ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
                        netdev_info(sdata->dev,
-                                   "disabling HT/VHT due to WEP/TKIP use\n");
+                                   "disabling HE/HT/VHT due to WEP/TKIP use\n");
                }
        }
 
index f1d40b6645ff2777713885bef12936860f5ec9c8..8ef4153cd2994a73b4be5733a1dc12e9fa329640 100644 (file)
@@ -262,7 +262,7 @@ static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
        if (roc->mgmt_tx_cookie) {
                if (!WARN_ON(!roc->frame)) {
                        ieee80211_tx_skb_tid_band(roc->sdata, roc->frame, 7,
-                                                 roc->chan->band);
+                                                 roc->chan->band, 0);
                        roc->frame = NULL;
                }
        } else {
index 0a38cc1cbebcee97ed7e8779ab487e2e0943e84c..a16ba568e2a3b45e7b82cc24fb2346d09223fa06 100644 (file)
@@ -175,6 +175,20 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
                len += 12;
        }
 
+       if (status->encoding == RX_ENC_HE &&
+           status->flag & RX_FLAG_RADIOTAP_HE) {
+               len = ALIGN(len, 2);
+               len += 12;
+               BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12);
+       }
+
+       if (status->encoding == RX_ENC_HE &&
+           status->flag & RX_FLAG_RADIOTAP_HE_MU) {
+               len = ALIGN(len, 2);
+               len += 12;
+               BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
+       }
+
        if (status->chains) {
                /* antenna and antenna signal fields */
                len += 2 * hweight8(status->chains);
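
Both radiotap HE fields above are padded to 2-byte alignment and pinned to exactly 12 bytes by the BUILD_BUG_ONs. A toy model of that length accounting:

    #include <stdio.h>

    static unsigned add_he_field(unsigned len)
    {
        len = (len + 1u) & ~1u; /* ALIGN(len, 2) */
        return len + 12;        /* sizeof(struct ieee80211_radiotap_he) */
    }

    int main(void)
    {
        printf("%u\n", add_he_field(13)); /* 13 -> aligned to 14 -> 26 */
        return 0;
    }
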
@@ -263,6 +277,19 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
        int mpdulen, chain;
        unsigned long chains = status->chains;
        struct ieee80211_vendor_radiotap rtap = {};
+       struct ieee80211_radiotap_he he = {};
+       struct ieee80211_radiotap_he_mu he_mu = {};
+
+       if (status->flag & RX_FLAG_RADIOTAP_HE) {
+               he = *(struct ieee80211_radiotap_he *)skb->data;
+               skb_pull(skb, sizeof(he));
+               WARN_ON_ONCE(status->encoding != RX_ENC_HE);
+       }
+
+       if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
+               he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
+               skb_pull(skb, sizeof(he_mu));
+       }
 
        if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
                rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
@@ -520,6 +547,89 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
                *pos++ = flags;
        }
 
+       if (status->encoding == RX_ENC_HE &&
+           status->flag & RX_FLAG_RADIOTAP_HE) {
+#define HE_PREP(f, val)        cpu_to_le16(FIELD_PREP(IEEE80211_RADIOTAP_HE_##f, val))
+
+               if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
+                       he.data6 |= HE_PREP(DATA6_NSTS,
+                                           FIELD_GET(RX_ENC_FLAG_STBC_MASK,
+                                                     status->enc_flags));
+                       he.data3 |= HE_PREP(DATA3_STBC, 1);
+               } else {
+                       he.data6 |= HE_PREP(DATA6_NSTS, status->nss);
+               }
+
+#define CHECK_GI(s) \
+       BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
+                    (int)NL80211_RATE_INFO_HE_GI_##s)
+
+               CHECK_GI(0_8);
+               CHECK_GI(1_6);
+               CHECK_GI(3_2);
+
+               he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx);
+               he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm);
+               he.data3 |= HE_PREP(DATA3_CODING,
+                                   !!(status->enc_flags & RX_ENC_FLAG_LDPC));
+
+               he.data5 |= HE_PREP(DATA5_GI, status->he_gi);
+
+               switch (status->bw) {
+               case RATE_INFO_BW_20:
+                       he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
+                                           IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
+                       break;
+               case RATE_INFO_BW_40:
+                       he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
+                                           IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
+                       break;
+               case RATE_INFO_BW_80:
+                       he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
+                                           IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
+                       break;
+               case RATE_INFO_BW_160:
+                       he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
+                                           IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
+                       break;
+               case RATE_INFO_BW_HE_RU:
+#define CHECK_RU_ALLOC(s) \
+       BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
+                    NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)
+
+                       CHECK_RU_ALLOC(26);
+                       CHECK_RU_ALLOC(52);
+                       CHECK_RU_ALLOC(106);
+                       CHECK_RU_ALLOC(242);
+                       CHECK_RU_ALLOC(484);
+                       CHECK_RU_ALLOC(996);
+                       CHECK_RU_ALLOC(2x996);
+
+                       he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
+                                           status->he_ru + 4);
+                       break;
+               default:
+                       WARN_ONCE(1, "Invalid SU BW %d\n", status->bw);
+               }
+
+               /* ensure 2 byte alignment */
+               while ((pos - (u8 *)rthdr) & 1)
+                       pos++;
+               rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
+               memcpy(pos, &he, sizeof(he));
+               pos += sizeof(he);
+       }
+
+       if (status->encoding == RX_ENC_HE &&
+           status->flag & RX_FLAG_RADIOTAP_HE_MU) {
+               /* ensure 2 byte alignment */
+               while ((pos - (u8 *)rthdr) & 1)
+                       pos++;
+               rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU);
+               memcpy(pos, &he_mu, sizeof(he_mu));
+               pos += sizeof(he_mu);
+       }
+
        for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
                *pos++ = status->chain_signal[chain];
                *pos++ = chain;
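
HE_PREP() above leans on FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>, which derive the shift amount from the mask itself. Minimal stand-ins that behave the same way for contiguous masks; DATA6_NSTS here is a hypothetical 4-bit field, not the radiotap define:

    #include <stdint.h>
    #include <stdio.h>

    /* multiplying/dividing by the mask's lowest set bit is equivalent
     * to shifting by the field offset */
    #define LOW_BIT(m)       ((m) & -(m))
    #define FIELD_PREP(m, v) (((uint32_t)(v) * LOW_BIT(m)) & (m))
    #define FIELD_GET(m, r)  (((r) & (m)) / LOW_BIT(m))

    #define DATA6_NSTS 0x000fu /* hypothetical field at bits 3-0 */

    int main(void)
    {
        uint32_t reg = FIELD_PREP(DATA6_NSTS, 2); /* pack NSTS = 2 */

        printf("packed=0x%x nsts=%u\n", reg, FIELD_GET(DATA6_NSTS, reg));
        return 0;
    }
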
@@ -613,6 +723,12 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
                rcu_dereference(local->monitor_sdata);
        bool only_monitor = false;
 
+       if (status->flag & RX_FLAG_RADIOTAP_HE)
+               rtap_space += sizeof(struct ieee80211_radiotap_he);
+
+       if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
+               rtap_space += sizeof(struct ieee80211_radiotap_he_mu);
+
        if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
                struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data;
 
@@ -3241,7 +3357,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
                }
 
                __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
-                                           status->band);
+                                           status->band, 0);
        }
        dev_kfree_skb(rx->skb);
        return RX_QUEUED;
@@ -3386,8 +3502,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
                status = IEEE80211_SKB_RXCB((rx->skb));
 
                sband = rx->local->hw.wiphy->bands[status->band];
-               if (!(status->encoding == RX_ENC_HT) &&
-                   !(status->encoding == RX_ENC_VHT))
+               if (status->encoding == RX_ENC_LEGACY)
                        rate = &sband->bitrates[status->rate_idx];
 
                ieee80211_rx_cooked_monitor(rx, rate);
@@ -4386,6 +4501,14 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
                                      status->rate_idx, status->nss))
                                goto drop;
                        break;
+               case RX_ENC_HE:
+                       if (WARN_ONCE(status->rate_idx > 11 ||
+                                     !status->nss ||
+                                     status->nss > 8,
+                                     "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n",
+                                     status->rate_idx, status->nss))
+                               goto drop;
+                       break;
                default:
                        WARN_ON_ONCE(1);
                        /* fall through */
index 2e917a6d239d234ce671b8b4017dbd23c4be5b2e..5d2a11777718c42c3ba4affb190904d2b7bd61de 100644 (file)
@@ -20,6 +20,7 @@
 #include <net/sch_generic.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/random.h>
 #include <net/mac80211.h>
 
 #include "ieee80211_i.h"
@@ -293,6 +294,7 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
        struct cfg80211_chan_def chandef;
        u8 bands_used = 0;
        int i, ielen, n_chans;
+       u32 flags = 0;
 
        req = rcu_dereference_protected(local->scan_req,
                                        lockdep_is_held(&local->mtx));
@@ -331,12 +333,16 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
        local->hw_scan_req->req.n_channels = n_chans;
        ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
 
+       if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
+               flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT;
+
        ielen = ieee80211_build_preq_ies(local,
                                         (u8 *)local->hw_scan_req->req.ie,
                                         local->hw_scan_ies_bufsize,
                                         &local->hw_scan_req->ies,
                                         req->ie, req->ie_len,
-                                        bands_used, req->rates, &chandef);
+                                        bands_used, req->rates, &chandef,
+                                        flags);
        local->hw_scan_req->req.ie_len = ielen;
        local->hw_scan_req->req.no_cck = req->no_cck;
        ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr);
@@ -528,6 +534,35 @@ void ieee80211_run_deferred_scan(struct ieee80211_local *local)
                                     round_jiffies_relative(0));
 }
 
+static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
+                                         const u8 *src, const u8 *dst,
+                                         const u8 *ssid, size_t ssid_len,
+                                         const u8 *ie, size_t ie_len,
+                                         u32 ratemask, u32 flags, u32 tx_flags,
+                                         struct ieee80211_channel *channel)
+{
+       struct sk_buff *skb;
+       u32 txdata_flags = 0;
+
+       skb = ieee80211_build_probe_req(sdata, src, dst, ratemask, channel,
+                                       ssid, ssid_len,
+                                       ie, ie_len, flags);
+
+       if (skb) {
+               if (flags & IEEE80211_PROBE_FLAG_RANDOM_SN) {
+                       struct ieee80211_hdr *hdr = (void *)skb->data;
+                       u16 sn = get_random_u32();
+
+                       txdata_flags |= IEEE80211_TX_NO_SEQNO;
+                       hdr->seq_ctrl =
+                               cpu_to_le16(IEEE80211_SN_TO_SEQ(sn));
+               }
+               IEEE80211_SKB_CB(skb)->flags |= tx_flags;
+               ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band,
+                                         txdata_flags);
+       }
+}
+
 static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
                                            unsigned long *next_delay)
 {
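
The new helper above randomizes the sequence number when NL80211_SCAN_FLAG_RANDOM_SN is requested, and sets IEEE80211_TX_NO_SEQNO so the TX path (see the tx.c hunk further down) leaves that value alone. A sketch of the Sequence Control packing, assuming the same bit layout as IEEE80211_SN_TO_SEQ and skipping the kernel's cpu_to_le16():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* the 12-bit SN occupies bits 4-15 of the Sequence Control field,
     * the fragment number in bits 0-3 stays 0 */
    #define SN_TO_SEQ(sn) ((uint16_t)(((sn) & 0x0fffu) << 4))

    int main(void)
    {
        uint16_t sn = (uint16_t)rand(); /* stand-in for get_random_u32() */

        printf("seq_ctrl=0x%04x\n", SN_TO_SEQ(sn));
        return 0;
    }
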
@@ -535,7 +570,7 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
        struct ieee80211_sub_if_data *sdata;
        struct cfg80211_scan_request *scan_req;
        enum nl80211_band band = local->hw.conf.chandef.chan->band;
-       u32 tx_flags;
+       u32 flags = 0, tx_flags;
 
        scan_req = rcu_dereference_protected(local->scan_req,
                                             lockdep_is_held(&local->mtx));
@@ -543,17 +578,21 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
        tx_flags = IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
        if (scan_req->no_cck)
                tx_flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
+       if (scan_req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
+               flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT;
+       if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_SN)
+               flags |= IEEE80211_PROBE_FLAG_RANDOM_SN;
 
        sdata = rcu_dereference_protected(local->scan_sdata,
                                          lockdep_is_held(&local->mtx));
 
        for (i = 0; i < scan_req->n_ssids; i++)
-               ieee80211_send_probe_req(
+               ieee80211_send_scan_probe_req(
                        sdata, local->scan_addr, scan_req->bssid,
                        scan_req->ssids[i].ssid, scan_req->ssids[i].ssid_len,
                        scan_req->ie, scan_req->ie_len,
-                       scan_req->rates[band], false,
-                       tx_flags, local->hw.conf.chandef.chan, true);
+                       scan_req->rates[band], flags,
+                       tx_flags, local->hw.conf.chandef.chan);
 
        /*
         * After sending probe requests, wait for probe responses
@@ -1141,6 +1180,7 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
        u32 rate_masks[NUM_NL80211_BANDS] = {};
        u8 bands_used = 0;
        u8 *ie;
+       u32 flags = 0;
 
        iebufsz = local->scan_ies_len + req->ie_len;
 
@@ -1157,6 +1197,9 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
                }
        }
 
+       if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT)
+               flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT;
+
        ie = kcalloc(iebufsz, num_bands, GFP_KERNEL);
        if (!ie) {
                ret = -ENOMEM;
@@ -1167,7 +1210,8 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_build_preq_ies(local, ie, num_bands * iebufsz,
                                 &sched_scan_ies, req->ie,
-                                req->ie_len, bands_used, rate_masks, &chandef);
+                                req->ie_len, bands_used, rate_masks, &chandef,
+                                flags);
 
        ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
        if (ret == 0) {
index 6428f1ac37b67afda55d7335bee59b2d140813a8..f34202242d24d074f5cca49c7b4b7a101114f73b 100644 (file)
@@ -1323,6 +1323,11 @@ static void ieee80211_send_null_response(struct sta_info *sta, int tid,
        struct ieee80211_tx_info *info;
        struct ieee80211_chanctx_conf *chanctx_conf;
 
+       /* Don't send NDPs when the STA is associated with HE */
+       if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+           !(sdata->u.mgd.flags & IEEE80211_STA_DISABLE_HE))
+               return;
+
        if (qos) {
                fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
                                 IEEE80211_STYPE_QOS_NULLFUNC |
@@ -1391,7 +1396,7 @@ static void ieee80211_send_null_response(struct sta_info *sta, int tid,
        }
 
        info->band = chanctx_conf->def.chan->band;
-       ieee80211_xmit(sdata, sta, skb);
+       ieee80211_xmit(sdata, sta, skb, 0);
        rcu_read_unlock();
 }
 
@@ -1968,7 +1973,7 @@ sta_get_last_rx_stats(struct sta_info *sta)
        return stats;
 }
 
-static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate,
+static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
                                  struct rate_info *rinfo)
 {
        rinfo->bw = STA_STATS_GET(BW, rate);
@@ -2005,6 +2010,14 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate,
                rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
                break;
                }
+       case STA_STATS_RATE_TYPE_HE:
+               rinfo->flags = RATE_INFO_FLAGS_HE_MCS;
+               rinfo->mcs = STA_STATS_GET(HE_MCS, rate);
+               rinfo->nss = STA_STATS_GET(HE_NSS, rate);
+               rinfo->he_gi = STA_STATS_GET(HE_GI, rate);
+               rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate);
+               rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate);
+               break;
        }
 }
 
@@ -2101,38 +2114,38 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
 
        drv_sta_statistics(local, sdata, &sta->sta, sinfo);
 
-       sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME) |
-                        BIT(NL80211_STA_INFO_STA_FLAGS) |
-                        BIT(NL80211_STA_INFO_BSS_PARAM) |
-                        BIT(NL80211_STA_INFO_CONNECTED_TIME) |
-                        BIT(NL80211_STA_INFO_RX_DROP_MISC);
+       sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
+                        BIT_ULL(NL80211_STA_INFO_STA_FLAGS) |
+                        BIT_ULL(NL80211_STA_INFO_BSS_PARAM) |
+                        BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) |
+                        BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC);
 
        if (sdata->vif.type == NL80211_IFTYPE_STATION) {
                sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count;
-               sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_LOSS);
+               sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS);
        }
 
        sinfo->connected_time = ktime_get_seconds() - sta->last_connected;
        sinfo->inactive_time =
                jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta));
 
-       if (!(sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES64) |
-                              BIT(NL80211_STA_INFO_TX_BYTES)))) {
+       if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) |
+                              BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) {
                sinfo->tx_bytes = 0;
                for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
                        sinfo->tx_bytes += sta->tx_stats.bytes[ac];
-               sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
+               sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
        }
 
-       if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_PACKETS))) {
+       if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) {
                sinfo->tx_packets = 0;
                for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
                        sinfo->tx_packets += sta->tx_stats.packets[ac];
-               sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+               sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
        }
 
-       if (!(sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES64) |
-                              BIT(NL80211_STA_INFO_RX_BYTES)))) {
+       if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) |
+                              BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) {
                sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);
 
                if (sta->pcpu_rx_stats) {
@@ -2144,10 +2157,10 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
                        }
                }
 
-               sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
+               sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
        }
 
-       if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_PACKETS))) {
+       if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) {
                sinfo->rx_packets = sta->rx_stats.packets;
                if (sta->pcpu_rx_stats) {
                        for_each_possible_cpu(cpu) {
@@ -2157,17 +2170,17 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
                                sinfo->rx_packets += cpurxs->packets;
                        }
                }
-               sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+               sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
        }
 
-       if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_RETRIES))) {
+       if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) {
                sinfo->tx_retries = sta->status_stats.retry_count;
-               sinfo->filled |= BIT(NL80211_STA_INFO_TX_RETRIES);
+               sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
        }
 
-       if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_FAILED))) {
+       if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) {
                sinfo->tx_failed = sta->status_stats.retry_failed;
-               sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
+               sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
        }
 
        sinfo->rx_dropped_misc = sta->rx_stats.dropped;
@@ -2182,23 +2195,23 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
 
        if (sdata->vif.type == NL80211_IFTYPE_STATION &&
            !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
-               sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX) |
-                                BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
+               sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) |
+                                BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
                sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif);
        }
 
        if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
            ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) {
-               if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL))) {
+               if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) {
                        sinfo->signal = (s8)last_rxstats->last_signal;
-                       sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+                       sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
                }
 
                if (!sta->pcpu_rx_stats &&
-                   !(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) {
+                   !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) {
                        sinfo->signal_avg =
                                -ewma_signal_read(&sta->rx_stats_avg.signal);
-                       sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
+                       sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
                }
        }
 
@@ -2207,11 +2220,11 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
         * pcpu statistics
         */
        if (last_rxstats->chains &&
-           !(sinfo->filled & (BIT(NL80211_STA_INFO_CHAIN_SIGNAL) |
-                              BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
-               sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL);
+           !(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) |
+                              BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
+               sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
                if (!sta->pcpu_rx_stats)
-                       sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
+                       sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
 
                sinfo->chains = last_rxstats->chains;
 
@@ -2223,15 +2236,15 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
                }
        }
 
-       if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_BITRATE))) {
+       if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) {
                sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate,
                                     &sinfo->txrate);
-               sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+               sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
        }
 
-       if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_BITRATE))) {
+       if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))) {
                if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0)
-                       sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
+                       sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
        }
 
        if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) {
@@ -2244,18 +2257,18 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
 
        if (ieee80211_vif_is_mesh(&sdata->vif)) {
 #ifdef CONFIG_MAC80211_MESH
-               sinfo->filled |= BIT(NL80211_STA_INFO_LLID) |
-                                BIT(NL80211_STA_INFO_PLID) |
-                                BIT(NL80211_STA_INFO_PLINK_STATE) |
-                                BIT(NL80211_STA_INFO_LOCAL_PM) |
-                                BIT(NL80211_STA_INFO_PEER_PM) |
-                                BIT(NL80211_STA_INFO_NONPEER_PM);
+               sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) |
+                                BIT_ULL(NL80211_STA_INFO_PLID) |
+                                BIT_ULL(NL80211_STA_INFO_PLINK_STATE) |
+                                BIT_ULL(NL80211_STA_INFO_LOCAL_PM) |
+                                BIT_ULL(NL80211_STA_INFO_PEER_PM) |
+                                BIT_ULL(NL80211_STA_INFO_NONPEER_PM);
 
                sinfo->llid = sta->mesh->llid;
                sinfo->plid = sta->mesh->plid;
                sinfo->plink_state = sta->mesh->plink_state;
                if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
-                       sinfo->filled |= BIT(NL80211_STA_INFO_T_OFFSET);
+                       sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET);
                        sinfo->t_offset = sta->mesh->t_offset;
                }
                sinfo->local_pm = sta->mesh->local_pm;
@@ -2300,7 +2313,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
        thr = sta_get_expected_throughput(sta);
 
        if (thr != 0) {
-               sinfo->filled |= BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT);
+               sinfo->filled |= BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT);
                sinfo->expected_throughput = thr;
        }
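
The wholesale BIT() to BIT_ULL() conversion above is needed because sinfo->filled is a u64 while BIT() expands to a 1UL shift: as the NL80211_STA_INFO_* space grows past bit 31, that shift exceeds the width of unsigned long on 32-bit architectures. A small demonstration of the difference:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)     (1UL << (n))  /* 32 bits wide on 32-bit arches */
    #define BIT_ULL(n) (1ULL << (n)) /* always 64 bits wide */

    int main(void)
    {
        uint64_t filled = 0;

        /* with a flag index of 32 or more, BIT() would shift past the
         * width of unsigned long on 32-bit targets (undefined
         * behaviour); BIT_ULL() stays well-defined */
        filled |= BIT_ULL(33);
        printf("0x%llx\n", (unsigned long long)filled);
        return 0;
    }
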
 
index 81b35f62379249e590b178283569bbd2c57bcd05..9a04327d71d1de1129a7589195c574e8b62fa74c 100644 (file)
@@ -170,7 +170,7 @@ struct tid_ampdu_tx {
        u8 dialog_token;
        u8 stop_initiator;
        bool tx_stop;
-       u8 buf_size;
+       u16 buf_size;
 
        u16 failed_bar_ssn;
        bool bar_pending;
@@ -405,7 +405,7 @@ struct ieee80211_sta_rx_stats {
        int last_signal;
        u8 chains;
        s8 chain_signal_last[IEEE80211_MAX_CHAINS];
-       u16 last_rate;
+       u32 last_rate;
        struct u64_stats_sync syncp;
        u64 bytes;
        u64 msdu[IEEE80211_NUM_TIDS + 1];
@@ -764,6 +764,7 @@ enum sta_stats_type {
        STA_STATS_RATE_TYPE_LEGACY,
        STA_STATS_RATE_TYPE_HT,
        STA_STATS_RATE_TYPE_VHT,
+       STA_STATS_RATE_TYPE_HE,
 };
 
 #define STA_STATS_FIELD_HT_MCS         GENMASK( 7,  0)
@@ -771,9 +772,14 @@ enum sta_stats_type {
 #define STA_STATS_FIELD_LEGACY_BAND    GENMASK( 7,  4)
 #define STA_STATS_FIELD_VHT_MCS                GENMASK( 3,  0)
 #define STA_STATS_FIELD_VHT_NSS                GENMASK( 7,  4)
+#define STA_STATS_FIELD_HE_MCS         GENMASK( 3,  0)
+#define STA_STATS_FIELD_HE_NSS         GENMASK( 7,  4)
 #define STA_STATS_FIELD_BW             GENMASK(11,  8)
 #define STA_STATS_FIELD_SGI            GENMASK(12, 12)
 #define STA_STATS_FIELD_TYPE           GENMASK(15, 13)
+#define STA_STATS_FIELD_HE_RU          GENMASK(18, 16)
+#define STA_STATS_FIELD_HE_GI          GENMASK(20, 19)
+#define STA_STATS_FIELD_HE_DCM         GENMASK(21, 21)
 
 #define STA_STATS_FIELD(_n, _v)                FIELD_PREP(STA_STATS_FIELD_ ## _n, _v)
 #define STA_STATS_GET(_n, _v)          FIELD_GET(STA_STATS_FIELD_ ## _n, _v)
@@ -782,7 +788,7 @@ enum sta_stats_type {
 
 static inline u32 sta_stats_encode_rate(struct ieee80211_rx_status *s)
 {
-       u16 r;
+       u32 r;
 
        r = STA_STATS_FIELD(BW, s->bw);
 
@@ -804,6 +810,14 @@ static inline u32 sta_stats_encode_rate(struct ieee80211_rx_status *s)
                r |= STA_STATS_FIELD(LEGACY_BAND, s->band);
                r |= STA_STATS_FIELD(LEGACY_IDX, s->rate_idx);
                break;
+       case RX_ENC_HE:
+               r |= STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_HE);
+               r |= STA_STATS_FIELD(HE_NSS, s->nss);
+               r |= STA_STATS_FIELD(HE_MCS, s->rate_idx);
+               r |= STA_STATS_FIELD(HE_GI, s->he_gi);
+               r |= STA_STATS_FIELD(HE_RU, s->he_ru);
+               r |= STA_STATS_FIELD(HE_DCM, s->he_dcm);
+               break;
        default:
                WARN_ON(1);
                return STA_STATS_RATE_INVALID;
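
With the HE RU/GI/DCM fields placed at bits 16-21, the encoded rate no longer fits in 16 bits, which is why last_rate and the encode/decode helpers move from u16 to u32 in the hunks above. A sketch of the packing; the mask positions mirror the STA_STATS_FIELD_* defines, and the numeric TYPE value for HE is an assumption of this sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define F_HE_MCS 0x0000000fu /* bits  3-0  */
    #define F_HE_NSS 0x000000f0u /* bits  7-4  */
    #define F_TYPE   0x0000e000u /* bits 15-13 */
    #define F_HE_RU  0x00070000u /* bits 18-16: past what u16 can hold */

    int main(void)
    {
        uint32_t r = 0;

        r |= (7u << 0)  & F_HE_MCS; /* MCS 7 */
        r |= (2u << 4)  & F_HE_NSS; /* 2 spatial streams */
        r |= (4u << 13) & F_TYPE;   /* assumed value of ..._TYPE_HE */
        r |= (1u << 16) & F_HE_RU;  /* RU allocation index 1 */

        printf("encoded=0x%08x ru=%u\n", r, (r & F_HE_RU) >> 16);
        return 0;
    }
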
index 80a7edf8d314a12a22f62047e585275a7baa3ab9..0ab69a1964f8b97a7035a58f0321e5490c6a9a58 100644 (file)
@@ -92,7 +92,7 @@
                                STA_ENTRY                                               \
                                __field(u16, tid)                                       \
                                __field(u16, ssn)                                       \
-                               __field(u8, buf_size)                                   \
+                               __field(u16, buf_size)                                  \
                                __field(bool, amsdu)                                    \
                                __field(u16, timeout)                                   \
                                __field(u16, action)
index 44b5dfe8727d936d39338006bc89b125c848d12b..6a79d564de35ef074707d9707709a552e2f671d1 100644 (file)
@@ -825,6 +825,8 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
         */
        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            is_multicast_ether_addr(hdr->addr1)) {
+               if (tx->flags & IEEE80211_TX_NO_SEQNO)
+                       return TX_CONTINUE;
                /* driver should assign sequence number */
                info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
                /* for pure STA mode without beacons, we can do it */
@@ -1854,7 +1856,7 @@ EXPORT_SYMBOL(ieee80211_tx_prepare_skb);
  */
 static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
                         struct sta_info *sta, struct sk_buff *skb,
-                        bool txpending)
+                        bool txpending, u32 txdata_flags)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_tx_data tx;
@@ -1872,6 +1874,8 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
        led_len = skb->len;
        res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb);
 
+       tx.flags |= txdata_flags;
+
        if (unlikely(res_prepare == TX_DROP)) {
                ieee80211_free_txskb(&local->hw, skb);
                return true;
@@ -1933,7 +1937,8 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
 }
 
 void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
-                   struct sta_info *sta, struct sk_buff *skb)
+                   struct sta_info *sta, struct sk_buff *skb,
+                   u32 txdata_flags)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1968,7 +1973,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
        }
 
        ieee80211_set_qos_hdr(sdata, skb);
-       ieee80211_tx(sdata, sta, skb, false);
+       ieee80211_tx(sdata, sta, skb, false, txdata_flags);
 }
 
 static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local,
@@ -2289,7 +2294,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
        if (!ieee80211_parse_tx_radiotap(local, skb))
                goto fail_rcu;
 
-       ieee80211_xmit(sdata, NULL, skb);
+       ieee80211_xmit(sdata, NULL, skb, 0);
        rcu_read_unlock();
 
        return NETDEV_TX_OK;
@@ -3648,7 +3653,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
 
                ieee80211_tx_stats(dev, skb->len);
 
-               ieee80211_xmit(sdata, sta, skb);
+               ieee80211_xmit(sdata, sta, skb, 0);
        }
        goto out;
  out_free:
@@ -3867,7 +3872,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
                        return true;
                }
                info->band = chanctx_conf->def.chan->band;
-               result = ieee80211_tx(sdata, NULL, skb, true);
+               result = ieee80211_tx(sdata, NULL, skb, true, 0);
        } else {
                struct sk_buff_head skbs;
 
@@ -4783,7 +4788,7 @@ EXPORT_SYMBOL(ieee80211_unreserve_tid);
 
 void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
                                 struct sk_buff *skb, int tid,
-                                enum nl80211_band band)
+                                enum nl80211_band band, u32 txdata_flags)
 {
        int ac = ieee80211_ac_from_tid(tid);
 
@@ -4800,7 +4805,7 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
         */
        local_bh_disable();
        IEEE80211_SKB_CB(skb)->band = band;
-       ieee80211_xmit(sdata, NULL, skb);
+       ieee80211_xmit(sdata, NULL, skb, txdata_flags);
        local_bh_enable();
 }
 
@@ -4845,7 +4850,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
        skb_reset_network_header(skb);
        skb_reset_mac_header(skb);
 
+       local_bh_disable();
        __ieee80211_subif_start_xmit(skb, skb->dev, flags);
+       local_bh_enable();
 
        return 0;
 }
index 5e2e511c4a6f69cf0b613c1b3facd0665d672cfd..3e68132a41fabd029a8a78161eb783570d1c20f7 100644 (file)
@@ -1095,6 +1095,21 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
                        if (elen >= sizeof(*elems->max_idle_period_ie))
                                elems->max_idle_period_ie = (void *)pos;
                        break;
+               case WLAN_EID_EXTENSION:
+                       if (pos[0] == WLAN_EID_EXT_HE_MU_EDCA &&
+                           elen >= (sizeof(*elems->mu_edca_param_set) + 1)) {
+                               elems->mu_edca_param_set = (void *)&pos[1];
+                       } else if (pos[0] == WLAN_EID_EXT_HE_CAPABILITY) {
+                               elems->he_cap = (void *)&pos[1];
+                               elems->he_cap_len = elen - 1;
+                       } else if (pos[0] == WLAN_EID_EXT_HE_OPERATION &&
+                                  elen >= sizeof(*elems->he_operation) &&
+                                  elen >= ieee80211_he_oper_size(&pos[1])) {
+                               elems->he_operation = (void *)&pos[1];
+                       } else if (pos[0] == WLAN_EID_EXT_UORA && elen >= 1) {
+                               elems->uora_element = (void *)&pos[1];
+                       }
+                       break;
                default:
                        break;
                }
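
Each new WLAN_EID_EXTENSION case above validates the element length before publishing a pointer into it, and the checks add 1 because elen also counts the extension ID octet. A sketch of that guard, using an invented MU-EDCA layout rather than the real struct from ieee80211.h:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* illustrative layout only */
    struct mu_edca_param_set {
        uint8_t qos_info;
        uint8_t ac_params[3][3];
    };

    /* pos points at the extension ID octet; elen is the element length,
     * which counts that octet too, hence the +1 in the check */
    static const struct mu_edca_param_set *
    parse_mu_edca(const uint8_t *pos, size_t elen)
    {
        if (elen < sizeof(struct mu_edca_param_set) + 1)
            return NULL; /* too short, don't hand out a pointer */
        return (const void *)(pos + 1);
    }

    int main(void)
    {
        uint8_t elem[11] = { 0x01 }; /* ext ID plus 10 payload bytes */

        printf("%s\n", parse_mu_edca(elem, sizeof(elem)) ? "ok" : "short");
        return 0;
    }
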
@@ -1353,9 +1368,10 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local,
                                         enum nl80211_band band,
                                         u32 rate_mask,
                                         struct cfg80211_chan_def *chandef,
-                                        size_t *offset)
+                                        size_t *offset, u32 flags)
 {
        struct ieee80211_supported_band *sband;
+       const struct ieee80211_sta_he_cap *he_cap;
        u8 *pos = buffer, *end = buffer + buffer_len;
        size_t noffset;
        int supp_rates_len, i;
@@ -1433,6 +1449,9 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local,
                                chandef->chan->center_freq);
        }
 
+       if (flags & IEEE80211_PROBE_FLAG_MIN_CONTENT)
+               goto done;
+
        /* insert custom IEs that go before HT */
        if (ie && ie_len) {
                static const u8 before_ht[] = {
@@ -1460,11 +1479,6 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local,
                                                sband->ht_cap.cap);
        }
 
-       /*
-        * If adding more here, adjust code in main.c
-        * that calculates local->scan_ies_len.
-        */
-
        /* insert custom IEs that go before VHT */
        if (ie && ie_len) {
                static const u8 before_vht[] = {
@@ -1507,9 +1521,43 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_local *local,
                                                 sband->vht_cap.cap);
        }
 
+       /* insert custom IEs that go before HE */
+       if (ie && ie_len) {
+               static const u8 before_he[] = {
+                       /*
+                        * no need to list the ones split off before VHT
+                        * or generated here
+                        */
+                       WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_REQ_PARAMS,
+                       WLAN_EID_AP_CSN,
+                       /* TODO: add 11ah/11aj/11ak elements */
+               };
+               noffset = ieee80211_ie_split(ie, ie_len,
+                                            before_he, ARRAY_SIZE(before_he),
+                                            *offset);
+               if (end - pos < noffset - *offset)
+                       goto out_err;
+               memcpy(pos, ie + *offset, noffset - *offset);
+               pos += noffset - *offset;
+               *offset = noffset;
+       }
+
+       he_cap = ieee80211_get_he_sta_cap(sband);
+       if (he_cap) {
+               pos = ieee80211_ie_build_he_cap(pos, he_cap, end);
+               if (!pos)
+                       goto out_err;
+       }
+
+       /*
+        * If adding more here, adjust code in main.c
+        * that calculates local->scan_ies_len.
+        */
+
        return pos - buffer;
  out_err:
        WARN_ONCE(1, "not enough space for preq IEs\n");
+ done:
        return pos - buffer;
 }
 
@@ -1518,7 +1566,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                             struct ieee80211_scan_ies *ie_desc,
                             const u8 *ie, size_t ie_len,
                             u8 bands_used, u32 *rate_masks,
-                            struct cfg80211_chan_def *chandef)
+                            struct cfg80211_chan_def *chandef,
+                            u32 flags)
 {
        size_t pos = 0, old_pos = 0, custom_ie_offset = 0;
        int i;
@@ -1533,7 +1582,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                                                             ie, ie_len, i,
                                                             rate_masks[i],
                                                             chandef,
-                                                            &custom_ie_offset);
+                                                            &custom_ie_offset,
+                                                            flags);
                        ie_desc->ies[i] = buffer + old_pos;
                        ie_desc->len[i] = pos - old_pos;
                        old_pos = pos;
@@ -1561,7 +1611,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
                                          struct ieee80211_channel *chan,
                                          const u8 *ssid, size_t ssid_len,
                                          const u8 *ie, size_t ie_len,
-                                         bool directed)
+                                         u32 flags)
 {
        struct ieee80211_local *local = sdata->local;
        struct cfg80211_chan_def chandef;
@@ -1577,7 +1627,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
         * badly-behaved APs don't respond when this parameter is included.
         */
        chandef.width = sdata->vif.bss_conf.chandef.width;
-       if (directed)
+       if (flags & IEEE80211_PROBE_FLAG_DIRECTED)
                chandef.chan = NULL;
        else
                chandef.chan = chan;
@@ -1591,7 +1641,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
        ies_len = ieee80211_build_preq_ies(local, skb_tail_pointer(skb),
                                           skb_tailroom(skb), &dummy_ie_desc,
                                           ie, ie_len, BIT(chan->band),
-                                          rate_masks, &chandef);
+                                          rate_masks, &chandef, flags);
        skb_put(skb, ies_len);
 
        if (dst) {
@@ -1605,27 +1655,6 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
        return skb;
 }
 
-void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata,
-                             const u8 *src, const u8 *dst,
-                             const u8 *ssid, size_t ssid_len,
-                             const u8 *ie, size_t ie_len,
-                             u32 ratemask, bool directed, u32 tx_flags,
-                             struct ieee80211_channel *channel, bool scan)
-{
-       struct sk_buff *skb;
-
-       skb = ieee80211_build_probe_req(sdata, src, dst, ratemask, channel,
-                                       ssid, ssid_len,
-                                       ie, ie_len, directed);
-       if (skb) {
-               IEEE80211_SKB_CB(skb)->flags |= tx_flags;
-               if (scan)
-                       ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band);
-               else
-                       ieee80211_tx_skb(sdata, skb);
-       }
-}
-
 u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
                            struct ieee802_11_elems *elems,
                            enum nl80211_band band, u32 *basic_rates)
@@ -2412,6 +2441,72 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
        return pos;
 }
 
+u8 *ieee80211_ie_build_he_cap(u8 *pos,
+                             const struct ieee80211_sta_he_cap *he_cap,
+                             u8 *end)
+{
+       u8 n;
+       u8 ie_len;
+       u8 *orig_pos = pos;
+
+       /* Make sure we have room for the IE */
+       /*
+        * TODO: the extra 1 is the Element ID Extension octet, needed while
+        * this is carried in an EXTENSION element. Drop it if the IE moves.
+        */
+       if (!he_cap)
+               return orig_pos;
+
+       n = ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem);
+       ie_len = 2 + 1 +
+                sizeof(he_cap->he_cap_elem) + n +
+                ieee80211_he_ppe_size(he_cap->ppe_thres[0],
+                                      he_cap->he_cap_elem.phy_cap_info);
+
+       if ((end - pos) < ie_len)
+               return orig_pos;
+
+       *pos++ = WLAN_EID_EXTENSION;
+       pos++; /* We'll set the size later below */
+       *pos++ = WLAN_EID_EXT_HE_CAPABILITY;
+
+       /* Fixed data */
+       memcpy(pos, &he_cap->he_cap_elem, sizeof(he_cap->he_cap_elem));
+       pos += sizeof(he_cap->he_cap_elem);
+
+       memcpy(pos, &he_cap->he_mcs_nss_supp, n);
+       pos += n;
+
+       /* Check if PPE Threshold should be present */
+       if ((he_cap->he_cap_elem.phy_cap_info[6] &
+            IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0)
+               goto end;
+
+       /*
+        * Calculate how many PPET16/PPET8 pairs are to come. Algorithm:
+        * (NSS_M1 + 1) x (num of 1 bits in RU_INDEX_BITMASK)
+        */
+       n = hweight8(he_cap->ppe_thres[0] &
+                    IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+       n *= (1 + ((he_cap->ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) >>
+                  IEEE80211_PPE_THRES_NSS_POS));
+
+       /*
+        * Each pair is 6 bits, and we need to add the 7 "header" bits to the
+        * total size.
+        */
+       n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7;
+       n = DIV_ROUND_UP(n, 8);
+
+       /* Copy PPE Thresholds */
+       memcpy(pos, &he_cap->ppe_thres, n);
+       pos += n;
+
+end:
+       orig_pos[1] = (pos - orig_pos) - 2;
+       return pos;
+}
+
 u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
                               const struct cfg80211_chan_def *chandef,
                               u16 prot_mode, bool rifs_mode)
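
Worked numbers for the PPE Thresholds sizing in ieee80211_ie_build_he_cap() above: each PPET16/PPET8 pair is two 3-bit values (hence the "6 bits" in the comment), and a 7-bit header precedes the pairs. A runnable check, assuming IEEE80211_PPE_THRES_INFO_PPET_SIZE is the 3 that arithmetic implies:

    #include <stdio.h>

    #define PPET_SIZE 3 /* bits per PPET16 or PPET8 value */

    static unsigned ppe_bytes(unsigned nss_m1, unsigned ru_bits_set)
    {
        unsigned pairs = ru_bits_set * (nss_m1 + 1);
        unsigned bits = pairs * PPET_SIZE * 2 + 7; /* pairs + header */

        return (bits + 7) / 8; /* DIV_ROUND_UP(bits, 8) */
    }

    int main(void)
    {
        /* 2 spatial streams (NSS_M1 = 1), RU index bitmask with 2 bits
         * set: 4 pairs -> 4 * 6 + 7 = 31 bits -> 4 bytes */
        printf("%u\n", ppe_bytes(1, 2));
        return 0;
    }
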
index e7b05de1e6d1e136eb509293c4fde81468e12642..25e483e8278bd0404bf044c1a1748fdd1db77580 100644 (file)
@@ -73,8 +73,8 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
        ncm->data[2] = data;
        ncm->data[4] = ntohl(lsc->oem_status);
 
-       netdev_info(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
-                   nc->id, data & 0x1 ? "up" : "down");
+       netdev_dbg(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
+                  nc->id, data & 0x1 ? "up" : "down");
 
        chained = !list_empty(&nc->link);
        state = nc->state;
@@ -148,9 +148,9 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp,
        hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
        ncm->data[3] = ntohl(hncdsc->status);
        spin_unlock_irqrestore(&nc->lock, flags);
-       netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                     "NCSI: host driver %srunning on channel %u\n",
-                     ncm->data[3] & 0x1 ? "" : "not ", nc->id);
+       netdev_dbg(ndp->ndev.dev,
+                  "NCSI: host driver %srunning on channel %u\n",
+                  ncm->data[3] & 0x1 ? "" : "not ", nc->id);
 
        return 0;
 }
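
The NCSI hunks here and below are a mechanical log-level demotion: chatty per-channel messages move from netdev_info()/netdev_printk(KERN_DEBUG, ...) to netdev_dbg(), which routes through dynamic debug when CONFIG_DYNAMIC_DEBUG is enabled and otherwise compiles away unless DEBUG is defined. A toy model of the compile-away behaviour, not the kernel's actual plumbing:

    #include <stdio.h>

    #ifdef DEBUG
    #define net_dbg(fmt, ...) printf(fmt, ##__VA_ARGS__)
    #else
    #define net_dbg(fmt, ...) do { } while (0) /* no runtime cost */
    #endif

    int main(void)
    {
        net_dbg("NCSI: channel %u config done\n", 0u);
        return 0;
    }
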
index 5561e221b71f10b223b381c2ed4b0752bedbc225..091284760d21fa02dc0f9997a2c68ce7f1f618e6 100644 (file)
@@ -788,8 +788,8 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
                }
                break;
        case ncsi_dev_state_config_done:
-               netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                             "NCSI: channel %u config done\n", nc->id);
+               netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
+                          nc->id);
                spin_lock_irqsave(&nc->lock, flags);
                if (nc->reconfigure_needed) {
                        /* This channel's configuration has been updated
@@ -804,8 +804,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
                        list_add_tail_rcu(&nc->link, &ndp->channel_queue);
                        spin_unlock_irqrestore(&ndp->lock, flags);
 
-                       netdev_printk(KERN_DEBUG, dev,
-                                     "Dirty NCSI channel state reset\n");
+                       netdev_dbg(dev, "Dirty NCSI channel state reset\n");
                        ncsi_process_next_channel(ndp);
                        break;
                }
@@ -816,9 +815,9 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
                } else {
                        hot_nc = NULL;
                        nc->state = NCSI_CHANNEL_INACTIVE;
-                       netdev_warn(ndp->ndev.dev,
-                                   "NCSI: channel %u link down after config\n",
-                                   nc->id);
+                       netdev_dbg(ndp->ndev.dev,
+                                  "NCSI: channel %u link down after config\n",
+                                  nc->id);
                }
                spin_unlock_irqrestore(&nc->lock, flags);
 
@@ -908,9 +907,9 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
        }
 
        ncm = &found->modes[NCSI_MODE_LINK];
-       netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                     "NCSI: Channel %u added to queue (link %s)\n",
-                     found->id, ncm->data[2] & 0x1 ? "up" : "down");
+       netdev_dbg(ndp->ndev.dev,
+                  "NCSI: Channel %u added to queue (link %s)\n",
+                  found->id, ncm->data[2] & 0x1 ? "up" : "down");
 
 out:
        spin_lock_irqsave(&ndp->lock, flags);
@@ -1199,14 +1198,14 @@ int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
        switch (old_state) {
        case NCSI_CHANNEL_INACTIVE:
                ndp->ndev.state = ncsi_dev_state_config;
-               netdev_info(ndp->ndev.dev, "NCSI: configuring channel %u\n",
-                           nc->id);
+               netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
+                          nc->id);
                ncsi_configure_channel(ndp);
                break;
        case NCSI_CHANNEL_ACTIVE:
                ndp->ndev.state = ncsi_dev_state_suspend;
-               netdev_info(ndp->ndev.dev, "NCSI: suspending channel %u\n",
-                           nc->id);
+               netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
+                          nc->id);
                ncsi_suspend_channel(ndp);
                break;
        default:
@@ -1226,8 +1225,6 @@ int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
                return ncsi_choose_active_channel(ndp);
        }
 
-       netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                     "NCSI: No more channels to process\n");
        ncsi_report_link(ndp, false);
        return -ENODEV;
 }
@@ -1318,9 +1315,9 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
                                if ((ndp->ndev.state & 0xff00) ==
                                                ncsi_dev_state_config ||
                                                !list_empty(&nc->link)) {
-                                       netdev_printk(KERN_DEBUG, nd->dev,
-                                                     "NCSI: channel %p marked dirty\n",
-                                                     nc);
+                                       netdev_dbg(nd->dev,
+                                                  "NCSI: channel %p marked dirty\n",
+                                                  nc);
                                        nc->reconfigure_needed = true;
                                }
                                spin_unlock_irqrestore(&nc->lock, flags);
@@ -1338,8 +1335,7 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
                        list_add_tail_rcu(&nc->link, &ndp->channel_queue);
                        spin_unlock_irqrestore(&ndp->lock, flags);
 
-                       netdev_printk(KERN_DEBUG, nd->dev,
-                                     "NCSI: kicked channel %p\n", nc);
+                       netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
                        n++;
                }
        }
@@ -1370,8 +1366,8 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
        list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
                n_vids++;
                if (vlan->vid == vid) {
-                       netdev_printk(KERN_DEBUG, dev,
-                                     "NCSI: vid %u already registered\n", vid);
+                       netdev_dbg(dev, "NCSI: vid %u already registered\n",
+                                  vid);
                        return 0;
                }
        }
@@ -1390,7 +1386,7 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
        vlan->vid = vid;
        list_add_rcu(&vlan->list, &ndp->vlan_vids);
 
-       netdev_printk(KERN_DEBUG, dev, "NCSI: Added new vid %u\n", vid);
+       netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
 
        found = ncsi_kick_channels(ndp) != 0;
 
@@ -1419,8 +1415,7 @@ int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
        /* Remove the VLAN id from our internal list */
        list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
                if (vlan->vid == vid) {
-                       netdev_printk(KERN_DEBUG, dev,
-                                     "NCSI: vid %u found, removing\n", vid);
+                       netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
                        list_del_rcu(&vlan->list);
                        found = true;
                        kfree(vlan);
@@ -1547,7 +1542,7 @@ void ncsi_stop_dev(struct ncsi_dev *nd)
                }
        }
 
-       netdev_printk(KERN_DEBUG, ndp->ndev.dev, "NCSI: Stopping device\n");
+       netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
        ncsi_report_link(ndp, true);
 }
 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
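
The hunks above demote routine NCSI state-transition messages from netdev_info()/netdev_printk(KERN_DEBUG, ...) to netdev_dbg(), which compiles to nothing unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled, so ordinary channel events no longer flood the log. A minimal sketch of the idiom; demo_link_up() is an illustrative caller, not part of this patch:

#include <linux/netdevice.h>

/* netdev_dbg() prefixes the message with the device name and is a
 * no-op unless DEBUG or dynamic debug enables it at runtime.
 */
static void demo_link_up(struct net_device *dev, unsigned int channel)
{
        netdev_dbg(dev, "NCSI: channel %u link up\n", channel);
}
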
index 168af54db975d188a5224feb0113f05c12b83e0a..dc240cb47ddfac2466c9206dca8d8e0064c5e9e1 100644 (file)
@@ -603,6 +603,21 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct)
 }
 EXPORT_SYMBOL(nf_conntrack_destroy);
 
+bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+                        const struct sk_buff *skb)
+{
+       struct nf_ct_hook *ct_hook;
+       bool ret = false;
+
+       rcu_read_lock();
+       ct_hook = rcu_dereference(nf_ct_hook);
+       if (ct_hook)
+               ret = ct_hook->get_tuple_skb(dst_tuple, skb);
+       rcu_read_unlock();
+       return ret;
+}
+EXPORT_SYMBOL(nf_ct_get_tuple_skb);
+
 /* Built-in default zone used e.g. by modules. */
 const struct nf_conntrack_zone nf_ct_zone_dflt = {
        .id     = NF_CT_DEFAULT_ZONE_ID,
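
nf_ct_get_tuple_skb() above follows the RCU-protected hook pattern: nf_ct_hook is published elsewhere with rcu_assign_pointer(), so readers dereference it inside an RCU read-side critical section and must tolerate NULL while the conntrack module is absent. A minimal sketch of the same shape, with demo_hook and demo_call() as illustrative names:

#include <linux/rcupdate.h>

struct demo_hook {
        bool (*probe)(void *arg);
};

static struct demo_hook __rcu *demo_hook_ptr;

static bool demo_call(void *arg)
{
        struct demo_hook *h;
        bool ret = false;

        rcu_read_lock();
        h = rcu_dereference(demo_hook_ptr);     /* may be NULL */
        if (h)
                ret = h->probe(arg);
        rcu_read_unlock();
        return ret;
}
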
index d8383609fe2825b707cfb8ebc54381761ccc1108..510039862aa93c99904d2dbd3a7969327d0d896a 100644 (file)
@@ -47,6 +47,8 @@ struct nf_conncount_tuple {
        struct hlist_node               node;
        struct nf_conntrack_tuple       tuple;
        struct nf_conntrack_zone        zone;
+       int                             cpu;
+       u32                             jiffies32;
 };
 
 struct nf_conncount_rb {
@@ -91,11 +93,42 @@ bool nf_conncount_add(struct hlist_head *head,
                return false;
        conn->tuple = *tuple;
        conn->zone = *zone;
+       conn->cpu = raw_smp_processor_id();
+       conn->jiffies32 = (u32)jiffies;
        hlist_add_head(&conn->node, head);
        return true;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_add);
 
+static const struct nf_conntrack_tuple_hash *
+find_or_evict(struct net *net, struct nf_conncount_tuple *conn)
+{
+       const struct nf_conntrack_tuple_hash *found;
+       unsigned long a, b;
+       int cpu = raw_smp_processor_id();
+       __s32 age;
+
+       found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
+       if (found)
+               return found;
+       b = conn->jiffies32;
+       a = (u32)jiffies;
+
+       /* conn might have been added just before by another cpu and
+        * might still be unconfirmed.  In this case, nf_conntrack_find_get()
+        * returns no result.  Thus only evict if this cpu added the
+        * stale entry or if the entry is older than two jiffies.
+        */
+       age = a - b;
+       if (conn->cpu == cpu || age >= 2) {
+               hlist_del(&conn->node);
+               kmem_cache_free(conncount_conn_cachep, conn);
+               return ERR_PTR(-ENOENT);
+       }
+
+       return ERR_PTR(-EAGAIN);
+}
+
 unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
                                 const struct nf_conntrack_tuple *tuple,
                                 const struct nf_conntrack_zone *zone,
@@ -103,18 +136,27 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
 {
        const struct nf_conntrack_tuple_hash *found;
        struct nf_conncount_tuple *conn;
-       struct hlist_node *n;
        struct nf_conn *found_ct;
+       struct hlist_node *n;
        unsigned int length = 0;
 
        *addit = tuple ? true : false;
 
        /* check the saved connections */
        hlist_for_each_entry_safe(conn, n, head, node) {
-               found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
-               if (found == NULL) {
-                       hlist_del(&conn->node);
-                       kmem_cache_free(conncount_conn_cachep, conn);
+               found = find_or_evict(net, conn);
+               if (IS_ERR(found)) {
+                       /* Not found, but might be about to be confirmed */
+                       if (PTR_ERR(found) == -EAGAIN) {
+                               length++;
+                               if (!tuple)
+                                       continue;
+
+                               if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
+                                   nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
+                                   nf_ct_zone_id(zone, zone->dir))
+                                       *addit = false;
+                       }
                        continue;
                }
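
find_or_evict() above stores a 32-bit jiffies snapshot per entry and evaluates its age as a signed 32-bit difference, which stays correct across jiffies wraparound for intervals well under 2^31 ticks. A minimal sketch of that comparison, with demo_is_stale() as an illustrative helper:

#include <linux/jiffies.h>

static bool demo_is_stale(u32 stamp32)
{
        __s32 age = (u32)jiffies - stamp32;     /* signed: wrap-safe */

        return age >= 2;                        /* older than two jiffies */
}
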
 
index a1086bdec2429c2d26d4cbb6b2a12bd8927b013d..5423b197d98a2b49e2ecc6e6de901702302f834e 100644 (file)
@@ -32,7 +32,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb,
        __be32 mask = 0;
 
        /* we're only interested in locally generated packets */
-       if (skb->sk == NULL)
+       if (skb->sk == NULL || !net_eq(nf_ct_net(ct), sock_net(skb->sk)))
                goto out;
        if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
                goto out;
index 3465da2a98bd4ff68fc8e52935aad047c69855e8..85ab2fd6a66515ea77cf34b9a972f36adfdba0a2 100644 (file)
@@ -1683,6 +1683,41 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
        return 0;
 }
 
+static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+                                      const struct sk_buff *skb)
+{
+       const struct nf_conntrack_tuple *src_tuple;
+       const struct nf_conntrack_tuple_hash *hash;
+       struct nf_conntrack_tuple srctuple;
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       if (ct) {
+               src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
+               memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
+               return true;
+       }
+
+       if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+                              NFPROTO_IPV4, dev_net(skb->dev),
+                              &srctuple))
+               return false;
+
+       hash = nf_conntrack_find_get(dev_net(skb->dev),
+                                    &nf_ct_zone_dflt,
+                                    &srctuple);
+       if (!hash)
+               return false;
+
+       ct = nf_ct_tuplehash_to_ctrack(hash);
+       src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
+       memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
+       nf_ct_put(ct);
+
+       return true;
+}
+
 /* Bring out ya dead! */
 static struct nf_conn *
 get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
@@ -2204,6 +2239,7 @@ int nf_conntrack_init_start(void)
 static struct nf_ct_hook nf_conntrack_hook = {
        .update         = nf_conntrack_update,
        .destroy        = destroy_conntrack,
+       .get_tuple_skb  = nf_conntrack_get_tuple_skb,
 };
 
 void nf_conntrack_init_end(void)
index 551a1eddf0fab75eccf803b9711e069e61e60d5d..a75b11c393128d79107fc447c5109b7d0a786ea5 100644 (file)
@@ -465,6 +465,11 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
 
        nf_ct_expect_iterate_destroy(expect_iter_me, NULL);
        nf_ct_iterate_destroy(unhelp, me);
+
+       /* Someone may still have obtained the helper while the unhelp
+        * iteration above was running, so wait for those RCU readers to
+        * finish before the helper goes away.
+        */
+       synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
 
index 4264570475788be388e603c1bc70330c812d0eb3..a61d6df6e5f64f5b2086d14f35c88a0491f77ce6 100644 (file)
@@ -424,6 +424,10 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
        if (write) {
                struct ctl_table tmp = *table;
 
+               /* proc_dostring() can append to existing strings, so the
+                * buffer needs to be initialized as an empty string.
+                */
+               buf[0] = '\0';
                tmp.data = buf;
                r = proc_dostring(&tmp, write, buffer, lenp, ppos);
                if (r)
@@ -442,14 +446,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
                rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
                mutex_unlock(&nf_log_mutex);
        } else {
+               struct ctl_table tmp = *table;
+
+               tmp.data = buf;
                mutex_lock(&nf_log_mutex);
                logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
                if (!logger)
-                       table->data = "NONE";
+                       strlcpy(buf, "NONE", sizeof(buf));
                else
-                       table->data = logger->name;
-               r = proc_dostring(table, write, buffer, lenp, ppos);
+                       strlcpy(buf, logger->name, sizeof(buf));
                mutex_unlock(&nf_log_mutex);
+               r = proc_dostring(&tmp, write, buffer, lenp, ppos);
        }
 
        return r;
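
The fix above routes both the read and the write path through a stack copy of the ctl_table pointing at a private buffer: writes must start from an empty string because proc_dostring() appends, and reads must no longer point ->data at a string literal that a later write could scribble over. A minimal sketch of the resulting handler shape; demo_dostring() and the buffer size are illustrative:

#include <linux/sysctl.h>
#include <linux/string.h>

static int demo_dostring(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
{
        char buf[64];
        struct ctl_table tmp = *table;

        buf[0] = '\0';                  /* proc_dostring() appends to this */
        tmp.data = buf;
        tmp.maxlen = sizeof(buf);

        if (!write)
                strlcpy(buf, "NONE", sizeof(buf));      /* current value */

        return proc_dostring(&tmp, write, buffer, lenp, ppos);
}
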
index dc61399e30beb8a40e76dc81bde0f63f39740486..a8c5c846aec104df36dd6810b6877253ce89fef9 100644 (file)
@@ -132,9 +132,10 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
 }
 EXPORT_SYMBOL_GPL(nf_log_dump_tcp_header);
 
-void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk)
+void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
+                           struct sock *sk)
 {
-       if (!sk || !sk_fullsock(sk))
+       if (!sk || !sk_fullsock(sk) || !net_eq(net, sock_net(sk)))
                return;
 
        read_lock_bh(&sk->sk_callback_lock);
index 46f9df99d276c3be7ff5839ba41273df38e59a72..86df2a1666fdd4bf06c6e39e8c9d3d4472673db1 100644 (file)
@@ -108,6 +108,7 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
        struct flowi fl;
        unsigned int hh_len;
        struct dst_entry *dst;
+       struct sock *sk = skb->sk;
        int err;
 
        err = xfrm_decode_session(skb, &fl, family);
@@ -119,7 +120,10 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
                dst = ((struct xfrm_dst *)dst)->route;
        dst_hold(dst);
 
-       dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
+       if (sk && !net_eq(net, sock_net(sk)))
+               sk = NULL;
+
+       dst = xfrm_lookup(net, dst, &fl, sk, 0);
        if (IS_ERR(dst))
                return PTR_ERR(dst);
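
This is one instance of a guard repeated throughout the hunks here: an skb can carry a socket from another network namespace (for example after crossing a veth pair), so socket-derived state may only be consulted when the socket actually belongs to the namespace the hook runs in. A minimal sketch of the check, with demo_sk_in_net() as an illustrative helper:

#include <net/net_namespace.h>
#include <net/sock.h>

static struct sock *demo_sk_in_net(struct net *net, struct sock *sk)
{
        if (sk && !net_eq(net, sock_net(sk)))
                return NULL;    /* foreign netns: ignore the socket */
        return sk;
}
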
 
index 896d4a36081d4bb527b10c5db27df1a4dab32df8..3f211e1025c18c130899be51d3d36ab1e41251f8 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
 #include <linux/vmalloc.h>
+#include <linux/rhashtable.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nf_tables.h>
index 4ccd2988f9db637166358335d8e26299c7237bec..ea4ba551abb28cb25c833dc408e23d1313b21bb4 100644 (file)
@@ -1243,6 +1243,9 @@ static int nfqnl_recv_unsupp(struct net *net, struct sock *ctnl,
 static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
        [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
        [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
+       [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 },
+       [NFQA_CFG_MASK]         = { .type = NLA_U32 },
+       [NFQA_CFG_FLAGS]        = { .type = NLA_U32 },
 };
 
 static const struct nf_queue_handler nfqh = {
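
Declaring the three config attributes as NLA_U32 lets the netlink core reject undersized attributes during parsing, instead of nfnetlink_queue reading four bytes from whatever length userspace supplied. A minimal sketch of such a policy table, with illustrative attribute names:

#include <net/netlink.h>

enum {
        DEMO_ATTR_UNSPEC,
        DEMO_ATTR_MAXLEN,
        __DEMO_ATTR_MAX
};
#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
        [DEMO_ATTR_MAXLEN] = { .type = NLA_U32 },       /* must be >= 4 bytes */
};
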
index 1105a23bda5ec93a260dbce5be9175efa2d96c71..2b94dcc4345656a852e171afd7bb853f252ccaf7 100644 (file)
@@ -107,7 +107,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
                break;
        case NFT_META_SKUID:
                sk = skb_to_full_sk(skb);
-               if (!sk || !sk_fullsock(sk))
+               if (!sk || !sk_fullsock(sk) ||
+                   !net_eq(nft_net(pkt), sock_net(sk)))
                        goto err;
 
                read_lock_bh(&sk->sk_callback_lock);
@@ -123,7 +124,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
                break;
        case NFT_META_SKGID:
                sk = skb_to_full_sk(skb);
-               if (!sk || !sk_fullsock(sk))
+               if (!sk || !sk_fullsock(sk) ||
+                   !net_eq(nft_net(pkt), sock_net(sk)))
                        goto err;
 
                read_lock_bh(&sk->sk_callback_lock);
@@ -214,7 +216,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
 #ifdef CONFIG_CGROUP_NET_CLASSID
        case NFT_META_CGROUP:
                sk = skb_to_full_sk(skb);
-               if (!sk || !sk_fullsock(sk))
+               if (!sk || !sk_fullsock(sk) ||
+                   !net_eq(nft_net(pkt), sock_net(sk)))
                        goto err;
                *dest = sock_cgroup_classid(&sk->sk_cgrp_data);
                break;
index 74e1b3bd695417daf3afb725d3183658a3a81342..998c2b546f6db49f7b5a640e3bddbc75556160ee 100644 (file)
@@ -23,6 +23,9 @@ static void nft_socket_eval(const struct nft_expr *expr,
        struct sock *sk = skb->sk;
        u32 *dest = &regs->data[priv->dreg];
 
+       if (sk && !net_eq(nft_net(pkt), sock_net(sk)))
+               sk = NULL;
+
        if (!sk)
                switch(nft_pf(pkt)) {
                case NFPROTO_IPV4:
@@ -39,7 +42,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
                        return;
                }
 
-       if(!sk) {
+       if (!sk) {
                nft_reg_store8(dest, 0);
                return;
        }
index 7df2dece57d30f6c4e921cf3eeff40f5319b672a..5d92e178198088b85d040473f909aa9eab78c18e 100644 (file)
@@ -72,8 +72,9 @@ static bool
 cgroup_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
 {
        const struct xt_cgroup_info_v0 *info = par->matchinfo;
+       struct sock *sk = skb->sk;
 
-       if (skb->sk == NULL || !sk_fullsock(skb->sk))
+       if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
                return false;
 
        return (info->id == sock_cgroup_classid(&skb->sk->sk_cgrp_data)) ^
@@ -85,8 +86,9 @@ static bool cgroup_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
        const struct xt_cgroup_info_v1 *info = par->matchinfo;
        struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data;
        struct cgroup *ancestor = info->priv;
+       struct sock *sk = skb->sk;
 
-       if (!skb->sk || !sk_fullsock(skb->sk))
+       if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
                return false;
 
        if (ancestor)
index 3d705c688a27b53afdcb53460ed6509e3e8024f4..46686fb73784bf71c79282e87e3f01f2c0411f5c 100644 (file)
@@ -67,7 +67,7 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
        struct sock *sk = skb_to_full_sk(skb);
        struct net *net = xt_net(par);
 
-       if (sk == NULL || sk->sk_socket == NULL)
+       if (!sk || !sk->sk_socket || !net_eq(net, sock_net(sk)))
                return (info->match ^ info->invert) == 0;
        else if (info->match & info->invert & XT_OWNER_SOCKET)
                /*
index 07085c22b19c4d7e0970638b1e361c0f99a2c1dc..f44de4bc2100a811f4c2886668e390a9ac74a82a 100644 (file)
@@ -265,7 +265,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
        }
 
        /* use TTL as seen before forwarding */
-       if (xt_out(par) != NULL && skb->sk == NULL)
+       if (xt_out(par) != NULL &&
+           (!skb->sk || !net_eq(net, sock_net(skb->sk))))
                ttl++;
 
        spin_lock_bh(&recent_lock);
index 5c0779c4fa3cdb1c628ac3c08e9dd1c373cc8e89..0472f34728423ac1a3ba839a72e4aab167df1091 100644 (file)
@@ -56,8 +56,12 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
        struct sk_buff *pskb = (struct sk_buff *)skb;
        struct sock *sk = skb->sk;
 
+       if (!net_eq(xt_net(par), sock_net(sk)))
+               sk = NULL;
+
        if (!sk)
                sk = nf_sk_lookup_slow_v4(xt_net(par), skb, xt_in(par));
+
        if (sk) {
                bool wildcard;
                bool transparent = true;
@@ -113,8 +117,12 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
        struct sk_buff *pskb = (struct sk_buff *)skb;
        struct sock *sk = skb->sk;
 
+       if (!net_eq(xt_net(par), sock_net(sk)))
+               sk = NULL;
+
        if (!sk)
                sk = nf_sk_lookup_slow_v6(xt_net(par), skb, xt_in(par));
+
        if (sk) {
                bool wildcard;
                bool transparent = true;
index 1189b84413d5a8236f878a9cc99bcfa09368ec69..393573a99a5a34d3ebaad3a71b36293b6c2fb19f 100644 (file)
@@ -2658,7 +2658,7 @@ static const struct proto_ops netlink_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      netlink_getname,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        netlink_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
index 93fbcafbf3886d34b0be87244c405b8319df89dd..03f37c4e64fe44cd822952225736084ad151b2e8 100644 (file)
@@ -1355,7 +1355,7 @@ static const struct proto_ops nr_proto_ops = {
        .socketpair     =       sock_no_socketpair,
        .accept         =       nr_accept,
        .getname        =       nr_getname,
-       .poll_mask      =       datagram_poll_mask,
+       .poll           =       datagram_poll,
        .ioctl          =       nr_ioctl,
        .listen         =       nr_listen,
        .shutdown       =       sock_no_shutdown,
index ab5bb14b49af92241b12584925983de43b143bb7..ea0c0c6f187429426f4849347c09b847f0111fff 100644 (file)
@@ -548,13 +548,16 @@ static inline __poll_t llcp_accept_poll(struct sock *parent)
        return 0;
 }
 
-static __poll_t llcp_sock_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
+                                  poll_table *wait)
 {
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
 
        pr_debug("%p\n", sk);
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_state == LLCP_LISTEN)
                return llcp_accept_poll(sk);
 
@@ -896,7 +899,7 @@ static const struct proto_ops llcp_sock_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = llcp_sock_accept,
        .getname        = llcp_sock_getname,
-       .poll_mask      = llcp_sock_poll_mask,
+       .poll           = llcp_sock_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = llcp_sock_listen,
        .shutdown       = sock_no_shutdown,
@@ -916,7 +919,7 @@ static const struct proto_ops llcp_rawsock_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = llcp_sock_getname,
-       .poll_mask      = llcp_sock_poll_mask,
+       .poll           = llcp_sock_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
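
These hunks restore the classic three-argument ->poll shape: the handler registers the socket's wait queue itself through sock_poll_wait() before computing the event mask, rather than returning a mask through the ->poll_mask interface being reverted here. A minimal sketch of the pattern, with demo_poll() as an illustrative handler:

#include <net/sock.h>
#include <linux/poll.h>
#include <linux/skbuff.h>

static __poll_t demo_poll(struct file *file, struct socket *sock,
                          poll_table *wait)
{
        struct sock *sk = sock->sk;
        __poll_t mask = 0;

        sock_poll_wait(file, sk_sleep(sk), wait);

        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
        if (sk->sk_err)
                mask |= EPOLLERR;
        return mask;
}
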
index 60c322531c498f1d43582be5b76f3a2f575ed5bc..e2188deb08dc3bb16e2a60808b274a4a092fd2ee 100644 (file)
@@ -284,7 +284,7 @@ static const struct proto_ops rawsock_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
@@ -304,7 +304,7 @@ static const struct proto_ops rawsock_raw_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
index 30a5df27116ec695d08bd4b57d8ee97b93822403..85ae53d8fd098b80e22a4b3ccc26f83be7b110c5 100644 (file)
@@ -1057,6 +1057,28 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
                             clone_flow_key);
 }
 
+/* When 'last' is true, clone() should always consume the 'skb'.
+ * Otherwise, clone() should keep 'skb' intact regardless of what
+ * actions are executed within clone().
+ */
+static int clone(struct datapath *dp, struct sk_buff *skb,
+                struct sw_flow_key *key, const struct nlattr *attr,
+                bool last)
+{
+       struct nlattr *actions;
+       struct nlattr *clone_arg;
+       int rem = nla_len(attr);
+       bool dont_clone_flow_key;
+
+       /* The first nested attribute is always 'OVS_CLONE_ATTR_ARG'. */
+       clone_arg = nla_data(attr);
+       dont_clone_flow_key = nla_get_u32(clone_arg);
+       actions = nla_next(clone_arg, &rem);
+
+       return clone_execute(dp, skb, key, 0, actions, rem, last,
+                            !dont_clone_flow_key);
+}
+
 static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
                         const struct nlattr *attr)
 {
@@ -1336,6 +1358,17 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                                consume_skb(skb);
                                return 0;
                        }
+                       break;
+
+               case OVS_ACTION_ATTR_CLONE: {
+                       bool last = nla_is_last(a, rem);
+
+                       err = clone(dp, skb, key, a, last);
+                       if (last)
+                               return err;
+
+                       break;
+               }
                }
 
                if (unlikely(err)) {
index 492ab0c36f7c9e3caf6de7e7d77368028716e09c..a70097ecf33c2bf9e9df7b92c2359ab679ae6d7e 100644 (file)
@@ -2460,6 +2460,40 @@ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
        return 0;
 }
 
+static int validate_and_copy_clone(struct net *net,
+                                  const struct nlattr *attr,
+                                  const struct sw_flow_key *key,
+                                  struct sw_flow_actions **sfa,
+                                  __be16 eth_type, __be16 vlan_tci,
+                                  bool log, bool last)
+{
+       int start, err;
+       u32 exec;
+
+       if (nla_len(attr) && nla_len(attr) < NLA_HDRLEN)
+               return -EINVAL;
+
+       start = add_nested_action_start(sfa, OVS_ACTION_ATTR_CLONE, log);
+       if (start < 0)
+               return start;
+
+       exec = last || !actions_may_change_flow(attr);
+
+       err = ovs_nla_add_action(sfa, OVS_CLONE_ATTR_EXEC, &exec,
+                                sizeof(exec), log);
+       if (err)
+               return err;
+
+       err = __ovs_nla_copy_actions(net, attr, key, sfa,
+                                    eth_type, vlan_tci, log);
+       if (err)
+               return err;
+
+       add_nested_action_end(*sfa, start);
+
+       return 0;
+}
+
 void ovs_match_init(struct sw_flow_match *match,
                    struct sw_flow_key *key,
                    bool reset_key,
@@ -2516,7 +2550,9 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
        struct ovs_tunnel_info *ovs_tun;
        struct nlattr *a;
        int err = 0, start, opts_type;
+       __be16 dst_opt_type;
 
+       dst_opt_type = 0;
        ovs_match_init(&match, &key, true, NULL);
        opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log);
        if (opts_type < 0)
@@ -2528,10 +2564,13 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
                        err = validate_geneve_opts(&key);
                        if (err < 0)
                                return err;
+                       dst_opt_type = TUNNEL_GENEVE_OPT;
                        break;
                case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
+                       dst_opt_type = TUNNEL_VXLAN_OPT;
                        break;
                case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
+                       dst_opt_type = TUNNEL_ERSPAN_OPT;
                        break;
                }
        }
@@ -2574,7 +2613,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
         */
        ip_tunnel_info_opts_set(tun_info,
                                TUN_METADATA_OPTS(&key, key.tun_opts_len),
-                               key.tun_opts_len);
+                               key.tun_opts_len, dst_opt_type);
        add_nested_action_end(*sfa, start);
 
        return err;
@@ -2844,6 +2883,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
                        [OVS_ACTION_ATTR_PUSH_NSH] = (u32)-1,
                        [OVS_ACTION_ATTR_POP_NSH] = 0,
                        [OVS_ACTION_ATTR_METER] = sizeof(u32),
+                       [OVS_ACTION_ATTR_CLONE] = (u32)-1,
                };
                const struct ovs_action_push_vlan *vlan;
                int type = nla_type(a);
@@ -3033,6 +3073,18 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
                        /* Non-existent meters are simply ignored.  */
                        break;
 
+               case OVS_ACTION_ATTR_CLONE: {
+                       bool last = nla_is_last(a, rem);
+
+                       err = validate_and_copy_clone(net, a, key, sfa,
+                                                     eth_type, vlan_tci,
+                                                     log, last);
+                       if (err)
+                               return err;
+                       skip_copy = true;
+                       break;
+               }
+
                default:
                        OVS_NLERR(log, "Unknown Action type %d", type);
                        return -EINVAL;
@@ -3111,6 +3163,26 @@ static int sample_action_to_attr(const struct nlattr *attr,
        return err;
 }
 
+static int clone_action_to_attr(const struct nlattr *attr,
+                               struct sk_buff *skb)
+{
+       struct nlattr *start;
+       int err = 0, rem = nla_len(attr);
+
+       start = nla_nest_start(skb, OVS_ACTION_ATTR_CLONE);
+       if (!start)
+               return -EMSGSIZE;
+
+       err = ovs_nla_put_actions(nla_data(attr), rem, skb);
+
+       if (err)
+               nla_nest_cancel(skb, start);
+       else
+               nla_nest_end(skb, start);
+
+       return err;
+}
+
 static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
 {
        const struct nlattr *ovs_key = nla_data(a);
@@ -3199,6 +3271,12 @@ int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
                                return err;
                        break;
 
+               case OVS_ACTION_ATTR_CLONE:
+                       err = clone_action_to_attr(a, skb);
+                       if (err)
+                               return err;
+                       break;
+
                default:
                        if (nla_put(skb, type, nla_len(a), nla_data(a)))
                                return -EMSGSIZE;
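
clone_action_to_attr() above follows the standard netlink nesting idiom: open a nested attribute, emit the payload, and cancel the nest on failure so a partially built message is rolled back rather than transmitted. A minimal sketch, with demo_put_nested() and the inner attribute type as illustrative stand-ins:

#include <net/netlink.h>
#include <linux/skbuff.h>

static int demo_put_nested(struct sk_buff *skb, int attrtype,
                           const void *data, int len)
{
        struct nlattr *start = nla_nest_start(skb, attrtype);

        if (!start)
                return -EMSGSIZE;

        if (nla_put(skb, 1 /* illustrative inner type */, len, data)) {
                nla_nest_cancel(skb, start);    /* roll back the nest */
                return -EMSGSIZE;
        }

        nla_nest_end(skb, start);
        return 0;
}
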
index 50809748c1279ea17b7499acbec5699443804f64..00189a3b07f2161dbdde3a451af1b01f51162afd 100644 (file)
@@ -275,9 +275,10 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
        return po->xmit == packet_direct_xmit;
 }
 
-static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
+                                 struct net_device *sb_dev)
 {
-       return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
+       return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL);
 }
 
 static u16 packet_pick_tx_queue(struct sk_buff *skb)
@@ -291,7 +292,7 @@ static u16 packet_pick_tx_queue(struct sk_buff *skb)
                                                    __packet_pick_tx_queue);
                queue_index = netdev_cap_txqueue(dev, queue_index);
        } else {
-               queue_index = __packet_pick_tx_queue(dev, skb);
+               queue_index = __packet_pick_tx_queue(dev, skb, NULL);
        }
 
        return queue_index;
@@ -1951,7 +1952,7 @@ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
                goto out_unlock;
        }
 
-       sockc.tsflags = sk->sk_tsflags;
+       sockcm_init(&sockc, sk);
        if (msg->msg_controllen) {
                err = sock_cmsg_send(sk, msg, &sockc);
                if (unlikely(err))
@@ -1962,6 +1963,7 @@ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
        skb->dev = dev;
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
+       skb->tstamp = sockc.transmit_time;
 
        sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
 
@@ -2262,6 +2264,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                if (po->stats.stats1.tp_drops)
                        status |= TP_STATUS_LOSING;
        }
+
+       if (do_vnet &&
+           virtio_net_hdr_from_skb(skb, h.raw + macoff -
+                                   sizeof(struct virtio_net_hdr),
+                                   vio_le(), true, 0))
+               goto drop_n_account;
+
        po->stats.stats1.tp_packets++;
        if (copy_skb) {
                status |= TP_STATUS_COPY;
@@ -2269,15 +2278,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        }
        spin_unlock(&sk->sk_receive_queue.lock);
 
-       if (do_vnet) {
-               if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
-                                           sizeof(struct virtio_net_hdr),
-                                           vio_le(), true, 0)) {
-                       spin_lock(&sk->sk_receive_queue.lock);
-                       goto drop_n_account;
-               }
-       }
-
        skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
 
        if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
@@ -2459,6 +2459,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
        skb->dev = dev;
        skb->priority = po->sk.sk_priority;
        skb->mark = po->sk.sk_mark;
+       skb->tstamp = sockc->transmit_time;
        sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
        skb_shinfo(skb)->destructor_arg = ph.raw;
 
@@ -2635,7 +2636,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        if (unlikely(!(dev->flags & IFF_UP)))
                goto out_put;
 
-       sockc.tsflags = po->sk.sk_tsflags;
+       sockcm_init(&sockc, &po->sk);
        if (msg->msg_controllen) {
                err = sock_cmsg_send(&po->sk, msg, &sockc);
                if (unlikely(err))
@@ -2831,7 +2832,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        if (unlikely(!(dev->flags & IFF_UP)))
                goto out_unlock;
 
-       sockc.tsflags = sk->sk_tsflags;
+       sockcm_init(&sockc, sk);
        sockc.mark = sk->sk_mark;
        if (msg->msg_controllen) {
                err = sock_cmsg_send(sk, msg, &sockc);
@@ -2905,6 +2906,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        skb->dev = dev;
        skb->priority = sk->sk_priority;
        skb->mark = sockc.mark;
+       skb->tstamp = sockc.transmit_time;
 
        if (has_vnet_hdr) {
                err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
@@ -4078,11 +4080,12 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
        return 0;
 }
 
-static __poll_t packet_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t packet_poll(struct file *file, struct socket *sock,
+                               poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct packet_sock *po = pkt_sk(sk);
-       __poll_t mask = datagram_poll_mask(sock, events);
+       __poll_t mask = datagram_poll(file, sock, wait);
 
        spin_lock_bh(&sk->sk_receive_queue.lock);
        if (po->rx_ring.pg_vec) {
@@ -4424,7 +4427,7 @@ static const struct proto_ops packet_ops_spkt = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      packet_getname_spkt,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        packet_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
@@ -4445,7 +4448,7 @@ static const struct proto_ops packet_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      packet_getname,
-       .poll_mask =    packet_poll_mask,
+       .poll =         packet_poll,
        .ioctl =        packet_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
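
The sockcm_init() conversions above replace the open-coded `sockc.tsflags = sk->sk_tsflags` so that every field of the control-message cookie, including the transmit_time that these hunks start copying into skb->tstamp, has a defined initial value instead of stack garbage. A sketch of what such a helper is expected to look like, assuming this shape rather than quoting the actual net/sock.h definition:

#include <net/sock.h>

static inline void demo_sockcm_init(struct sockcm_cookie *sockc,
                                    const struct sock *sk)
{
        /* zero everything, then seed only tsflags from the socket */
        *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
}
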
index c295c4e20f012f31c1b443c5f859969caf412cec..30187990257fdb07a57c03707d6e1af0740b42f0 100644 (file)
@@ -340,12 +340,15 @@ static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
        return sizeof(struct sockaddr_pn);
 }
 
-static __poll_t pn_socket_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
+                                       poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct pep_sock *pn = pep_sk(sk);
        __poll_t mask = 0;
 
+       poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_state == TCP_CLOSE)
                return EPOLLERR;
        if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -445,7 +448,7 @@ const struct proto_ops phonet_dgram_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = pn_socket_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = pn_socket_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
@@ -470,7 +473,7 @@ const struct proto_ops phonet_stream_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = pn_socket_accept,
        .getname        = pn_socket_getname,
-       .poll_mask      = pn_socket_poll_mask,
+       .poll           = pn_socket_poll,
        .ioctl          = pn_socket_ioctl,
        .listen         = pn_socket_listen,
        .shutdown       = sock_no_shutdown,
index 1b5025ea5b0426272145b56fa42e21d908612243..2aa07b547b1685a6aa96155c559dbefd21150281 100644 (file)
@@ -1023,7 +1023,7 @@ static const struct proto_ops qrtr_proto_ops = {
        .recvmsg        = qrtr_recvmsg,
        .getname        = qrtr_getname,
        .ioctl          = qrtr_ioctl,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = sock_no_setsockopt,
        .getsockopt     = sock_no_getsockopt,
index abef75da89a7450092aefc46ed902e6602fba7a6..cfb05953b0e57afad21fd708f0df42d63c77cd55 100644 (file)
@@ -659,11 +659,19 @@ static void rds_conn_info(struct socket *sock, unsigned int len,
 
 int rds_conn_init(void)
 {
+       int ret;
+
+       ret = rds_loop_net_init(); /* register pernet callback */
+       if (ret)
+               return ret;
+
        rds_conn_slab = kmem_cache_create("rds_connection",
                                          sizeof(struct rds_connection),
                                          0, 0, NULL);
-       if (!rds_conn_slab)
+       if (!rds_conn_slab) {
+               rds_loop_net_exit();
                return -ENOMEM;
+       }
 
        rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
        rds_info_register_func(RDS_INFO_SEND_MESSAGES,
@@ -676,6 +684,7 @@ int rds_conn_init(void)
 
 void rds_conn_exit(void)
 {
+       rds_loop_net_exit(); /* unregister pernet callback */
        rds_loop_exit();
 
        WARN_ON(!hlist_empty(rds_conn_hash));
index b4e421aa9727942e0cbe7ba40e11c3d19d937868..1eaf2550a9f8287eebf3895a6ea55ed779225b33 100644 (file)
@@ -376,8 +376,6 @@ static void release_refill(struct rds_connection *conn)
  * This tries to allocate and post unused work requests after making sure that
  * they have all the allocations they need to queue received fragments into
  * sockets.
- *
- * -1 is returned if posting fails due to temporary resource exhaustion.
  */
 void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
 {
@@ -1025,7 +1023,6 @@ int rds_ib_recv_path(struct rds_conn_path *cp)
 {
        struct rds_connection *conn = cp->cp_conn;
        struct rds_ib_connection *ic = conn->c_transport_data;
-       int ret = 0;
 
        rdsdebug("conn %p\n", conn);
        if (rds_conn_up(conn)) {
@@ -1034,7 +1031,7 @@ int rds_ib_recv_path(struct rds_conn_path *cp)
                rds_ib_stats_inc(s_ib_rx_refill_from_thread);
        }
 
-       return ret;
+       return 0;
 }
 
 int rds_ib_recv_init(void)
index dac6218a460ed4d4a5b7b03ad4f6056a68784a16..feea1f96ee2ad582dce8f815442da1bbf6e0508a 100644 (file)
@@ -33,6 +33,8 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/in.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #include "rds_single_path.h"
 #include "rds.h"
 
 static DEFINE_SPINLOCK(loop_conns_lock);
 static LIST_HEAD(loop_conns);
+static atomic_t rds_loop_unloading = ATOMIC_INIT(0);
+
+static void rds_loop_set_unloading(void)
+{
+       atomic_set(&rds_loop_unloading, 1);
+}
+
+static bool rds_loop_is_unloading(struct rds_connection *conn)
+{
+       return atomic_read(&rds_loop_unloading) != 0;
+}
 
 /*
  * This 'loopback' transport is a special case for flows that originate
@@ -165,6 +178,8 @@ void rds_loop_exit(void)
        struct rds_loop_connection *lc, *_lc;
        LIST_HEAD(tmp_list);
 
+       rds_loop_set_unloading();
+       synchronize_rcu();
        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&loop_conns_lock);
        list_splice(&loop_conns, &tmp_list);
@@ -177,6 +192,46 @@ void rds_loop_exit(void)
        }
 }
 
+static void rds_loop_kill_conns(struct net *net)
+{
+       struct rds_loop_connection *lc, *_lc;
+       LIST_HEAD(tmp_list);
+
+       spin_lock_irq(&loop_conns_lock);
+       list_for_each_entry_safe(lc, _lc, &loop_conns, loop_node)  {
+               struct net *c_net = read_pnet(&lc->conn->c_net);
+
+               if (net != c_net)
+                       continue;
+               list_move_tail(&lc->loop_node, &tmp_list);
+       }
+       spin_unlock_irq(&loop_conns_lock);
+
+       list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) {
+               WARN_ON(lc->conn->c_passive);
+               rds_conn_destroy(lc->conn);
+       }
+}
+
+static void __net_exit rds_loop_exit_net(struct net *net)
+{
+       rds_loop_kill_conns(net);
+}
+
+static struct pernet_operations rds_loop_net_ops = {
+       .exit = rds_loop_exit_net,
+};
+
+int rds_loop_net_init(void)
+{
+       return register_pernet_device(&rds_loop_net_ops);
+}
+
+void rds_loop_net_exit(void)
+{
+       unregister_pernet_device(&rds_loop_net_ops);
+}
+
 /*
  * This is missing .xmit_* because loop doesn't go through generic
  * rds_send_xmit() and doesn't call rds_recv_incoming().  .listen_stop and
@@ -194,4 +249,5 @@ struct rds_transport rds_loop_transport = {
        .inc_free               = rds_loop_inc_free,
        .t_name                 = "loopback",
        .t_type                 = RDS_TRANS_LOOP,
+       .t_unloading            = rds_loop_is_unloading,
 };
index 469fa4b2da4f38b5fb62358507cb9d9ca62aa825..bbc8cdd030df3137ea250578cb3d429a86fd68f2 100644 (file)
@@ -5,6 +5,8 @@
 /* loop.c */
 extern struct rds_transport rds_loop_transport;
 
+int rds_loop_net_init(void);
+void rds_loop_net_exit(void);
 void rds_loop_exit(void);
 
 #endif
index ebe42e7eb45697030367c4baba455b50c973c409..d00a0ef39a56b38cae4114654c44a3bddccb35ba 100644 (file)
@@ -1470,7 +1470,7 @@ static const struct proto_ops rose_proto_ops = {
        .socketpair     =       sock_no_socketpair,
        .accept         =       rose_accept,
        .getname        =       rose_getname,
-       .poll_mask      =       datagram_poll_mask,
+       .poll           =       datagram_poll,
        .ioctl          =       rose_ioctl,
        .listen         =       rose_listen,
        .shutdown       =       sock_no_shutdown,
index 3b1ac93efee22248ab01c3c8a610e874e99356b5..2b463047dd7ba93267feb584e1ffda280449a0b3 100644 (file)
@@ -734,11 +734,15 @@ static int rxrpc_getsockopt(struct socket *sock, int level, int optname,
 /*
  * permit an RxRPC socket to be polled
  */
-static __poll_t rxrpc_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
+                              poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct rxrpc_sock *rx = rxrpc_sk(sk);
-       __poll_t mask = 0;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* the socket is readable if there are any messages waiting on the Rx
         * queue */
@@ -945,7 +949,7 @@ static const struct proto_ops rxrpc_rpc_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
-       .poll_mask      = rxrpc_poll_mask,
+       .poll           = rxrpc_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = rxrpc_listen,
        .shutdown       = rxrpc_shutdown,
index a01169fb5325754c13c8b2b18facc29a1e24f243..7af246764a3554e3c151dbf9af6031986eacaba4 100644 (file)
@@ -183,6 +183,17 @@ config NET_SCH_CBS
          To compile this code as a module, choose M here: the
          module will be called sch_cbs.
 
+config NET_SCH_ETF
+       tristate "Earliest TxTime First (ETF)"
+       help
+         Say Y here if you want to use the Earliest TxTime First (ETF) packet
+         scheduling algorithm.
+
+         See the top of <file:net/sched/sch_etf.c> for more details.
+
+         To compile this code as a module, choose M here: the
+         module will be called sch_etf.
+
 config NET_SCH_GRED
        tristate "Generic Random Early Detection (GRED)"
        ---help---
@@ -284,6 +295,17 @@ config NET_SCH_FQ_CODEL
 
          If unsure, say N.
 
+config NET_SCH_CAKE
+       tristate "Common Applications Kept Enhanced (CAKE)"
+       help
+         Say Y here if you want to use the Common Applications Kept Enhanced
+         (CAKE) queue management algorithm.
+
+         To compile this driver as a module, choose M here: the module
+         will be called sch_cake.
+
+         If unsure, say N.
+
 config NET_SCH_FQ
        tristate "Fair Queue"
        help
index 8811d38048785f43334da160226709217d72ea97..673ee7d26ff2f4bd1d58b0de277570933fc4a2ed 100644 (file)
@@ -50,10 +50,12 @@ obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
 obj-$(CONFIG_NET_SCH_QFQ)      += sch_qfq.o
 obj-$(CONFIG_NET_SCH_CODEL)    += sch_codel.o
 obj-$(CONFIG_NET_SCH_FQ_CODEL) += sch_fq_codel.o
+obj-$(CONFIG_NET_SCH_CAKE)     += sch_cake.o
 obj-$(CONFIG_NET_SCH_FQ)       += sch_fq.o
 obj-$(CONFIG_NET_SCH_HHF)      += sch_hhf.o
 obj-$(CONFIG_NET_SCH_PIE)      += sch_pie.o
 obj-$(CONFIG_NET_SCH_CBS)      += sch_cbs.o
+obj-$(CONFIG_NET_SCH_ETF)      += sch_etf.o
 
 obj-$(CONFIG_NET_CLS_U32)      += cls_u32.o
 obj-$(CONFIG_NET_CLS_ROUTE4)   += cls_route.o
index 3f4cf930f809bbaca12a84caabc31f5c2b8d769c..148a89ab789b56bdc8117035431d827a22753494 100644 (file)
@@ -55,6 +55,24 @@ static void tcf_action_goto_chain_exec(const struct tc_action *a,
        res->goto_tp = rcu_dereference_bh(chain->filter_chain);
 }
 
+static void tcf_free_cookie_rcu(struct rcu_head *p)
+{
+       struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);
+
+       kfree(cookie->data);
+       kfree(cookie);
+}
+
+static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
+                                 struct tc_cookie *new_cookie)
+{
+       struct tc_cookie *old;
+
+       old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
+       if (old)
+               call_rcu(&old->rcu, tcf_free_cookie_rcu);
+}
+
 /* XXX: For standalone actions, we don't need a RCU grace period either, because
  * actions are always connected to filters and filters are already destroyed in
  * RCU callbacks, so after a RCU grace period actions are already disconnected
@@ -65,44 +83,64 @@ static void free_tcf(struct tc_action *p)
        free_percpu(p->cpu_bstats);
        free_percpu(p->cpu_qstats);
 
-       if (p->act_cookie) {
-               kfree(p->act_cookie->data);
-               kfree(p->act_cookie);
-       }
+       tcf_set_action_cookie(&p->act_cookie, NULL);
        if (p->goto_chain)
                tcf_action_goto_chain_fini(p);
 
        kfree(p);
 }
 
-static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
+static void tcf_action_cleanup(struct tc_action *p)
 {
-       spin_lock(&idrinfo->lock);
-       idr_remove(&idrinfo->action_idr, p->tcfa_index);
-       spin_unlock(&idrinfo->lock);
+       if (p->ops->cleanup)
+               p->ops->cleanup(p);
+
        gen_kill_estimator(&p->tcfa_rate_est);
        free_tcf(p);
 }
 
+static int __tcf_action_put(struct tc_action *p, bool bind)
+{
+       struct tcf_idrinfo *idrinfo = p->idrinfo;
+
+       if (refcount_dec_and_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
+               if (bind)
+                       atomic_dec(&p->tcfa_bindcnt);
+               idr_remove(&idrinfo->action_idr, p->tcfa_index);
+               spin_unlock(&idrinfo->lock);
+
+               tcf_action_cleanup(p);
+               return 1;
+       }
+
+       if (bind)
+               atomic_dec(&p->tcfa_bindcnt);
+
+       return 0;
+}
+
 int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
 {
        int ret = 0;
 
-       ASSERT_RTNL();
-
+       /* Release with strict==1 and bind==0 is only called through the act
+        * API interface (classifiers always bind). The only case when an
+        * action with a positive reference count and a zero bind count can
+        * exist is when it was also created through the act API (unbinding
+        * the last classifier destroys an action that was created by a
+        * classifier). So the only case when the bind count can change after
+        * the initial check is when an unbound action is destroyed through
+        * the act API while a classifier concurrently binds to an action
+        * with the same id. This results either in creation of a new action
+        * (same behavior as before) or in reuse of the existing action, if
+        * the concurrent process increments the reference count before the
+        * action is deleted. Both scenarios are acceptable.
+        */
        if (p) {
-               if (bind)
-                       p->tcfa_bindcnt--;
-               else if (strict && p->tcfa_bindcnt > 0)
+               if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
                        return -EPERM;
 
-               p->tcfa_refcnt--;
-               if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
-                       if (p->ops->cleanup)
-                               p->ops->cleanup(p);
-                       tcf_idr_remove(p->idrinfo, p);
+               if (__tcf_action_put(p, bind))
                        ret = ACT_P_DELETED;
-               }
        }
 
        return ret;
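
__tcf_action_put() above relies on refcount_dec_and_lock(): the idrinfo lock is taken only when the count actually drops to zero, so the final reference holder removes the action from the idr and frees it without racing a concurrent lookup. A minimal sketch of the idiom, with demo_obj and demo_put() as illustrative names:

#include <linux/refcount.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct demo_obj {
        refcount_t refcnt;
        u32 index;
};

static void demo_put(struct demo_obj *p, struct idr *idr, spinlock_t *lock)
{
        /* the lock is acquired only on the 1 -> 0 transition */
        if (refcount_dec_and_lock(&p->refcnt, lock)) {
                idr_remove(idr, p->index);
                spin_unlock(lock);
                kfree(p);
        }
}
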
@@ -111,10 +149,15 @@ EXPORT_SYMBOL(__tcf_idr_release);
 
 static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
 {
+       struct tc_cookie *act_cookie;
        u32 cookie_len = 0;
 
-       if (act->act_cookie)
-               cookie_len = nla_total_size(act->act_cookie->len);
+       rcu_read_lock();
+       act_cookie = rcu_dereference(act->act_cookie);
+
+       if (act_cookie)
+               cookie_len = nla_total_size(act_cookie->len);
+       rcu_read_unlock();
 
        return  nla_total_size(0) /* action number nested */
                + nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
@@ -257,46 +300,77 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(tcf_generic_walker);
 
-static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo)
+static bool __tcf_idr_check(struct tc_action_net *tn, u32 index,
+                           struct tc_action **a, int bind)
 {
-       struct tc_action *p = NULL;
+       struct tcf_idrinfo *idrinfo = tn->idrinfo;
+       struct tc_action *p;
 
        spin_lock(&idrinfo->lock);
        p = idr_find(&idrinfo->action_idr, index);
+       if (IS_ERR(p)) {
+               p = NULL;
+       } else if (p) {
+               refcount_inc(&p->tcfa_refcnt);
+               if (bind)
+                       atomic_inc(&p->tcfa_bindcnt);
+       }
        spin_unlock(&idrinfo->lock);
 
-       return p;
+       if (p) {
+               *a = p;
+               return true;
+       }
+       return false;
 }
 
 int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
 {
-       struct tcf_idrinfo *idrinfo = tn->idrinfo;
-       struct tc_action *p = tcf_idr_lookup(index, idrinfo);
-
-       if (p) {
-               *a = p;
-               return 1;
-       }
-       return 0;
+       return __tcf_idr_check(tn, index, a, 0);
 }
 EXPORT_SYMBOL(tcf_idr_search);
 
 bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
                   int bind)
+{
+       return __tcf_idr_check(tn, index, a, bind);
+}
+EXPORT_SYMBOL(tcf_idr_check);
+
+int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
 {
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
-       struct tc_action *p = tcf_idr_lookup(index, idrinfo);
+       struct tc_action *p;
+       int ret = 0;
 
-       if (index && p) {
-               if (bind)
-                       p->tcfa_bindcnt++;
-               p->tcfa_refcnt++;
-               *a = p;
-               return true;
+       spin_lock(&idrinfo->lock);
+       p = idr_find(&idrinfo->action_idr, index);
+       if (!p) {
+               spin_unlock(&idrinfo->lock);
+               return -ENOENT;
        }
-       return false;
+
+       if (!atomic_read(&p->tcfa_bindcnt)) {
+               if (refcount_dec_and_test(&p->tcfa_refcnt)) {
+                       struct module *owner = p->ops->owner;
+
+                       WARN_ON(p != idr_remove(&idrinfo->action_idr,
+                                               p->tcfa_index));
+                       spin_unlock(&idrinfo->lock);
+
+                       tcf_action_cleanup(p);
+                       module_put(owner);
+                       return 0;
+               }
+               ret = 0;
+       } else {
+               ret = -EPERM;
+       }
+
+       spin_unlock(&idrinfo->lock);
+       return ret;
 }
-EXPORT_SYMBOL(tcf_idr_check);
+EXPORT_SYMBOL(tcf_idr_delete_index);
 
 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
                   struct tc_action **a, const struct tc_action_ops *ops,
@@ -304,14 +378,13 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 {
        struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
-       struct idr *idr = &idrinfo->action_idr;
        int err = -ENOMEM;
 
        if (unlikely(!p))
                return -ENOMEM;
-       p->tcfa_refcnt = 1;
+       refcount_set(&p->tcfa_refcnt, 1);
        if (bind)
-               p->tcfa_bindcnt = 1;
+               atomic_set(&p->tcfa_bindcnt, 1);
 
        if (cpustats) {
                p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
@@ -322,20 +395,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
                        goto err2;
        }
        spin_lock_init(&p->tcfa_lock);
-       idr_preload(GFP_KERNEL);
-       spin_lock(&idrinfo->lock);
-       /* user doesn't specify an index */
-       if (!index) {
-               index = 1;
-               err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC);
-       } else {
-               err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC);
-       }
-       spin_unlock(&idrinfo->lock);
-       idr_preload_end();
-       if (err)
-               goto err3;
-
        p->tcfa_index = index;
        p->tcfa_tm.install = jiffies;
        p->tcfa_tm.lastuse = jiffies;
@@ -345,7 +404,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
                                        &p->tcfa_rate_est,
                                        &p->tcfa_lock, NULL, est);
                if (err)
-                       goto err4;
+                       goto err3;
        }
 
        p->idrinfo = idrinfo;
@@ -353,8 +412,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
        INIT_LIST_HEAD(&p->list);
        *a = p;
        return 0;
-err4:
-       idr_remove(idr, index);
 err3:
        free_percpu(p->cpu_qstats);
 err2:
@@ -370,11 +427,78 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
 
        spin_lock(&idrinfo->lock);
-       idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
+       /* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
+       WARN_ON(!IS_ERR(idr_replace(&idrinfo->action_idr, a, a->tcfa_index)));
        spin_unlock(&idrinfo->lock);
 }
 EXPORT_SYMBOL(tcf_idr_insert);
 
+/* Clean up an idr index that was allocated but not initialized. */
+
+void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
+{
+       struct tcf_idrinfo *idrinfo = tn->idrinfo;
+
+       spin_lock(&idrinfo->lock);
+       /* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
+       WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
+       spin_unlock(&idrinfo->lock);
+}
+EXPORT_SYMBOL(tcf_idr_cleanup);
+
+/* Check if an action with the specified index exists. If the action is found,
+ * increment its reference and bind counters and return 1. Otherwise, insert a
+ * temporary error pointer (to prevent concurrent users from inserting actions
+ * with the same index) and return 0.
+ */
+
+int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+                       struct tc_action **a, int bind)
+{
+       struct tcf_idrinfo *idrinfo = tn->idrinfo;
+       struct tc_action *p;
+       int ret;
+
+again:
+       spin_lock(&idrinfo->lock);
+       if (*index) {
+               p = idr_find(&idrinfo->action_idr, *index);
+               if (IS_ERR(p)) {
+                       /* This means that another process allocated
+                        * index but did not assign the pointer yet.
+                        */
+                       spin_unlock(&idrinfo->lock);
+                       goto again;
+               }
+
+               if (p) {
+                       refcount_inc(&p->tcfa_refcnt);
+                       if (bind)
+                               atomic_inc(&p->tcfa_bindcnt);
+                       *a = p;
+                       ret = 1;
+               } else {
+                       *a = NULL;
+                       ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
+                                           *index, GFP_ATOMIC);
+                       if (!ret)
+                               idr_replace(&idrinfo->action_idr,
+                                           ERR_PTR(-EBUSY), *index);
+               }
+       } else {
+               *index = 1;
+               *a = NULL;
+               ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
+                                   UINT_MAX, GFP_ATOMIC);
+               if (!ret)
+                       idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
+                                   *index);
+       }
+       spin_unlock(&idrinfo->lock);
+       return ret;
+}
+EXPORT_SYMBOL(tcf_idr_check_alloc);
+
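Every action init converted later in this patch follows the same three-way
contract around tcf_idr_check_alloc(). Spelled out once, for a hypothetical
action 'foo' (condensed from the act_bpf/act_csum/act_nat hunks below):

        err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
        if (!err) {             /* 0: index reserved as ERR_PTR(-EBUSY) */
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_foo_ops, bind, false);
                if (ret) {
                        tcf_idr_cleanup(tn, parm->index); /* drop placeholder */
                        return ret;
                }
                ret = ACT_P_CREATED;
        } else if (err > 0) {   /* 1: found, refcnt/bindcnt already taken */
                if (bind)       /* don't override defaults */
                        return 0;
                if (!ovr) {
                        tcf_idr_release(*a, bind);
                        return -EEXIST;
                }
        } else {                /* < 0: allocation error */
                return err;
        }
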
 void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
                         struct tcf_idrinfo *idrinfo)
 {
@@ -538,13 +662,15 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
 }
 EXPORT_SYMBOL(tcf_action_exec);
 
-int tcf_action_destroy(struct list_head *actions, int bind)
+int tcf_action_destroy(struct tc_action *actions[], int bind)
 {
        const struct tc_action_ops *ops;
-       struct tc_action *a, *tmp;
-       int ret = 0;
+       struct tc_action *a;
+       int ret = 0, i;
 
-       list_for_each_entry_safe(a, tmp, actions, list) {
+       for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
+               a = actions[i];
+               actions[i] = NULL;
                ops = a->ops;
                ret = __tcf_idr_release(a, bind, true);
                if (ret == ACT_P_DELETED)
@@ -555,6 +681,24 @@ int tcf_action_destroy(struct list_head *actions, int bind)
        return ret;
 }
 
+static int tcf_action_put(struct tc_action *p)
+{
+       return __tcf_action_put(p, false);
+}
+
+static void tcf_action_put_many(struct tc_action *actions[])
+{
+       int i;
+
+       for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
+               struct tc_action *a = actions[i];
+               const struct tc_action_ops *ops = a->ops;
+
+               if (tcf_action_put(a))
+                       module_put(ops->owner);
+       }
+}
+
 int
 tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
@@ -567,16 +711,22 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
        int err = -EINVAL;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
+       struct tc_cookie *cookie;
 
        if (nla_put_string(skb, TCA_KIND, a->ops->kind))
                goto nla_put_failure;
        if (tcf_action_copy_stats(skb, a, 0))
                goto nla_put_failure;
-       if (a->act_cookie) {
-               if (nla_put(skb, TCA_ACT_COOKIE, a->act_cookie->len,
-                           a->act_cookie->data))
+
+       rcu_read_lock();
+       cookie = rcu_dereference(a->act_cookie);
+       if (cookie) {
+               if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
+                       rcu_read_unlock();
                        goto nla_put_failure;
+               }
        }
+       rcu_read_unlock();
 
        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
@@ -593,14 +743,15 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 }
 EXPORT_SYMBOL(tcf_action_dump_1);
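
The dump path above now reads act_cookie under RCU; its writer counterpart,
tcf_set_action_cookie() (called further down but not shown in this hunk), is
presumably the usual xchg-plus-RCU-free pattern. A sketch, assuming struct
tc_cookie gained a struct rcu_head elsewhere in the series:

        static void tcf_free_cookie_rcu(struct rcu_head *p)
        {
                struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);

                kfree(cookie->data);
                kfree(cookie);
        }

        static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
                                          struct tc_cookie *new_cookie)
        {
                struct tc_cookie *old;

                /* publish the new cookie, free the old after a grace period */
                old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
                if (old)
                        call_rcu(&old->rcu, tcf_free_cookie_rcu);
        }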
 
-int tcf_action_dump(struct sk_buff *skb, struct list_head *actions,
+int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
                    int bind, int ref)
 {
        struct tc_action *a;
-       int err = -EINVAL;
+       int err = -EINVAL, i;
        struct nlattr *nest;
 
-       list_for_each_entry(a, actions, list) {
+       for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
+               a = actions[i];
                nest = nla_nest_start(skb, a->order);
                if (nest == NULL)
                        goto nla_put_failure;
@@ -638,6 +789,7 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                                    struct nlattr *nla, struct nlattr *est,
                                    char *name, int ovr, int bind,
+                                   bool rtnl_held,
                                    struct netlink_ext_ack *extack)
 {
        struct tc_action *a;
@@ -688,9 +840,11 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        a_o = tc_lookup_action_n(act_name);
        if (a_o == NULL) {
 #ifdef CONFIG_MODULES
-               rtnl_unlock();
+               if (rtnl_held)
+                       rtnl_unlock();
                request_module("act_%s", act_name);
-               rtnl_lock();
+               if (rtnl_held)
+                       rtnl_lock();
 
                a_o = tc_lookup_action_n(act_name);
 
@@ -713,19 +867,15 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        /* backward compatibility for policer */
        if (name == NULL)
                err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
-                               extack);
+                               rtnl_held, extack);
        else
-               err = a_o->init(net, nla, est, &a, ovr, bind, extack);
+               err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
+                               extack);
        if (err < 0)
                goto err_mod;
 
-       if (name == NULL && tb[TCA_ACT_COOKIE]) {
-               if (a->act_cookie) {
-                       kfree(a->act_cookie->data);
-                       kfree(a->act_cookie);
-               }
-               a->act_cookie = cookie;
-       }
+       if (!name && tb[TCA_ACT_COOKIE])
+               tcf_set_action_cookie(&a->act_cookie, cookie);
 
        /* module count goes up only when brand new policy is created
         * if it exists and is only bound to in a_o->init() then
@@ -737,10 +887,9 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
                err = tcf_action_goto_chain_init(a, tp);
                if (err) {
-                       LIST_HEAD(actions);
+                       struct tc_action *actions[] = { a, NULL };
 
-                       list_add_tail(&a->list, &actions);
-                       tcf_action_destroy(&actions, bind);
+                       tcf_action_destroy(actions, bind);
                        NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
                        return ERR_PTR(err);
                }
@@ -758,21 +907,12 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        return ERR_PTR(err);
 }
 
-static void cleanup_a(struct list_head *actions, int ovr)
-{
-       struct tc_action *a;
-
-       if (!ovr)
-               return;
-
-       list_for_each_entry(a, actions, list)
-               a->tcfa_refcnt--;
-}
+/* Returns the number of initialized actions or a negative error. */
 
 int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
                    struct nlattr *est, char *name, int ovr, int bind,
-                   struct list_head *actions, size_t *attr_size,
-                   struct netlink_ext_ack *extack)
+                   struct tc_action *actions[], size_t *attr_size,
+                   bool rtnl_held, struct netlink_ext_ack *extack)
 {
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct tc_action *act;
@@ -786,25 +926,19 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
 
        for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
                act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
-                                       extack);
+                                       rtnl_held, extack);
                if (IS_ERR(act)) {
                        err = PTR_ERR(act);
                        goto err;
                }
                act->order = i;
                sz += tcf_action_fill_size(act);
-               if (ovr)
-                       act->tcfa_refcnt++;
-               list_add_tail(&act->list, actions);
+               /* Start from index 0 */
+               actions[i - 1] = act;
        }
 
        *attr_size = tcf_action_full_attrs_size(sz);
-
-       /* Remove the temp refcnt which was necessary to protect against
-        * destroying an existing action which was being replaced
-        */
-       cleanup_a(actions, ovr);
-       return 0;
+       return i - 1;
 
 err:
        tcf_action_destroy(actions, bind);
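
Because tcf_action_init() now returns a count rather than 0 on success,
callers must treat only negative values as failure. A minimal sketch of an
adjusted call site (compare tcf_action_add() below):

        struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
        size_t attr_size = 0;
        int ret;

        ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions,
                              &attr_size, true, extack);
        if (ret < 0)    /* a plain 'if (ret)' would now reject every success */
                return ret;
        /* ret actions were placed in actions[0] .. actions[ret - 1] */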
@@ -855,7 +989,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
        return -1;
 }
 
-static int tca_get_fill(struct sk_buff *skb, struct list_head *actions,
+static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
                        u32 portid, u32 seq, u16 flags, int event, int bind,
                        int ref)
 {
@@ -891,7 +1025,7 @@ static int tca_get_fill(struct sk_buff *skb, struct list_head *actions,
 
 static int
 tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
-              struct list_head *actions, int event,
+              struct tc_action *actions[], int event,
               struct netlink_ext_ack *extack)
 {
        struct sk_buff *skb;
@@ -900,7 +1034,7 @@ tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
        if (!skb)
                return -ENOBUFS;
        if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
-                        0, 0) <= 0) {
+                        0, 1) <= 0) {
                NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
                kfree_skb(skb);
                return -EINVAL;
@@ -1027,9 +1161,41 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
        return err;
 }
 
+static int tcf_action_delete(struct net *net, struct tc_action *actions[],
+                            int *acts_deleted, struct netlink_ext_ack *extack)
+{
+       u32 act_index;
+       int ret, i;
+
+       for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
+               struct tc_action *a = actions[i];
+               const struct tc_action_ops *ops = a->ops;
+
+               /* Actions can be deleted concurrently, so we must save their
+                * type and id to search again after the reference is released.
+                */
+               act_index = a->tcfa_index;
+
+               if (tcf_action_put(a)) {
+                       /* last reference, action was deleted concurrently */
+                       module_put(ops->owner);
+               } else {
+                       /* now do the delete */
+                       ret = ops->delete(net, act_index);
+                       if (ret < 0) {
+                               *acts_deleted = i + 1;
+                               return ret;
+                       }
+               }
+       }
+       *acts_deleted = i;
+       return 0;
+}
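
tcf_action_delete() goes through the new ops->delete callback so the action
can be looked up again by index after its reference has been dropped. Every
action module in this patch wires the callback identically; the shape, for a
hypothetical action 'foo' with pernet id foo_net_id (mirroring
tcf_bpf_delete() and friends below):

        static int tcf_foo_delete(struct net *net, u32 index)
        {
                struct tc_action_net *tn = net_generic(net, foo_net_id);

                return tcf_idr_delete_index(tn, index);
        }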
+
 static int
-tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
-              u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
+tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
+              int *acts_deleted, u32 portid, size_t attr_size,
+              struct netlink_ext_ack *extack)
 {
        int ret;
        struct sk_buff *skb;
@@ -1040,14 +1206,14 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
                return -ENOBUFS;
 
        if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
-                        0, 1) <= 0) {
+                        0, 2) <= 0) {
                NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
                kfree_skb(skb);
                return -EINVAL;
        }
 
        /* now do the delete */
-       ret = tcf_action_destroy(actions, 0);
+       ret = tcf_action_delete(net, actions, acts_deleted, extack);
        if (ret < 0) {
                NL_SET_ERR_MSG(extack, "Failed to delete TC action");
                kfree_skb(skb);
@@ -1069,7 +1235,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct tc_action *act;
        size_t attr_size = 0;
-       LIST_HEAD(actions);
+       struct tc_action *actions[TCA_ACT_MAX_PRIO + 1] = {};
+       int acts_deleted = 0;
 
        ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
        if (ret < 0)
@@ -1091,27 +1258,27 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
                }
                act->order = i;
                attr_size += tcf_action_fill_size(act);
-               list_add_tail(&act->list, &actions);
+               actions[i - 1] = act;
        }
 
        attr_size = tcf_action_full_attrs_size(attr_size);
 
        if (event == RTM_GETACTION)
-               ret = tcf_get_notify(net, portid, n, &actions, event, extack);
+               ret = tcf_get_notify(net, portid, n, actions, event, extack);
        else { /* delete */
-               ret = tcf_del_notify(net, n, &actions, portid, attr_size, extack);
+               ret = tcf_del_notify(net, n, actions, &acts_deleted, portid,
+                                    attr_size, extack);
                if (ret)
                        goto err;
                return ret;
        }
 err:
-       if (event != RTM_GETACTION)
-               tcf_action_destroy(&actions, 0);
+       tcf_action_put_many(&actions[acts_deleted]);
        return ret;
 }
 
 static int
-tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
+tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
               u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
 {
        struct sk_buff *skb;
@@ -1142,14 +1309,17 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
 {
        size_t attr_size = 0;
        int ret = 0;
-       LIST_HEAD(actions);
+       struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
 
-       ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions,
-                             &attr_size, extack);
-       if (ret)
+       ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions,
+                             &attr_size, true, extack);
+       if (ret < 0)
                return ret;
+       ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
+       if (ovr)
+               tcf_action_put_many(actions);
 
-       return tcf_add_notify(net, n, &actions, portid, attr_size, extack);
+       return ret;
 }
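
The ovr branch above is where the changed init semantics surface: init now
keeps the reference it takes on an existing action, so a replace has to drop
it once the netlink notification is out. The reference flow for a replace, as
read from this function:

        /*
         * tcf_action_init()      -> tcf_idr_check_alloc() takes refcnt/bindcnt
         * tcf_add_notify()       -> RTM_NEWACTION sent to user space
         * tcf_action_put_many()  -> drops the reference taken by init
         */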
 
 static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON;
index 18089c02e55719d9818842f8cd3b35fa6cf94497..06f743d8ed4130bc3cc53996ecb9313546230066 100644 (file)
@@ -141,8 +141,8 @@ static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
        struct tcf_bpf *prog = to_bpf(act);
        struct tc_act_bpf opt = {
                .index   = prog->tcf_index,
-               .refcnt  = prog->tcf_refcnt - ref,
-               .bindcnt = prog->tcf_bindcnt - bind,
+               .refcnt  = refcount_read(&prog->tcf_refcnt) - ref,
+               .bindcnt = atomic_read(&prog->tcf_bindcnt) - bind,
                .action  = prog->tcf_action,
        };
        struct tcf_t tm;
@@ -276,7 +276,8 @@ static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
 
 static int tcf_bpf_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **act,
-                       int replace, int bind, struct netlink_ext_ack *extack)
+                       int replace, int bind, bool rtnl_held,
+                       struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, bpf_net_id);
        struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
@@ -298,21 +299,27 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 
        parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
 
-       if (!tcf_idr_check(tn, parm->index, act, bind)) {
+       ret = tcf_idr_check_alloc(tn, &parm->index, act, bind);
+       if (!ret) {
                ret = tcf_idr_create(tn, parm->index, est, act,
                                     &act_bpf_ops, bind, true);
-               if (ret < 0)
+               if (ret < 0) {
+                       tcf_idr_cleanup(tn, parm->index);
                        return ret;
+               }
 
                res = ACT_P_CREATED;
-       } else {
+       } else if (ret > 0) {
                /* Don't override defaults. */
                if (bind)
                        return 0;
 
-               tcf_idr_release(*act, bind);
-               if (!replace)
+               if (!replace) {
+                       tcf_idr_release(*act, bind);
                        return -EEXIST;
+               }
+       } else {
+               return ret;
        }
 
        is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
@@ -355,8 +362,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 
        return res;
 out:
-       if (res == ACT_P_CREATED)
-               tcf_idr_release(*act, bind);
+       tcf_idr_release(*act, bind);
 
        return ret;
 }
@@ -387,6 +393,13 @@ static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
+static int tcf_bpf_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, bpf_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_bpf_ops __read_mostly = {
        .kind           =       "bpf",
        .type           =       TCA_ACT_BPF,
@@ -397,6 +410,7 @@ static struct tc_action_ops act_bpf_ops __read_mostly = {
        .init           =       tcf_bpf_init,
        .walk           =       tcf_bpf_walker,
        .lookup         =       tcf_bpf_search,
+       .delete         =       tcf_bpf_delete,
        .size           =       sizeof(struct tcf_bpf),
 };
 
index e4b880fa51fec90fa1a1d92c11f8a337637d3509..1e31f0e448e2c57f6ac6458d94efd196841bb2b0 100644 (file)
@@ -96,7 +96,7 @@ static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = {
 
 static int tcf_connmark_init(struct net *net, struct nlattr *nla,
                             struct nlattr *est, struct tc_action **a,
-                            int ovr, int bind,
+                            int ovr, int bind, bool rtnl_held,
                             struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, connmark_net_id);
@@ -118,11 +118,14 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
 
        parm = nla_data(tb[TCA_CONNMARK_PARMS]);
 
-       if (!tcf_idr_check(tn, parm->index, a, bind)) {
+       ret = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       if (!ret) {
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_connmark_ops, bind, false);
-               if (ret)
+               if (ret) {
+                       tcf_idr_cleanup(tn, parm->index);
                        return ret;
+               }
 
                ci = to_connmark(*a);
                ci->tcf_action = parm->action;
@@ -131,16 +134,18 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
 
                tcf_idr_insert(tn, *a);
                ret = ACT_P_CREATED;
-       } else {
+       } else if (ret > 0) {
                ci = to_connmark(*a);
                if (bind)
                        return 0;
-               tcf_idr_release(*a, bind);
-               if (!ovr)
+               if (!ovr) {
+                       tcf_idr_release(*a, bind);
                        return -EEXIST;
+               }
                /* replacing action and zone */
                ci->tcf_action = parm->action;
                ci->zone = parm->zone;
+               ret = 0;
        }
 
        return ret;
@@ -154,8 +159,8 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
 
        struct tc_connmark opt = {
                .index   = ci->tcf_index,
-               .refcnt  = ci->tcf_refcnt - ref,
-               .bindcnt = ci->tcf_bindcnt - bind,
+               .refcnt  = refcount_read(&ci->tcf_refcnt) - ref,
+               .bindcnt = atomic_read(&ci->tcf_bindcnt) - bind,
                .action  = ci->tcf_action,
                .zone   = ci->zone,
        };
@@ -193,6 +198,13 @@ static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
+static int tcf_connmark_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, connmark_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_connmark_ops = {
        .kind           =       "connmark",
        .type           =       TCA_ACT_CONNMARK,
@@ -202,6 +214,7 @@ static struct tc_action_ops act_connmark_ops = {
        .init           =       tcf_connmark_init,
        .walk           =       tcf_connmark_walker,
        .lookup         =       tcf_connmark_search,
+       .delete         =       tcf_connmark_delete,
        .size           =       sizeof(struct tcf_connmark_info),
 };
 
index 526a8e491626efb65fcda10d875e6f55ca2168e8..bd232d3bd022d516ad1ff228a69aaea7c194d752 100644 (file)
@@ -46,7 +46,8 @@ static struct tc_action_ops act_csum_ops;
 
 static int tcf_csum_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a, int ovr,
-                        int bind, struct netlink_ext_ack *extack)
+                        int bind, bool rtnl_held,
+                        struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, csum_net_id);
        struct tcf_csum_params *params_old, *params_new;
@@ -66,18 +67,24 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
                return -EINVAL;
        parm = nla_data(tb[TCA_CSUM_PARMS]);
 
-       if (!tcf_idr_check(tn, parm->index, a, bind)) {
+       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       if (!err) {
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_csum_ops, bind, true);
-               if (ret)
+               if (ret) {
+                       tcf_idr_cleanup(tn, parm->index);
                        return ret;
+               }
                ret = ACT_P_CREATED;
-       } else {
+       } else if (err > 0) {
                if (bind)/* dont override defaults */
                        return 0;
-               tcf_idr_release(*a, bind);
-               if (!ovr)
+               if (!ovr) {
+                       tcf_idr_release(*a, bind);
                        return -EEXIST;
+               }
+       } else {
+               return err;
        }
 
        p = to_tcf_csum(*a);
@@ -85,8 +92,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
 
        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
-               if (ret == ACT_P_CREATED)
-                       tcf_idr_release(*a, bind);
+               tcf_idr_release(*a, bind);
                return -ENOMEM;
        }
        params_old = rtnl_dereference(p->params);
@@ -597,8 +603,8 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
        struct tcf_csum_params *params;
        struct tc_csum opt = {
                .index   = p->tcf_index,
-               .refcnt  = p->tcf_refcnt - ref,
-               .bindcnt = p->tcf_bindcnt - bind,
+               .refcnt  = refcount_read(&p->tcf_refcnt) - ref,
+               .bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
        };
        struct tcf_t t;
 
@@ -653,6 +659,13 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act)
        return nla_total_size(sizeof(struct tc_csum));
 }
 
+static int tcf_csum_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, csum_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_csum_ops = {
        .kind           = "csum",
        .type           = TCA_ACT_CSUM,
@@ -664,6 +677,7 @@ static struct tc_action_ops act_csum_ops = {
        .walk           = tcf_csum_walker,
        .lookup         = tcf_csum_search,
        .get_fill_size  = tcf_csum_get_fill_size,
+       .delete         = tcf_csum_delete,
        .size           = sizeof(struct tcf_csum),
 };
 
index 4dc4f153cad80861d38f975f7f70b7ce433dbc80..661b72b9147d52d320f094b91b7392488c1f25c2 100644 (file)
@@ -56,7 +56,8 @@ static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
 
 static int tcf_gact_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
-                        int ovr, int bind, struct netlink_ext_ack *extack)
+                        int ovr, int bind, bool rtnl_held,
+                        struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, gact_net_id);
        struct nlattr *tb[TCA_GACT_MAX + 1];
@@ -90,18 +91,24 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
        }
 #endif
 
-       if (!tcf_idr_check(tn, parm->index, a, bind)) {
+       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       if (!err) {
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_gact_ops, bind, true);
-               if (ret)
+               if (ret) {
+                       tcf_idr_cleanup(tn, parm->index);
                        return ret;
+               }
                ret = ACT_P_CREATED;
-       } else {
+       } else if (err > 0) {
                if (bind)/* dont override defaults */
                        return 0;
-               tcf_idr_release(*a, bind);
-               if (!ovr)
+               if (!ovr) {
+                       tcf_idr_release(*a, bind);
                        return -EEXIST;
+               }
+       } else {
+               return err;
        }
 
        gact = to_gact(*a);
@@ -169,8 +176,8 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
        struct tcf_gact *gact = to_gact(a);
        struct tc_gact opt = {
                .index   = gact->tcf_index,
-               .refcnt  = gact->tcf_refcnt - ref,
-               .bindcnt = gact->tcf_bindcnt - bind,
+               .refcnt  = refcount_read(&gact->tcf_refcnt) - ref,
+               .bindcnt = atomic_read(&gact->tcf_bindcnt) - bind,
                .action  = gact->tcf_action,
        };
        struct tcf_t t;
@@ -230,6 +237,13 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act)
        return sz;
 }
 
+static int tcf_gact_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, gact_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_gact_ops = {
        .kind           =       "gact",
        .type           =       TCA_ACT_GACT,
@@ -241,6 +255,7 @@ static struct tc_action_ops act_gact_ops = {
        .walk           =       tcf_gact_walker,
        .lookup         =       tcf_gact_search,
        .get_fill_size  =       tcf_gact_get_fill_size,
+       .delete         =       tcf_gact_delete,
        .size           =       sizeof(struct tcf_gact),
 };
 
index 8527cfdc446d9bb82e8fa9fe1364dc13249b1e03..3d6e265758c06b6ee07c20f9ac4c1786eb022e48 100644 (file)
@@ -415,7 +415,8 @@ static void tcf_ife_cleanup(struct tc_action *a)
        spin_unlock_bh(&ife->tcf_lock);
 
        p = rcu_dereference_protected(ife->params, 1);
-       kfree_rcu(p, rcu);
+       if (p)
+               kfree_rcu(p, rcu);
 }
 
 /* under ife->tcf_lock for existing action */
@@ -447,7 +448,8 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
 
 static int tcf_ife_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a,
-                       int ovr, int bind, struct netlink_ext_ack *extack)
+                       int ovr, int bind, bool rtnl_held,
+                       struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, ife_net_id);
        struct nlattr *tb[TCA_IFE_MAX + 1];
@@ -482,7 +484,12 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
        if (!p)
                return -ENOMEM;
 
-       exists = tcf_idr_check(tn, parm->index, a, bind);
+       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       if (err < 0) {
+               kfree(p);
+               return err;
+       }
+       exists = err;
        if (exists && bind) {
                kfree(p);
                return 0;
@@ -492,16 +499,15 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops,
                                     bind, true);
                if (ret) {
+                       tcf_idr_cleanup(tn, parm->index);
                        kfree(p);
                        return ret;
                }
                ret = ACT_P_CREATED;
-       } else {
+       } else if (!ovr) {
                tcf_idr_release(*a, bind);
-               if (!ovr) {
-                       kfree(p);
-                       return -EEXIST;
-               }
+               kfree(p);
+               return -EEXIST;
        }
 
        ife = to_ife(*a);
@@ -516,8 +522,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                        saddr = nla_data(tb[TCA_IFE_SMAC]);
        }
 
-       ife->tcf_action = parm->action;
-
        if (parm->flags & IFE_ENCODE) {
                if (daddr)
                        ether_addr_copy(p->eth_dst, daddr);
@@ -543,13 +547,13 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                                       NULL, NULL);
                if (err) {
 metadata_parse_err:
-                       if (exists)
-                               tcf_idr_release(*a, bind);
-                       if (ret == ACT_P_CREATED)
-                               _tcf_ife_cleanup(*a);
 
                        if (exists)
                                spin_unlock_bh(&ife->tcf_lock);
+                       tcf_idr_release(*a, bind);
+
                        kfree(p);
                        return err;
                }
@@ -567,7 +571,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                err = use_all_metadata(ife);
                if (err) {
                        if (ret == ACT_P_CREATED)
-                               _tcf_ife_cleanup(*a);
+                               tcf_idr_release(*a, bind);
 
                        if (exists)
                                spin_unlock_bh(&ife->tcf_lock);
@@ -576,6 +580,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                }
        }
 
+       ife->tcf_action = parm->action;
        if (exists)
                spin_unlock_bh(&ife->tcf_lock);
 
@@ -598,8 +603,8 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
        struct tcf_ife_params *p = rtnl_dereference(ife->params);
        struct tc_ife opt = {
                .index = ife->tcf_index,
-               .refcnt = ife->tcf_refcnt - ref,
-               .bindcnt = ife->tcf_bindcnt - bind,
+               .refcnt = refcount_read(&ife->tcf_refcnt) - ref,
+               .bindcnt = atomic_read(&ife->tcf_bindcnt) - bind,
                .action = ife->tcf_action,
                .flags = p->flags,
        };
@@ -845,6 +850,13 @@ static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
+static int tcf_ife_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, ife_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_ife_ops = {
        .kind = "ife",
        .type = TCA_ACT_IFE,
@@ -855,6 +867,7 @@ static struct tc_action_ops act_ife_ops = {
        .init = tcf_ife_init,
        .walk = tcf_ife_walker,
        .lookup = tcf_ife_search,
+       .delete = tcf_ife_delete,
        .size = sizeof(struct tcf_ife_info),
 };
 
index 14c312d7908f535cb4f43d8d0a73e4cbe445d362..0dc787a57798292be40ba1f66c16d2affd31a046 100644 (file)
@@ -119,13 +119,18 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
        if (tb[TCA_IPT_INDEX] != NULL)
                index = nla_get_u32(tb[TCA_IPT_INDEX]);
 
-       exists = tcf_idr_check(tn, index, a, bind);
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
+       if (err < 0)
+               return err;
+       exists = err;
        if (exists && bind)
                return 0;
 
        if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
                if (exists)
                        tcf_idr_release(*a, bind);
+               else
+                       tcf_idr_cleanup(tn, index);
                return -EINVAL;
        }
 
@@ -133,22 +138,27 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
        if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
                if (exists)
                        tcf_idr_release(*a, bind);
+               else
+                       tcf_idr_cleanup(tn, index);
                return -EINVAL;
        }
 
        if (!exists) {
                ret = tcf_idr_create(tn, index, est, a, ops, bind,
                                     false);
-               if (ret)
+               if (ret) {
+                       tcf_idr_cleanup(tn, index);
                        return ret;
+               }
                ret = ACT_P_CREATED;
        } else {
                if (bind)/* dont override defaults */
                        return 0;
-               tcf_idr_release(*a, bind);
 
-               if (!ovr)
+               if (!ovr) {
+                       tcf_idr_release(*a, bind);
                        return -EEXIST;
+               }
        }
        hook = nla_get_u32(tb[TCA_IPT_HOOK]);
 
@@ -196,7 +206,8 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
 
 static int tcf_ipt_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a, int ovr,
-                       int bind, struct netlink_ext_ack *extack)
+                       int bind, bool rtnl_held,
+                       struct netlink_ext_ack *extack)
 {
        return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
                              bind);
@@ -204,7 +215,8 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla,
 
 static int tcf_xt_init(struct net *net, struct nlattr *nla,
                       struct nlattr *est, struct tc_action **a, int ovr,
-                      int bind, struct netlink_ext_ack *extack)
+                      int bind, bool rtnl_held,
+                      struct netlink_ext_ack *extack)
 {
        return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
                              bind);
@@ -280,8 +292,8 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
        if (unlikely(!t))
                goto nla_put_failure;
 
-       c.bindcnt = ipt->tcf_bindcnt - bind;
-       c.refcnt = ipt->tcf_refcnt - ref;
+       c.bindcnt = atomic_read(&ipt->tcf_bindcnt) - bind;
+       c.refcnt = refcount_read(&ipt->tcf_refcnt) - ref;
        strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);
 
        if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
@@ -322,6 +334,13 @@ static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
+static int tcf_ipt_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, ipt_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_ipt_ops = {
        .kind           =       "ipt",
        .type           =       TCA_ACT_IPT,
@@ -332,6 +351,7 @@ static struct tc_action_ops act_ipt_ops = {
        .init           =       tcf_ipt_init,
        .walk           =       tcf_ipt_walker,
        .lookup         =       tcf_ipt_search,
+       .delete         =       tcf_ipt_delete,
        .size           =       sizeof(struct tcf_ipt),
 };
 
@@ -372,6 +392,13 @@ static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
+static int tcf_xt_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, xt_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_xt_ops = {
        .kind           =       "xt",
        .type           =       TCA_ACT_XT,
@@ -382,6 +409,7 @@ static struct tc_action_ops act_xt_ops = {
        .init           =       tcf_xt_init,
        .walk           =       tcf_xt_walker,
        .lookup         =       tcf_xt_search,
+       .delete         =       tcf_xt_delete,
        .size           =       sizeof(struct tcf_ipt),
 };
 
index fd34015331ab86c395a2e599546a51b64efb8625..6afd89a36c69032668bf2f287da9493076c4e5ea 100644 (file)
@@ -68,8 +68,9 @@ static unsigned int mirred_net_id;
 static struct tc_action_ops act_mirred_ops;
 
 static int tcf_mirred_init(struct net *net, struct nlattr *nla,
-                          struct nlattr *est, struct tc_action **a, int ovr,
-                          int bind, struct netlink_ext_ack *extack)
+                          struct nlattr *est, struct tc_action **a,
+                          int ovr, int bind, bool rtnl_held,
+                          struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, mirred_net_id);
        struct nlattr *tb[TCA_MIRRED_MAX + 1];
@@ -78,7 +79,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        struct tcf_mirred *m;
        struct net_device *dev;
        bool exists = false;
-       int ret;
+       int ret, err;
 
        if (!nla) {
                NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
@@ -93,7 +94,10 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        }
        parm = nla_data(tb[TCA_MIRRED_PARMS]);
 
-       exists = tcf_idr_check(tn, parm->index, a, bind);
+       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       if (err < 0)
+               return err;
+       exists = err;
        if (exists && bind)
                return 0;
 
@@ -106,6 +110,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        default:
                if (exists)
                        tcf_idr_release(*a, bind);
+               else
+                       tcf_idr_cleanup(tn, parm->index);
                NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
                return -EINVAL;
        }
@@ -114,6 +120,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                if (dev == NULL) {
                        if (exists)
                                tcf_idr_release(*a, bind);
+                       else
+                               tcf_idr_cleanup(tn, parm->index);
                        return -ENODEV;
                }
                mac_header_xmit = dev_is_mac_header_xmit(dev);
@@ -123,18 +131,20 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 
        if (!exists) {
                if (!dev) {
+                       tcf_idr_cleanup(tn, parm->index);
                        NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
                        return -EINVAL;
                }
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_mirred_ops, bind, true);
-               if (ret)
+               if (ret) {
+                       tcf_idr_cleanup(tn, parm->index);
                        return ret;
+               }
                ret = ACT_P_CREATED;
-       } else {
+       } else if (!ovr) {
                tcf_idr_release(*a, bind);
-               if (!ovr)
-                       return -EEXIST;
+               return -EEXIST;
        }
        m = to_mirred(*a);
 
@@ -250,8 +260,8 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
        struct tc_mirred opt = {
                .index   = m->tcf_index,
                .action  = m->tcf_action,
-               .refcnt  = m->tcf_refcnt - ref,
-               .bindcnt = m->tcf_bindcnt - bind,
+               .refcnt  = refcount_read(&m->tcf_refcnt) - ref,
+               .bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
                .eaction = m->tcfm_eaction,
                .ifindex = dev ? dev->ifindex : 0,
        };
@@ -321,6 +331,13 @@ static struct net_device *tcf_mirred_get_dev(const struct tc_action *a)
        return rtnl_dereference(m->tcfm_dev);
 }
 
+static int tcf_mirred_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, mirred_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_mirred_ops = {
        .kind           =       "mirred",
        .type           =       TCA_ACT_MIRRED,
@@ -334,6 +351,7 @@ static struct tc_action_ops act_mirred_ops = {
        .lookup         =       tcf_mirred_search,
        .size           =       sizeof(struct tcf_mirred),
        .get_dev        =       tcf_mirred_get_dev,
+       .delete         =       tcf_mirred_delete,
 };
 
 static __net_init int mirred_init_net(struct net *net)
index 4b5848b6c25207ac74b0508259f9f3019020d3c9..4dd9188a72fddd9ebb7ccf87950d1068b61837af 100644 (file)
@@ -38,7 +38,7 @@ static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
 
 static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
                        struct tc_action **a, int ovr, int bind,
-                       struct netlink_ext_ack *extack)
+                       bool rtnl_held, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, nat_net_id);
        struct nlattr *tb[TCA_NAT_MAX + 1];
@@ -57,18 +57,24 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
                return -EINVAL;
        parm = nla_data(tb[TCA_NAT_PARMS]);
 
-       if (!tcf_idr_check(tn, parm->index, a, bind)) {
+       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       if (!err) {
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_nat_ops, bind, false);
-               if (ret)
+               if (ret) {
+                       tcf_idr_cleanup(tn, parm->index);
                        return ret;
+               }
                ret = ACT_P_CREATED;
-       } else {
+       } else if (err > 0) {
                if (bind)
                        return 0;
-               tcf_idr_release(*a, bind);
-               if (!ovr)
+               if (!ovr) {
+                       tcf_idr_release(*a, bind);
                        return -EEXIST;
+               }
+       } else {
+               return err;
        }
        p = to_tcf_nat(*a);
 
@@ -257,8 +263,8 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
 
                .index    = p->tcf_index,
                .action   = p->tcf_action,
-               .refcnt   = p->tcf_refcnt - ref,
-               .bindcnt  = p->tcf_bindcnt - bind,
+               .refcnt   = refcount_read(&p->tcf_refcnt) - ref,
+               .bindcnt  = atomic_read(&p->tcf_bindcnt) - bind,
        };
        struct tcf_t t;
 
@@ -294,6 +300,13 @@ static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
+static int tcf_nat_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, nat_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_nat_ops = {
        .kind           =       "nat",
        .type           =       TCA_ACT_NAT,
@@ -303,6 +316,7 @@ static struct tc_action_ops act_nat_ops = {
        .init           =       tcf_nat_init,
        .walk           =       tcf_nat_walker,
        .lookup         =       tcf_nat_search,
+       .delete         =       tcf_nat_delete,
        .size           =       sizeof(struct tcf_nat),
 };
 
index 8a925c72db5fe413eaf4db3ac231f26484b049cb..cc8ffcd1ddb5841e3cc795391bea6758d20fa530 100644 (file)
@@ -132,20 +132,23 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
 
 static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                          struct nlattr *est, struct tc_action **a,
-                         int ovr, int bind, struct netlink_ext_ack *extack)
+                         int ovr, int bind, bool rtnl_held,
+                         struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, pedit_net_id);
        struct nlattr *tb[TCA_PEDIT_MAX + 1];
-       struct nlattr *pattr;
-       struct tc_pedit *parm;
-       int ret = 0, err;
-       struct tcf_pedit *p;
        struct tc_pedit_key *keys = NULL;
        struct tcf_pedit_key_ex *keys_ex;
+       struct tc_pedit *parm;
+       struct nlattr *pattr;
+       struct tcf_pedit *p;
+       int ret = 0, err;
        int ksize;
 
-       if (nla == NULL)
+       if (!nla) {
+               NL_SET_ERR_MSG_MOD(extack, "Pedit requires attributes to be passed");
                return -EINVAL;
+       }
 
        err = nla_parse_nested(tb, TCA_PEDIT_MAX, nla, pedit_policy, NULL);
        if (err < 0)
@@ -154,47 +157,62 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
        pattr = tb[TCA_PEDIT_PARMS];
        if (!pattr)
                pattr = tb[TCA_PEDIT_PARMS_EX];
-       if (!pattr)
+       if (!pattr) {
+               NL_SET_ERR_MSG_MOD(extack, "Missing required TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute");
                return -EINVAL;
+       }
 
        parm = nla_data(pattr);
        ksize = parm->nkeys * sizeof(struct tc_pedit_key);
-       if (nla_len(pattr) < sizeof(*parm) + ksize)
+       if (nla_len(pattr) < sizeof(*parm) + ksize) {
+               NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid");
                return -EINVAL;
+       }
 
        keys_ex = tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys);
        if (IS_ERR(keys_ex))
                return PTR_ERR(keys_ex);
 
-       if (!tcf_idr_check(tn, parm->index, a, bind)) {
-               if (!parm->nkeys)
-                       return -EINVAL;
+       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       if (!err) {
+               if (!parm->nkeys) {
+                       tcf_idr_cleanup(tn, parm->index);
+                       NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
+                       ret = -EINVAL;
+                       goto out_free;
+               }
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_pedit_ops, bind, false);
-               if (ret)
-                       return ret;
+               if (ret) {
+                       tcf_idr_cleanup(tn, parm->index);
+                       goto out_free;
+               }
                p = to_pedit(*a);
                keys = kmalloc(ksize, GFP_KERNEL);
-               if (keys == NULL) {
+               if (!keys) {
                        tcf_idr_release(*a, bind);
-                       kfree(keys_ex);
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto out_free;
                }
                ret = ACT_P_CREATED;
-       } else {
+       } else if (err > 0) {
                if (bind)
-                       return 0;
-               tcf_idr_release(*a, bind);
-               if (!ovr)
-                       return -EEXIST;
+                       goto out_free;
+               if (!ovr) {
+                       tcf_idr_release(*a, bind);
+                       ret = -EEXIST;
+                       goto out_free;
+               }
                p = to_pedit(*a);
                if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
                        keys = kmalloc(ksize, GFP_KERNEL);
                        if (!keys) {
-                               kfree(keys_ex);
-                               return -ENOMEM;
+                               ret = -ENOMEM;
+                               goto out_free;
                        }
                }
+       } else {
+               return err;
        }
 
        spin_lock_bh(&p->tcf_lock);
@@ -214,12 +232,17 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+out_free:
+       kfree(keys_ex);
+       return ret;
 }
 
 static void tcf_pedit_cleanup(struct tc_action *a)
 {
        struct tcf_pedit *p = to_pedit(a);
        struct tc_pedit_key *keys = p->tcfp_keys;
+
        kfree(keys);
        kfree(p->tcfp_keys_ex);
 }
@@ -284,11 +307,12 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
        if (p->tcfp_nkeys > 0) {
                struct tc_pedit_key *tkey = p->tcfp_keys;
                struct tcf_pedit_key_ex *tkey_ex = p->tcfp_keys_ex;
-               enum pedit_header_type htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+               enum pedit_header_type htype =
+                       TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
                enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET;
 
                for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
-                       u32 *ptr, _data;
+                       u32 *ptr, hdata;
                        int offset = tkey->off;
                        int hoffset;
                        u32 val;
@@ -303,39 +327,39 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
 
                        rc = pedit_skb_hdr_offset(skb, htype, &hoffset);
                        if (rc) {
-                               pr_info("tc filter pedit bad header type specified (0x%x)\n",
+                               pr_info("tc action pedit bad header type specified (0x%x)\n",
                                        htype);
                                goto bad;
                        }
 
                        if (tkey->offmask) {
-                               char *d, _d;
+                               u8 *d, _d;
 
                                if (!offset_valid(skb, hoffset + tkey->at)) {
-                                       pr_info("tc filter pedit 'at' offset %d out of bounds\n",
+                                       pr_info("tc action pedit 'at' offset %d out of bounds\n",
                                                hoffset + tkey->at);
                                        goto bad;
                                }
-                               d = skb_header_pointer(skb, hoffset + tkey->at, 1,
-                                                      &_d);
+                               d = skb_header_pointer(skb, hoffset + tkey->at,
+                                                      sizeof(_d), &_d);
                                if (!d)
                                        goto bad;
                                offset += (*d & tkey->offmask) >> tkey->shift;
                        }
 
                        if (offset % 4) {
-                               pr_info("tc filter pedit"
-                                       " offset must be on 32 bit boundaries\n");
+                               pr_info("tc action pedit offset must be on 32 bit boundaries\n");
                                goto bad;
                        }
 
                        if (!offset_valid(skb, hoffset + offset)) {
-                               pr_info("tc filter pedit offset %d out of bounds\n",
+                               pr_info("tc action pedit offset %d out of bounds\n",
                                        hoffset + offset);
                                goto bad;
                        }
 
-                       ptr = skb_header_pointer(skb, hoffset + offset, 4, &_data);
+                       ptr = skb_header_pointer(skb, hoffset + offset,
+                                                sizeof(hdata), &hdata);
                        if (!ptr)
                                goto bad;
                        /* just do it, baby */
@@ -347,19 +371,20 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
                                val = (*ptr + tkey->val) & ~tkey->mask;
                                break;
                        default:
-                               pr_info("tc filter pedit bad command (%d)\n",
+                               pr_info("tc action pedit bad command (%d)\n",
                                        cmd);
                                goto bad;
                        }
 
                        *ptr = ((*ptr & tkey->mask) ^ val);
-                       if (ptr == &_data)
+                       if (ptr == &hdata)
                                skb_store_bits(skb, hoffset + offset, ptr, 4);
                }
 
                goto done;
-       } else
+       } else {
                WARN(1, "pedit BUG: index %d\n", p->tcf_index);
+       }
 
 bad:
        p->tcf_qstats.overlimits++;
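
The rewritten loop above leans on the skb_header_pointer()/skb_store_bits()
pair: the read may be satisfied from a stack copy when the word is not in the
linear skb area, and in that case the edited word must be written back
explicitly. The idiom in isolation (simplified from the loop; mask and val as
in the SET command):

        u32 hdata, *ptr;

        ptr = skb_header_pointer(skb, offset, sizeof(hdata), &hdata);
        if (!ptr)
                goto bad;                       /* offset past end of packet */
        *ptr = (*ptr & mask) ^ val;             /* edit in place or in the copy */
        if (ptr == &hdata)                      /* copied: push the word back */
                skb_store_bits(skb, offset, ptr, sizeof(hdata));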
@@ -391,8 +416,8 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
        opt->nkeys = p->tcfp_nkeys;
        opt->flags = p->tcfp_flags;
        opt->action = p->tcf_action;
-       opt->refcnt = p->tcf_refcnt - ref;
-       opt->bindcnt = p->tcf_bindcnt - bind;
+       opt->refcnt = refcount_read(&p->tcf_refcnt) - ref;
+       opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind;
 
        if (p->tcfp_keys_ex) {
                tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys);
@@ -435,6 +460,13 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
+static int tcf_pedit_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, pedit_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_pedit_ops = {
        .kind           =       "pedit",
        .type           =       TCA_ACT_PEDIT,
@@ -445,6 +477,7 @@ static struct tc_action_ops act_pedit_ops = {
        .init           =       tcf_pedit_init,
        .walk           =       tcf_pedit_walker,
        .lookup         =       tcf_pedit_search,
+       .delete         =       tcf_pedit_delete,
        .size           =       sizeof(struct tcf_pedit),
 };
 
index 4e72bc2a0dfb525df3cc4ac582f417ce5c537af3..1f3192ea8df7a5f4de28c297cac3337cfccfe601 100644 (file)
@@ -75,7 +75,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
 
 static int tcf_act_police_init(struct net *net, struct nlattr *nla,
                               struct nlattr *est, struct tc_action **a,
-                              int ovr, int bind,
+                              int ovr, int bind, bool rtnl_held,
                               struct netlink_ext_ack *extack)
 {
        int ret = 0, err;
@@ -101,20 +101,24 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
                return -EINVAL;
 
        parm = nla_data(tb[TCA_POLICE_TBF]);
-       exists = tcf_idr_check(tn, parm->index, a, bind);
+       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       if (err < 0)
+               return err;
+       exists = err;
        if (exists && bind)
                return 0;
 
        if (!exists) {
                ret = tcf_idr_create(tn, parm->index, NULL, a,
                                     &act_police_ops, bind, false);
-               if (ret)
+               if (ret) {
+                       tcf_idr_cleanup(tn, parm->index);
                        return ret;
+               }
                ret = ACT_P_CREATED;
-       } else {
+       } else if (!ovr) {
                tcf_idr_release(*a, bind);
-               if (!ovr)
-                       return -EEXIST;
+               return -EEXIST;
        }
 
        police = to_police(*a);
@@ -195,8 +199,7 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
 failure:
        qdisc_put_rtab(P_tab);
        qdisc_put_rtab(R_tab);
-       if (ret == ACT_P_CREATED)
-               tcf_idr_release(*a, bind);
+       tcf_idr_release(*a, bind);
        return err;
 }
 
@@ -274,8 +277,8 @@ static int tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a,
                .action = police->tcf_action,
                .mtu = police->tcfp_mtu,
                .burst = PSCHED_NS2TICKS(police->tcfp_burst),
-               .refcnt = police->tcf_refcnt - ref,
-               .bindcnt = police->tcf_bindcnt - bind,
+               .refcnt = refcount_read(&police->tcf_refcnt) - ref,
+               .bindcnt = atomic_read(&police->tcf_bindcnt) - bind,
        };
        struct tcf_t t;
 
@@ -314,6 +317,13 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
+static int tcf_police_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, police_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 MODULE_AUTHOR("Alexey Kuznetsov");
 MODULE_DESCRIPTION("Policing actions");
 MODULE_LICENSE("GPL");
@@ -327,6 +337,7 @@ static struct tc_action_ops act_police_ops = {
        .init           =       tcf_act_police_init,
        .walk           =       tcf_act_police_walker,
        .lookup         =       tcf_police_search,
+       .delete         =       tcf_police_delete,
        .size           =       sizeof(struct tcf_police),
 };
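
Every action here also registers a .delete callback; each is a one-line trampoline to tcf_idr_delete_index() with the action's per-netns id, giving the core a way to remove an action by bare index. Userspace syntax is unchanged; deleting the policer above by index still looks like:

	# delete the police action instance with index 1
	tc actions del action police index 1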
 
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 5db358497c9ee610c499c88d0dd6eff463ccd70d..3079e7be5bdef54de97692e27fa65299e69406be 100644
@@ -37,7 +37,8 @@ static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = {
 
 static int tcf_sample_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a, int ovr,
-                          int bind, struct netlink_ext_ack *extack)
+                          int bind, bool rtnl_held,
+                          struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, sample_net_id);
        struct nlattr *tb[TCA_SAMPLE_MAX + 1];
@@ -45,7 +46,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
        struct tc_sample *parm;
        struct tcf_sample *s;
        bool exists = false;
-       int ret;
+       int ret, err;
 
        if (!nla)
                return -EINVAL;
@@ -58,20 +59,24 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 
        parm = nla_data(tb[TCA_SAMPLE_PARMS]);
 
-       exists = tcf_idr_check(tn, parm->index, a, bind);
+       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       if (err < 0)
+               return err;
+       exists = err;
        if (exists && bind)
                return 0;
 
        if (!exists) {
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_sample_ops, bind, false);
-               if (ret)
+               if (ret) {
+                       tcf_idr_cleanup(tn, parm->index);
                        return ret;
+               }
                ret = ACT_P_CREATED;
-       } else {
+       } else if (!ovr) {
                tcf_idr_release(*a, bind);
-               if (!ovr)
-                       return -EEXIST;
+               return -EEXIST;
        }
        s = to_sample(*a);
 
@@ -80,8 +85,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
        s->psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
        psample_group = psample_group_get(net, s->psample_group_num);
        if (!psample_group) {
-               if (ret == ACT_P_CREATED)
-                       tcf_idr_release(*a, bind);
+               tcf_idr_release(*a, bind);
                return -ENOMEM;
        }
        RCU_INIT_POINTER(s->psample_group, psample_group);
@@ -173,8 +177,8 @@ static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a,
        struct tc_sample opt = {
                .index      = s->tcf_index,
                .action     = s->tcf_action,
-               .refcnt     = s->tcf_refcnt - ref,
-               .bindcnt    = s->tcf_bindcnt - bind,
+               .refcnt     = refcount_read(&s->tcf_refcnt) - ref,
+               .bindcnt    = atomic_read(&s->tcf_bindcnt) - bind,
        };
        struct tcf_t t;
 
@@ -219,6 +223,13 @@ static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
+static int tcf_sample_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, sample_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_sample_ops = {
        .kind     = "sample",
        .type     = TCA_ACT_SAMPLE,
@@ -229,6 +240,7 @@ static struct tc_action_ops act_sample_ops = {
        .cleanup  = tcf_sample_cleanup,
        .walk     = tcf_sample_walker,
        .lookup   = tcf_sample_search,
+       .delete   = tcf_sample_delete,
        .size     = sizeof(struct tcf_sample),
 };
 
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 98c4afe7c15b29a99a3d18e06934e34f0732110d..aa51152e00668e76cccc08e3592769f1f7874915 100644
@@ -79,7 +79,8 @@ static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = {
 
 static int tcf_simp_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
-                        int ovr, int bind, struct netlink_ext_ack *extack)
+                        int ovr, int bind, bool rtnl_held,
+                        struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, simp_net_id);
        struct nlattr *tb[TCA_DEF_MAX + 1];
@@ -99,21 +100,28 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
                return -EINVAL;
 
        parm = nla_data(tb[TCA_DEF_PARMS]);
-       exists = tcf_idr_check(tn, parm->index, a, bind);
+       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       if (err < 0)
+               return err;
+       exists = err;
        if (exists && bind)
                return 0;
 
        if (tb[TCA_DEF_DATA] == NULL) {
                if (exists)
                        tcf_idr_release(*a, bind);
+               else
+                       tcf_idr_cleanup(tn, parm->index);
                return -EINVAL;
        }
 
        if (!exists) {
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_simp_ops, bind, false);
-               if (ret)
+               if (ret) {
+                       tcf_idr_cleanup(tn, parm->index);
                        return ret;
+               }
 
                d = to_defact(*a);
                ret = alloc_defdata(d, tb[TCA_DEF_DATA]);
@@ -126,9 +134,10 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
        } else {
                d = to_defact(*a);
 
-               tcf_idr_release(*a, bind);
-               if (!ovr)
+               if (!ovr) {
+                       tcf_idr_release(*a, bind);
                        return -EEXIST;
+               }
 
                reset_policy(d, tb[TCA_DEF_DATA], parm);
        }
@@ -145,8 +154,8 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
        struct tcf_defact *d = to_defact(a);
        struct tc_defact opt = {
                .index   = d->tcf_index,
-               .refcnt  = d->tcf_refcnt - ref,
-               .bindcnt = d->tcf_bindcnt - bind,
+               .refcnt  = refcount_read(&d->tcf_refcnt) - ref,
+               .bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
                .action  = d->tcf_action,
        };
        struct tcf_t t;
@@ -183,6 +192,13 @@ static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
+static int tcf_simp_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, simp_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_simp_ops = {
        .kind           =       "simple",
        .type           =       TCA_ACT_SIMP,
@@ -193,6 +209,7 @@ static struct tc_action_ops act_simp_ops = {
        .init           =       tcf_simp_init,
        .walk           =       tcf_simp_walker,
        .lookup         =       tcf_simp_search,
+       .delete         =       tcf_simp_delete,
        .size           =       sizeof(struct tcf_defact),
 };
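
act_simple shows the subtle half of the tcf_idr_check_alloc() conversion: any failure between the lookup and tcf_idr_create() must now release the lookup reference when the action already existed, but free the reserved IDR slot when it did not. Condensed from the hunk above:

	if (tb[TCA_DEF_DATA] == NULL) {
		if (exists)
			tcf_idr_release(*a, bind);	  /* drop lookup ref */
		else
			tcf_idr_cleanup(tn, parm->index); /* drop reserved index */
		return -EINVAL;
	}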
 
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 6138d1d71900b561f50578bf22110902bb488bf4..da56e6938c9e094e8e7e7c39edac5b7d473d2c11 100644
@@ -23,6 +23,9 @@
 #include <linux/rtnetlink.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/dsfield.h>
 
 #include <linux/tc_act/tc_skbedit.h>
 #include <net/tc_act/tc_skbedit.h>
@@ -34,25 +37,54 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
                       struct tcf_result *res)
 {
        struct tcf_skbedit *d = to_skbedit(a);
+       struct tcf_skbedit_params *params;
+       int action;
 
-       spin_lock(&d->tcf_lock);
        tcf_lastuse_update(&d->tcf_tm);
-       bstats_update(&d->tcf_bstats, skb);
-
-       if (d->flags & SKBEDIT_F_PRIORITY)
-               skb->priority = d->priority;
-       if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
-           skb->dev->real_num_tx_queues > d->queue_mapping)
-               skb_set_queue_mapping(skb, d->queue_mapping);
-       if (d->flags & SKBEDIT_F_MARK) {
-               skb->mark &= ~d->mask;
-               skb->mark |= d->mark & d->mask;
+       bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
+
+       rcu_read_lock();
+       params = rcu_dereference(d->params);
+       action = READ_ONCE(d->tcf_action);
+
+       if (params->flags & SKBEDIT_F_PRIORITY)
+               skb->priority = params->priority;
+       if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
+               int wlen = skb_network_offset(skb);
+
+               switch (tc_skb_protocol(skb)) {
+               case htons(ETH_P_IP):
+                       wlen += sizeof(struct iphdr);
+                       if (!pskb_may_pull(skb, wlen))
+                               goto err;
+                       skb->priority = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+                       break;
+
+               case htons(ETH_P_IPV6):
+                       wlen += sizeof(struct ipv6hdr);
+                       if (!pskb_may_pull(skb, wlen))
+                               goto err;
+                       skb->priority = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+                       break;
+               }
        }
-       if (d->flags & SKBEDIT_F_PTYPE)
-               skb->pkt_type = d->ptype;
-
-       spin_unlock(&d->tcf_lock);
-       return d->tcf_action;
+       if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
+           skb->dev->real_num_tx_queues > params->queue_mapping)
+               skb_set_queue_mapping(skb, params->queue_mapping);
+       if (params->flags & SKBEDIT_F_MARK) {
+               skb->mark &= ~params->mask;
+               skb->mark |= params->mark & params->mask;
+       }
+       if (params->flags & SKBEDIT_F_PTYPE)
+               skb->pkt_type = params->ptype;
+
+unlock:
+       rcu_read_unlock();
+       return action;
+err:
+       qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
+       action = TC_ACT_SHOT;
+       goto unlock;
 }
 
 static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
@@ -62,13 +94,16 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
        [TCA_SKBEDIT_MARK]              = { .len = sizeof(u32) },
        [TCA_SKBEDIT_PTYPE]             = { .len = sizeof(u16) },
        [TCA_SKBEDIT_MASK]              = { .len = sizeof(u32) },
+       [TCA_SKBEDIT_FLAGS]             = { .len = sizeof(u64) },
 };
 
 static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                            struct nlattr *est, struct tc_action **a,
-                           int ovr, int bind, struct netlink_ext_ack *extack)
+                           int ovr, int bind, bool rtnl_held,
+                           struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);
+       struct tcf_skbedit_params *params_old, *params_new;
        struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
        struct tc_skbedit *parm;
        struct tcf_skbedit *d;
@@ -114,52 +149,76 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                mask = nla_data(tb[TCA_SKBEDIT_MASK]);
        }
 
+       if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
+               u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]);
+
+               if (*pure_flags & SKBEDIT_F_INHERITDSFIELD)
+                       flags |= SKBEDIT_F_INHERITDSFIELD;
+       }
+
        parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
 
-       exists = tcf_idr_check(tn, parm->index, a, bind);
+       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       if (err < 0)
+               return err;
+       exists = err;
        if (exists && bind)
                return 0;
 
        if (!flags) {
                if (exists)
                        tcf_idr_release(*a, bind);
+               else
+                       tcf_idr_cleanup(tn, parm->index);
                return -EINVAL;
        }
 
        if (!exists) {
                ret = tcf_idr_create(tn, parm->index, est, a,
-                                    &act_skbedit_ops, bind, false);
-               if (ret)
+                                    &act_skbedit_ops, bind, true);
+               if (ret) {
+                       tcf_idr_cleanup(tn, parm->index);
                        return ret;
+               }
 
                d = to_skbedit(*a);
                ret = ACT_P_CREATED;
        } else {
                d = to_skbedit(*a);
-               tcf_idr_release(*a, bind);
-               if (!ovr)
+               if (!ovr) {
+                       tcf_idr_release(*a, bind);
                        return -EEXIST;
+               }
        }
 
-       spin_lock_bh(&d->tcf_lock);
+       ASSERT_RTNL();
+
+       params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
+       if (unlikely(!params_new)) {
+               if (ret == ACT_P_CREATED)
+                       tcf_idr_release(*a, bind);
+               return -ENOMEM;
+       }
 
-       d->flags = flags;
+       params_new->flags = flags;
        if (flags & SKBEDIT_F_PRIORITY)
-               d->priority = *priority;
+               params_new->priority = *priority;
        if (flags & SKBEDIT_F_QUEUE_MAPPING)
-               d->queue_mapping = *queue_mapping;
+               params_new->queue_mapping = *queue_mapping;
        if (flags & SKBEDIT_F_MARK)
-               d->mark = *mark;
+               params_new->mark = *mark;
        if (flags & SKBEDIT_F_PTYPE)
-               d->ptype = *ptype;
+               params_new->ptype = *ptype;
        /* default behaviour is to use all the bits */
-       d->mask = 0xffffffff;
+       params_new->mask = 0xffffffff;
        if (flags & SKBEDIT_F_MASK)
-               d->mask = *mask;
+               params_new->mask = *mask;
 
        d->tcf_action = parm->action;
-
-       spin_unlock_bh(&d->tcf_lock);
+       params_old = rtnl_dereference(d->params);
+       rcu_assign_pointer(d->params, params_new);
+       if (params_old)
+               kfree_rcu(params_old, rcu);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
@@ -171,30 +230,39 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_skbedit *d = to_skbedit(a);
+       struct tcf_skbedit_params *params;
        struct tc_skbedit opt = {
                .index   = d->tcf_index,
-               .refcnt  = d->tcf_refcnt - ref,
-               .bindcnt = d->tcf_bindcnt - bind,
+               .refcnt  = refcount_read(&d->tcf_refcnt) - ref,
+               .bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
                .action  = d->tcf_action,
        };
+       u64 pure_flags = 0;
        struct tcf_t t;
 
+       params = rtnl_dereference(d->params);
+
        if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
-       if ((d->flags & SKBEDIT_F_PRIORITY) &&
-           nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, d->priority))
+       if ((params->flags & SKBEDIT_F_PRIORITY) &&
+           nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority))
+               goto nla_put_failure;
+       if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) &&
+           nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping))
                goto nla_put_failure;
-       if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
-           nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, d->queue_mapping))
+       if ((params->flags & SKBEDIT_F_MARK) &&
+           nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark))
                goto nla_put_failure;
-       if ((d->flags & SKBEDIT_F_MARK) &&
-           nla_put_u32(skb, TCA_SKBEDIT_MARK, d->mark))
+       if ((params->flags & SKBEDIT_F_PTYPE) &&
+           nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype))
                goto nla_put_failure;
-       if ((d->flags & SKBEDIT_F_PTYPE) &&
-           nla_put_u16(skb, TCA_SKBEDIT_PTYPE, d->ptype))
+       if ((params->flags & SKBEDIT_F_MASK) &&
+           nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask))
                goto nla_put_failure;
-       if ((d->flags & SKBEDIT_F_MASK) &&
-           nla_put_u32(skb, TCA_SKBEDIT_MASK, d->mask))
+       if (params->flags & SKBEDIT_F_INHERITDSFIELD)
+               pure_flags |= SKBEDIT_F_INHERITDSFIELD;
+       if (pure_flags != 0 &&
+           nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
                goto nla_put_failure;
 
        tcf_tm_dump(&t, &d->tcf_tm);
@@ -207,6 +275,16 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
        return -1;
 }
 
+static void tcf_skbedit_cleanup(struct tc_action *a)
+{
+       struct tcf_skbedit *d = to_skbedit(a);
+       struct tcf_skbedit_params *params;
+
+       params = rcu_dereference_protected(d->params, 1);
+       if (params)
+               kfree_rcu(params, rcu);
+}
+
 static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
                              struct netlink_callback *cb, int type,
                              const struct tc_action_ops *ops,
@@ -225,6 +303,13 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
+static int tcf_skbedit_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, skbedit_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_skbedit_ops = {
        .kind           =       "skbedit",
        .type           =       TCA_ACT_SKBEDIT,
@@ -232,8 +317,10 @@ static struct tc_action_ops act_skbedit_ops = {
        .act            =       tcf_skbedit,
        .dump           =       tcf_skbedit_dump,
        .init           =       tcf_skbedit_init,
+       .cleanup        =       tcf_skbedit_cleanup,
        .walk           =       tcf_skbedit_walker,
        .lookup         =       tcf_skbedit_search,
+       .delete         =       tcf_skbedit_delete,
        .size           =       sizeof(struct tcf_skbedit),
 };
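
act_skbedit above trades its per-action spinlock for RCU-managed parameters: the datapath dereferences d->params inside rcu_read_lock(), while the control path, still under RTNL, swaps in a freshly allocated copy and frees the old one after a grace period. The essential reader/writer pattern, condensed from the hunks:

	/* datapath (tcf_skbedit) */
	rcu_read_lock();
	params = rcu_dereference(d->params);
	action = READ_ONCE(d->tcf_action);
	/* ... apply params->flags edits to the skb ... */
	rcu_read_unlock();

	/* control path (tcf_skbedit_init), under RTNL */
	params_old = rtnl_dereference(d->params);
	rcu_assign_pointer(d->params, params_new);
	if (params_old)
		kfree_rcu(params_old, rcu);

The new SKBEDIT_F_INHERITDSFIELD flag makes the action derive skb->priority from the packet's IPv4/IPv6 DS field instead of a fixed value; with an iproute2 that knows the flag this would read something like "action skbedit inheritdsfield" (keyword assumed from the matching iproute2 change).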
 
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index ad050d7d4b46a2d45f85e15bb7e68d28915f1d54..cdc6bacfb19078a8be10d846ea5b356c47f849ed 100644
@@ -84,7 +84,8 @@ static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
 
 static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
-                          int ovr, int bind, struct netlink_ext_ack *extack)
+                          int ovr, int bind, bool rtnl_held,
+                          struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, skbmod_net_id);
        struct nlattr *tb[TCA_SKBMOD_MAX + 1];
@@ -127,27 +128,33 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
        if (parm->flags & SKBMOD_F_SWAPMAC)
                lflags = SKBMOD_F_SWAPMAC;
 
-       exists = tcf_idr_check(tn, parm->index, a, bind);
+       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       if (err < 0)
+               return err;
+       exists = err;
        if (exists && bind)
                return 0;
 
        if (!lflags) {
                if (exists)
                        tcf_idr_release(*a, bind);
+               else
+                       tcf_idr_cleanup(tn, parm->index);
                return -EINVAL;
        }
 
        if (!exists) {
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_skbmod_ops, bind, true);
-               if (ret)
+               if (ret) {
+                       tcf_idr_cleanup(tn, parm->index);
                        return ret;
+               }
 
                ret = ACT_P_CREATED;
-       } else {
+       } else if (!ovr) {
                tcf_idr_release(*a, bind);
-               if (!ovr)
-                       return -EEXIST;
+               return -EEXIST;
        }
 
        d = to_skbmod(*a);
@@ -155,8 +162,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
        ASSERT_RTNL();
        p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
        if (unlikely(!p)) {
-               if (ret == ACT_P_CREATED)
-                       tcf_idr_release(*a, bind);
+               tcf_idr_release(*a, bind);
                return -ENOMEM;
        }
 
@@ -205,8 +211,8 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
        struct tcf_skbmod_params  *p = rtnl_dereference(d->skbmod_p);
        struct tc_skbmod opt = {
                .index   = d->tcf_index,
-               .refcnt  = d->tcf_refcnt - ref,
-               .bindcnt = d->tcf_bindcnt - bind,
+               .refcnt  = refcount_read(&d->tcf_refcnt) - ref,
+               .bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
                .action  = d->tcf_action,
        };
        struct tcf_t t;
@@ -252,6 +258,13 @@ static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
+static int tcf_skbmod_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_skbmod_ops = {
        .kind           =       "skbmod",
        .type           =       TCA_ACT_SKBMOD,
@@ -262,6 +275,7 @@ static struct tc_action_ops act_skbmod_ops = {
        .cleanup        =       tcf_skbmod_cleanup,
        .walk           =       tcf_skbmod_walker,
        .lookup         =       tcf_skbmod_search,
+       .delete         =       tcf_skbmod_delete,
        .size           =       sizeof(struct tcf_skbmod),
 };
 
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 626dac81a48a6b2ab97e9d0c786b08989f693288..3ec585d587629e282e9be08bb493c70c79dcbaa5 100644
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
+#include <net/geneve.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
@@ -57,6 +58,135 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
        return action;
 }
 
+static const struct nla_policy
+enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = {
+       [TCA_TUNNEL_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy
+geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
+       [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
+       [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
+       [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
+                                                      .len = 128 },
+};
+
+static int
+tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
+                          struct netlink_ext_ack *extack)
+{
+       struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1];
+       int err, data_len, opt_len;
+       u8 *data;
+
+       err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
+                              nla, geneve_opt_policy, extack);
+       if (err < 0)
+               return err;
+
+       if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] ||
+           !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] ||
+           !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) {
+               NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
+               return -EINVAL;
+       }
+
+       data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
+       data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
+       if (data_len < 4) {
+               NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
+               return -ERANGE;
+       }
+       if (data_len % 4) {
+               NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
+               return -ERANGE;
+       }
+
+       opt_len = sizeof(struct geneve_opt) + data_len;
+       if (dst) {
+               struct geneve_opt *opt = dst;
+
+               WARN_ON(dst_len < opt_len);
+
+               opt->opt_class =
+                       nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]);
+               opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]);
+               opt->length = data_len / 4; /* length is in units of 4 bytes */
+               opt->r1 = 0;
+               opt->r2 = 0;
+               opt->r3 = 0;
+
+               memcpy(opt + 1, data, data_len);
+       }
+
+       return opt_len;
+}
+
+static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
+                               int dst_len, struct netlink_ext_ack *extack)
+{
+       int err, rem, opt_len, len = nla_len(nla), opts_len = 0;
+       const struct nlattr *attr, *head = nla_data(nla);
+
+       err = nla_validate(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
+                          enc_opts_policy, extack);
+       if (err)
+               return err;
+
+       nla_for_each_attr(attr, head, len, rem) {
+               switch (nla_type(attr)) {
+               case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
+                       opt_len = tunnel_key_copy_geneve_opt(attr, dst,
+                                                            dst_len, extack);
+                       if (opt_len < 0)
+                               return opt_len;
+                       opts_len += opt_len;
+                       if (dst) {
+                               dst_len -= opt_len;
+                               dst += opt_len;
+                       }
+                       break;
+               }
+       }
+
+       if (!opts_len) {
+               NL_SET_ERR_MSG(extack, "Empty list of tunnel options");
+               return -EINVAL;
+       }
+
+       if (rem > 0) {
+               NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes");
+               return -EINVAL;
+       }
+
+       return opts_len;
+}
+
+static int tunnel_key_get_opts_len(struct nlattr *nla,
+                                  struct netlink_ext_ack *extack)
+{
+       return tunnel_key_copy_opts(nla, NULL, 0, extack);
+}
+
+static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
+                              int opts_len, struct netlink_ext_ack *extack)
+{
+       info->options_len = opts_len;
+       switch (nla_type(nla_data(nla))) {
+       case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
+#if IS_ENABLED(CONFIG_INET)
+               info->key.tun_flags |= TUNNEL_GENEVE_OPT;
+               return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
+                                           opts_len, extack);
+#else
+               return -EAFNOSUPPORT;
+#endif
+       default:
+               NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type");
+               return -EINVAL;
+       }
+}
+
 static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
        [TCA_TUNNEL_KEY_PARMS]      = { .len = sizeof(struct tc_tunnel_key) },
        [TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
@@ -66,11 +196,13 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
        [TCA_TUNNEL_KEY_ENC_KEY_ID]   = { .type = NLA_U32 },
        [TCA_TUNNEL_KEY_ENC_DST_PORT] = {.type = NLA_U16},
        [TCA_TUNNEL_KEY_NO_CSUM]      = { .type = NLA_U8 },
+       [TCA_TUNNEL_KEY_ENC_OPTS]     = { .type = NLA_NESTED },
 };
 
 static int tunnel_key_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
-                          int ovr, int bind, struct netlink_ext_ack *extack)
+                          int ovr, int bind, bool rtnl_held,
+                          struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
        struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
@@ -81,24 +213,34 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
        struct tcf_tunnel_key *t;
        bool exists = false;
        __be16 dst_port = 0;
+       int opts_len = 0;
        __be64 key_id;
        __be16 flags;
        int ret = 0;
        int err;
 
-       if (!nla)
+       if (!nla) {
+               NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
                return -EINVAL;
+       }
 
        err = nla_parse_nested(tb, TCA_TUNNEL_KEY_MAX, nla, tunnel_key_policy,
-                              NULL);
-       if (err < 0)
+                              extack);
+       if (err < 0) {
+               NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
                return err;
+       }
 
-       if (!tb[TCA_TUNNEL_KEY_PARMS])
+       if (!tb[TCA_TUNNEL_KEY_PARMS]) {
+               NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
                return -EINVAL;
+       }
 
        parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
-       exists = tcf_idr_check(tn, parm->index, a, bind);
+       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       if (err < 0)
+               return err;
+       exists = err;
        if (exists && bind)
                return 0;
 
@@ -107,6 +249,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                break;
        case TCA_TUNNEL_KEY_ACT_SET:
                if (!tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
+                       NL_SET_ERR_MSG(extack, "Missing tunnel key id");
                        ret = -EINVAL;
                        goto err_out;
                }
@@ -121,6 +264,15 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
                        dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);
 
+               if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
+                       opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
+                                                          extack);
+                       if (opts_len < 0) {
+                               ret = opts_len;
+                               goto err_out;
+                       }
+               }
+
                if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
                    tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
                        __be32 saddr;
@@ -131,7 +283,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 
                        metadata = __ip_tun_set_dst(saddr, daddr, 0, 0,
                                                    dst_port, flags,
-                                                   key_id, 0);
+                                                   key_id, opts_len);
                } else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
                           tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
                        struct in6_addr saddr;
@@ -143,16 +295,30 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                        metadata = __ipv6_tun_set_dst(&saddr, &daddr, 0, 0, dst_port,
                                                      0, flags,
                                                      key_id, 0);
+               } else {
+                       NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
+                       ret = -EINVAL;
+                       goto err_out;
                }
 
                if (!metadata) {
-                       ret = -EINVAL;
+                       NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
+                       ret = -ENOMEM;
                        goto err_out;
                }
 
+               if (opts_len) {
+                       ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
+                                                 &metadata->u.tun_info,
+                                                 opts_len, extack);
+                       if (ret < 0)
+                               goto err_out;
+               }
+
                metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
                break;
        default:
+               NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
                ret = -EINVAL;
                goto err_out;
        }
@@ -160,14 +326,16 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
        if (!exists) {
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_tunnel_key_ops, bind, true);
-               if (ret)
-                       return ret;
+               if (ret) {
+                       NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
+                       goto err_out;
+               }
 
                ret = ACT_P_CREATED;
-       } else {
+       } else if (!ovr) {
                tcf_idr_release(*a, bind);
-               if (!ovr)
-                       return -EEXIST;
+               NL_SET_ERR_MSG(extack, "TC IDR already exists");
+               return -EEXIST;
        }
 
        t = to_tunnel_key(*a);
@@ -175,8 +343,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
        ASSERT_RTNL();
        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
-               if (ret == ACT_P_CREATED)
-                       tcf_idr_release(*a, bind);
+               tcf_idr_release(*a, bind);
+               NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
                return -ENOMEM;
        }
 
@@ -199,6 +367,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 err_out:
        if (exists)
                tcf_idr_release(*a, bind);
+       else
+               tcf_idr_cleanup(tn, parm->index);
        return ret;
 }
 
@@ -216,6 +386,61 @@ static void tunnel_key_release(struct tc_action *a)
        }
 }
 
+static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
+                                      const struct ip_tunnel_info *info)
+{
+       int len = info->options_len;
+       u8 *src = (u8 *)(info + 1);
+       struct nlattr *start;
+
+       start = nla_nest_start(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
+       if (!start)
+               return -EMSGSIZE;
+
+       while (len > 0) {
+               struct geneve_opt *opt = (struct geneve_opt *)src;
+
+               if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
+                                opt->opt_class) ||
+                   nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
+                              opt->type) ||
+                   nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
+                           opt->length * 4, opt + 1))
+                       return -EMSGSIZE;
+
+               len -= sizeof(struct geneve_opt) + opt->length * 4;
+               src += sizeof(struct geneve_opt) + opt->length * 4;
+       }
+
+       nla_nest_end(skb, start);
+       return 0;
+}
+
+static int tunnel_key_opts_dump(struct sk_buff *skb,
+                               const struct ip_tunnel_info *info)
+{
+       struct nlattr *start;
+       int err;
+
+       if (!info->options_len)
+               return 0;
+
+       start = nla_nest_start(skb, TCA_TUNNEL_KEY_ENC_OPTS);
+       if (!start)
+               return -EMSGSIZE;
+
+       if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
+               err = tunnel_key_geneve_opts_dump(skb, info);
+               if (err)
+                       return err;
+       } else {
+               return -EINVAL;
+       }
+
+       nla_nest_end(skb, start);
+       return 0;
+}
+
 static int tunnel_key_dump_addresses(struct sk_buff *skb,
                                     const struct ip_tunnel_info *info)
 {
@@ -252,8 +477,8 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
        struct tcf_tunnel_key_params *params;
        struct tc_tunnel_key opt = {
                .index    = t->tcf_index,
-               .refcnt   = t->tcf_refcnt - ref,
-               .bindcnt  = t->tcf_bindcnt - bind,
+               .refcnt   = refcount_read(&t->tcf_refcnt) - ref,
+               .bindcnt  = atomic_read(&t->tcf_bindcnt) - bind,
        };
        struct tcf_t tm;
 
@@ -266,8 +491,9 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
                goto nla_put_failure;
 
        if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
-               struct ip_tunnel_key *key =
-                       &params->tcft_enc_metadata->u.tun_info.key;
+               struct ip_tunnel_info *info =
+                       &params->tcft_enc_metadata->u.tun_info;
+               struct ip_tunnel_key *key = &info->key;
                __be32 key_id = tunnel_id_to_key32(key->tun_id);
 
                if (nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id) ||
@@ -275,7 +501,8 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
                                              &params->tcft_enc_metadata->u.tun_info) ||
                    nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT, key->tp_dst) ||
                    nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
-                              !(key->tun_flags & TUNNEL_CSUM)))
+                              !(key->tun_flags & TUNNEL_CSUM)) ||
+                   tunnel_key_opts_dump(skb, info))
                        goto nla_put_failure;
        }
 
@@ -309,6 +536,13 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
+static int tunnel_key_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_tunnel_key_ops = {
        .kind           =       "tunnel_key",
        .type           =       TCA_ACT_TUNNEL_KEY,
@@ -319,6 +553,7 @@ static struct tc_action_ops act_tunnel_key_ops = {
        .cleanup        =       tunnel_key_release,
        .walk           =       tunnel_key_walker,
        .lookup         =       tunnel_key_search,
+       .delete         =       tunnel_key_delete,
        .size           =       sizeof(struct tcf_tunnel_key),
 };
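
act_tunnel_key gains TCA_TUNNEL_KEY_ENC_OPTS for carrying tunnel metadata options, geneve-only for now: each nested option must supply class, type and a data blob whose length is a non-zero multiple of 4 bytes, and the total length is computed up front (tunnel_key_get_opts_len() runs the copy routine with a NULL destination) so the metadata dst can be allocated in one go. With a matching iproute2, setting a single geneve option would look roughly like the following (the geneve_opts CLASS:TYPE:DATA hex syntax is assumed from the corresponding iproute2 change):

	tc filter add dev eth0 protocol ip parent ffff: flower \
		action tunnel_key set src_ip 10.0.0.1 dst_ip 10.0.0.2 \
		id 42 dst_port 6081 geneve_opts 0102:80:00800022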
 
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 1fb39e1f9d077beb4fdb440459f18116b561f334..ad37f308175ad0ab574356552a5c9d9328d5b2a2 100644
@@ -109,7 +109,8 @@ static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
 
 static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
-                        int ovr, int bind, struct netlink_ext_ack *extack)
+                        int ovr, int bind, bool rtnl_held,
+                        struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, vlan_net_id);
        struct nlattr *tb[TCA_VLAN_MAX + 1];
@@ -133,7 +134,10 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
        if (!tb[TCA_VLAN_PARMS])
                return -EINVAL;
        parm = nla_data(tb[TCA_VLAN_PARMS]);
-       exists = tcf_idr_check(tn, parm->index, a, bind);
+       err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+       if (err < 0)
+               return err;
+       exists = err;
        if (exists && bind)
                return 0;
 
@@ -145,12 +149,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
                        if (exists)
                                tcf_idr_release(*a, bind);
+                       else
+                               tcf_idr_cleanup(tn, parm->index);
                        return -EINVAL;
                }
                push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
                if (push_vid >= VLAN_VID_MASK) {
                        if (exists)
                                tcf_idr_release(*a, bind);
+                       else
+                               tcf_idr_cleanup(tn, parm->index);
                        return -ERANGE;
                }
 
@@ -163,6 +171,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                        default:
                                if (exists)
                                        tcf_idr_release(*a, bind);
+                               else
+                                       tcf_idr_cleanup(tn, parm->index);
                                return -EPROTONOSUPPORT;
                        }
                } else {
@@ -175,6 +185,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
        default:
                if (exists)
                        tcf_idr_release(*a, bind);
+               else
+                       tcf_idr_cleanup(tn, parm->index);
                return -EINVAL;
        }
        action = parm->v_action;
@@ -182,14 +194,15 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
        if (!exists) {
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_vlan_ops, bind, true);
-               if (ret)
+               if (ret) {
+                       tcf_idr_cleanup(tn, parm->index);
                        return ret;
+               }
 
                ret = ACT_P_CREATED;
-       } else {
+       } else if (!ovr) {
                tcf_idr_release(*a, bind);
-               if (!ovr)
-                       return -EEXIST;
+               return -EEXIST;
        }
 
        v = to_vlan(*a);
@@ -197,8 +210,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
        ASSERT_RTNL();
        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p) {
-               if (ret == ACT_P_CREATED)
-                       tcf_idr_release(*a, bind);
+               tcf_idr_release(*a, bind);
                return -ENOMEM;
        }
 
@@ -239,8 +251,8 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
        struct tcf_vlan_params *p = rtnl_dereference(v->vlan_p);
        struct tc_vlan opt = {
                .index    = v->tcf_index,
-               .refcnt   = v->tcf_refcnt - ref,
-               .bindcnt  = v->tcf_bindcnt - bind,
+               .refcnt   = refcount_read(&v->tcf_refcnt) - ref,
+               .bindcnt  = atomic_read(&v->tcf_bindcnt) - bind,
                .action   = v->tcf_action,
                .v_action = p->tcfv_action,
        };
@@ -286,6 +298,13 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
+static int tcf_vlan_delete(struct net *net, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, vlan_net_id);
+
+       return tcf_idr_delete_index(tn, index);
+}
+
 static struct tc_action_ops act_vlan_ops = {
        .kind           =       "vlan",
        .type           =       TCA_ACT_VLAN,
@@ -296,6 +315,7 @@ static struct tc_action_ops act_vlan_ops = {
        .cleanup        =       tcf_vlan_cleanup,
        .walk           =       tcf_vlan_walker,
        .lookup         =       tcf_vlan_search,
+       .delete         =       tcf_vlan_delete,
        .size           =       sizeof(struct tcf_vlan),
 };
 
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index cdc3c87c53e62d4db4bb18fa5f59d7889b9866cb..c51b1b12450d0a0c33696779e52ff95593eb1696 100644
@@ -277,18 +277,21 @@ static bool tcf_block_offload_in_use(struct tcf_block *block)
 static int tcf_block_offload_cmd(struct tcf_block *block,
                                 struct net_device *dev,
                                 struct tcf_block_ext_info *ei,
-                                enum tc_block_command command)
+                                enum tc_block_command command,
+                                struct netlink_ext_ack *extack)
 {
        struct tc_block_offload bo = {};
 
        bo.command = command;
        bo.binder_type = ei->binder_type;
        bo.block = block;
+       bo.extack = extack;
        return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
 }
 
 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
-                                 struct tcf_block_ext_info *ei)
+                                 struct tcf_block_ext_info *ei,
+                                 struct netlink_ext_ack *extack)
 {
        struct net_device *dev = q->dev_queue->dev;
        int err;
@@ -299,10 +302,12 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
        /* If tc offload feature is disabled and the block we try to bind
         * to already has some offloaded filters, forbid to bind.
         */
-       if (!tc_can_offload(dev) && tcf_block_offload_in_use(block))
+       if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
+               NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
                return -EOPNOTSUPP;
+       }
 
-       err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND);
+       err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_inc;
        return err;
@@ -322,7 +327,7 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
 
        if (!dev->netdev_ops->ndo_setup_tc)
                goto no_offload_dev_dec;
-       err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND);
+       err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_dec;
        return;
@@ -612,7 +617,7 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
        if (err)
                goto err_chain_head_change_cb_add;
 
-       err = tcf_block_offload_bind(block, q, ei);
+       err = tcf_block_offload_bind(block, q, ei, extack);
        if (err)
                goto err_block_offload_bind;
 
@@ -746,18 +751,53 @@ unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
 }
 EXPORT_SYMBOL(tcf_block_cb_decref);
 
+static int
+tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
+                           void *cb_priv, bool add, bool offload_in_use,
+                           struct netlink_ext_ack *extack)
+{
+       struct tcf_chain *chain;
+       struct tcf_proto *tp;
+       int err;
+
+       list_for_each_entry(chain, &block->chain_list, list) {
+               for (tp = rtnl_dereference(chain->filter_chain); tp;
+                    tp = rtnl_dereference(tp->next)) {
+                       if (tp->ops->reoffload) {
+                               err = tp->ops->reoffload(tp, add, cb, cb_priv,
+                                                        extack);
+                               if (err && add)
+                                       goto err_playback_remove;
+                       } else if (add && offload_in_use) {
+                               err = -EOPNOTSUPP;
+                               NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
+                               goto err_playback_remove;
+                       }
+               }
+       }
+
+       return 0;
+
+err_playback_remove:
+       tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
+                                   extack);
+       return err;
+}
+
 struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
                                             tc_setup_cb_t *cb, void *cb_ident,
-                                            void *cb_priv)
+                                            void *cb_priv,
+                                            struct netlink_ext_ack *extack)
 {
        struct tcf_block_cb *block_cb;
+       int err;
 
-       /* At this point, playback of previous block cb calls is not supported,
-        * so forbid to register to block which already has some offloaded
-        * filters present.
-        */
-       if (tcf_block_offload_in_use(block))
-               return ERR_PTR(-EOPNOTSUPP);
+       /* Replay any already present rules */
+       err = tcf_block_playback_offloads(block, cb, cb_priv, true,
+                                         tcf_block_offload_in_use(block),
+                                         extack);
+       if (err)
+               return ERR_PTR(err);
 
        block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
        if (!block_cb)
@@ -772,17 +812,22 @@ EXPORT_SYMBOL(__tcf_block_cb_register);
 
 int tcf_block_cb_register(struct tcf_block *block,
                          tc_setup_cb_t *cb, void *cb_ident,
-                         void *cb_priv)
+                         void *cb_priv, struct netlink_ext_ack *extack)
 {
        struct tcf_block_cb *block_cb;
 
-       block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv);
+       block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv,
+                                          extack);
        return IS_ERR(block_cb) ? PTR_ERR(block_cb) : 0;
 }
 EXPORT_SYMBOL(tcf_block_cb_register);
 
-void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
+void __tcf_block_cb_unregister(struct tcf_block *block,
+                              struct tcf_block_cb *block_cb)
 {
+       tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv,
+                                   false, tcf_block_offload_in_use(block),
+                                   NULL);
        list_del(&block_cb->list);
        kfree(block_cb);
 }
@@ -796,7 +841,7 @@ void tcf_block_cb_unregister(struct tcf_block *block,
        block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
        if (!block_cb)
                return;
-       __tcf_block_cb_unregister(block_cb);
+       __tcf_block_cb_unregister(block, block_cb);
 }
 EXPORT_SYMBOL(tcf_block_cb_unregister);
 
@@ -1463,7 +1508,9 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
                arg.w.stop = 0;
                arg.w.skip = cb->args[1] - 1;
                arg.w.count = 0;
+               arg.w.cookie = cb->args[2];
                tp->ops->walk(tp, &arg.w);
+               cb->args[2] = arg.w.cookie;
                cb->args[1] = arg.w.count + 1;
                if (arg.w.stop)
                        return false;
@@ -1564,11 +1611,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 void tcf_exts_destroy(struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
-       LIST_HEAD(actions);
-
-       ASSERT_RTNL();
-       tcf_exts_to_list(exts, &actions);
-       tcf_action_destroy(&actions, TCA_ACT_UNBIND);
+       tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
        kfree(exts->actions);
        exts->nr_actions = 0;
 #endif
@@ -1587,7 +1630,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
                if (exts->police && tb[exts->police]) {
                        act = tcf_action_init_1(net, tp, tb[exts->police],
                                                rate_tlv, "police", ovr,
-                                               TCA_ACT_BIND, extack);
+                                               TCA_ACT_BIND, true, extack);
                        if (IS_ERR(act))
                                return PTR_ERR(act);
 
@@ -1595,17 +1638,15 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
                        exts->actions[0] = act;
                        exts->nr_actions = 1;
                } else if (exts->action && tb[exts->action]) {
-                       LIST_HEAD(actions);
-                       int err, i = 0;
+                       int err;
 
                        err = tcf_action_init(net, tp, tb[exts->action],
                                              rate_tlv, NULL, ovr, TCA_ACT_BIND,
-                                             &actions, &attr_size, extack);
-                       if (err)
+                                             exts->actions, &attr_size, true,
+                                             extack);
+                       if (err < 0)
                                return err;
-                       list_for_each_entry(act, &actions, list)
-                               exts->actions[i++] = act;
-                       exts->nr_actions = i;
+                       exts->nr_actions = err;
                }
                exts->net = net;
        }
@@ -1654,14 +1695,11 @@ int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
                 * tc data even if iproute2  was newer - jhs
                 */
                if (exts->type != TCA_OLD_COMPAT) {
-                       LIST_HEAD(actions);
-
                        nest = nla_nest_start(skb, exts->action);
                        if (nest == NULL)
                                goto nla_put_failure;
 
-                       tcf_exts_to_list(exts, &actions);
-                       if (tcf_action_dump(skb, &actions, 0, 0) < 0)
+                       if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
                                goto nla_put_failure;
                        nla_nest_end(skb, nest);
                } else if (exts->police) {
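
The cls_api change drops the old rule that a block callback could not be registered while offloaded filters were present: __tcf_block_cb_register() now replays every existing filter to the new callback through the classifier's ->reoffload() op, and __tcf_block_cb_unregister() replays with add=false to remove them again. Both register entry points therefore grew an extack argument, which a driver can simply forward from the bind request (a sketch; the driver callback name is hypothetical):

	/* inside a driver's TC_SETUP_BLOCK / TC_BLOCK_BIND handler,
	 * where f is the struct tc_block_offload it was handed
	 */
	err = tcf_block_cb_register(f->block, mydrv_setup_tc_block_cb,
				    priv, priv, f->extack);
	if (err)
		return err;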
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 1aa7f6511065a1d1da3eda4a409c66c5ce0bc773..66e0ac9811f9e3811a9b3b19bb453022c7004879 100644
@@ -43,6 +43,7 @@ struct cls_bpf_prog {
        struct tcf_result res;
        bool exts_integrated;
        u32 gen_flags;
+       unsigned int in_hw_count;
        struct tcf_exts exts;
        u32 handle;
        u16 bpf_num_ops;
@@ -174,6 +175,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                        cls_bpf_offload_cmd(tp, oldprog, prog, extack);
                        return err;
                } else if (err > 0) {
+                       prog->in_hw_count = err;
                        tcf_block_offload_inc(block, &prog->gen_flags);
                }
        }
@@ -652,6 +654,42 @@ static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
        }
 }
 
+static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+                            void *cb_priv, struct netlink_ext_ack *extack)
+{
+       struct cls_bpf_head *head = rtnl_dereference(tp->root);
+       struct tcf_block *block = tp->chain->block;
+       struct tc_cls_bpf_offload cls_bpf = {};
+       struct cls_bpf_prog *prog;
+       int err;
+
+       list_for_each_entry(prog, &head->plist, link) {
+               if (tc_skip_hw(prog->gen_flags))
+                       continue;
+
+               tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
+                                          extack);
+               cls_bpf.command = TC_CLSBPF_OFFLOAD;
+               cls_bpf.exts = &prog->exts;
+               cls_bpf.prog = add ? prog->filter : NULL;
+               cls_bpf.oldprog = add ? NULL : prog->filter;
+               cls_bpf.name = prog->bpf_name;
+               cls_bpf.exts_integrated = prog->exts_integrated;
+
+               err = cb(TC_SETUP_CLSBPF, &cls_bpf, cb_priv);
+               if (err) {
+                       if (add && tc_skip_sw(prog->gen_flags))
+                               return err;
+                       continue;
+               }
+
+               tc_cls_offload_cnt_update(block, &prog->in_hw_count,
+                                         &prog->gen_flags, add);
+       }
+
+       return 0;
+}
+
 static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
        .kind           =       "bpf",
        .owner          =       THIS_MODULE,
@@ -662,6 +700,7 @@ static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
        .change         =       cls_bpf_change,
        .delete         =       cls_bpf_delete,
        .walk           =       cls_bpf_walk,
+       .reoffload      =       cls_bpf_reoffload,
        .dump           =       cls_bpf_dump,
        .bind_class     =       cls_bpf_bind_class,
 };
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 2b5be42a9f1ca8e63952158ed2b9339e1a308d0b..c53fdd411f90def307e88ee22098313806f51e9e 100644 (file)
@@ -35,6 +35,7 @@ struct fl_flow_key {
        struct flow_dissector_key_basic basic;
        struct flow_dissector_key_eth_addrs eth;
        struct flow_dissector_key_vlan vlan;
+       struct flow_dissector_key_vlan cvlan;
        union {
                struct flow_dissector_key_ipv4_addrs ipv4;
                struct flow_dissector_key_ipv6_addrs ipv6;
@@ -66,7 +67,7 @@ struct fl_flow_mask {
        struct rhashtable_params filter_ht_params;
        struct flow_dissector dissector;
        struct list_head filters;
-       struct rcu_head rcu;
+       struct rcu_work rwork;
        struct list_head list;
 };
 
@@ -87,6 +88,7 @@ struct cls_fl_filter {
        struct list_head list;
        u32 handle;
        u32 flags;
+       unsigned int in_hw_count;
        struct rcu_work rwork;
        struct net_device *hw_dev;
 };
@@ -203,6 +205,20 @@ static int fl_init(struct tcf_proto *tp)
        return rhashtable_init(&head->ht, &mask_ht_params);
 }
 
+static void fl_mask_free(struct fl_flow_mask *mask)
+{
+       rhashtable_destroy(&mask->ht);
+       kfree(mask);
+}
+
+static void fl_mask_free_work(struct work_struct *work)
+{
+       struct fl_flow_mask *mask = container_of(to_rcu_work(work),
+                                                struct fl_flow_mask, rwork);
+
+       fl_mask_free(mask);
+}
+
 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
                        bool async)
 {
@@ -210,12 +226,11 @@ static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
                return false;
 
        rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
-       rhashtable_destroy(&mask->ht);
        list_del_rcu(&mask->list);
        if (async)
-               kfree_rcu(mask, rcu);
+               tcf_queue_work(&mask->rwork, fl_mask_free_work);
        else
-               kfree(mask);
+               fl_mask_free(mask);
 
        return true;
 }
@@ -276,6 +291,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
                fl_hw_destroy_filter(tp, f, NULL);
                return err;
        } else if (err > 0) {
+               f->in_hw_count = err;
                tcf_block_offload_inc(block, &f->flags);
        }
 
@@ -434,6 +450,9 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
        [TCA_FLOWER_KEY_IP_TOS_MASK]    = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_IP_TTL]         = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_IP_TTL_MASK]    = { .type = NLA_U8 },
+       [TCA_FLOWER_KEY_CVLAN_ID]       = { .type = NLA_U16 },
+       [TCA_FLOWER_KEY_CVLAN_PRIO]     = { .type = NLA_U8 },
+       [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 },
 };
 
 static void fl_set_key_val(struct nlattr **tb,
@@ -485,22 +504,26 @@ static int fl_set_key_mpls(struct nlattr **tb,
 }
 
 static void fl_set_key_vlan(struct nlattr **tb,
+                           __be16 ethertype,
+                           int vlan_id_key, int vlan_prio_key,
                            struct flow_dissector_key_vlan *key_val,
                            struct flow_dissector_key_vlan *key_mask)
 {
 #define VLAN_PRIORITY_MASK     0x7
 
-       if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
+       if (tb[vlan_id_key]) {
                key_val->vlan_id =
-                       nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
+                       nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
                key_mask->vlan_id = VLAN_VID_MASK;
        }
-       if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
+       if (tb[vlan_prio_key]) {
                key_val->vlan_priority =
-                       nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
+                       nla_get_u8(tb[vlan_prio_key]) &
                        VLAN_PRIORITY_MASK;
                key_mask->vlan_priority = VLAN_PRIORITY_MASK;
        }
+       key_val->vlan_tpid = ethertype;
+       key_mask->vlan_tpid = cpu_to_be16(~0);
 }
 
 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
@@ -577,12 +600,28 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
        if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
                ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
 
-               if (ethertype == htons(ETH_P_8021Q)) {
-                       fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
-                       fl_set_key_val(tb, &key->basic.n_proto,
-                                      TCA_FLOWER_KEY_VLAN_ETH_TYPE,
-                                      &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
-                                      sizeof(key->basic.n_proto));
+               if (eth_type_vlan(ethertype)) {
+                       fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
+                                       TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
+                                       &mask->vlan);
+
+                       if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
+                               ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
+                               if (eth_type_vlan(ethertype)) {
+                                       fl_set_key_vlan(tb, ethertype,
+                                                       TCA_FLOWER_KEY_CVLAN_ID,
+                                                       TCA_FLOWER_KEY_CVLAN_PRIO,
+                                                       &key->cvlan, &mask->cvlan);
+                                       fl_set_key_val(tb, &key->basic.n_proto,
+                                                      TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
+                                                      &mask->basic.n_proto,
+                                                      TCA_FLOWER_UNSPEC,
+                                                      sizeof(key->basic.n_proto));
+                               } else {
+                                       key->basic.n_proto = ethertype;
+                                       mask->basic.n_proto = cpu_to_be16(~0);
+                               }
+                       }
                } else {
                        key->basic.n_proto = ethertype;
                        mask->basic.n_proto = cpu_to_be16(~0);
@@ -807,6 +846,8 @@ static void fl_init_dissector(struct fl_flow_mask *mask)
                             FLOW_DISSECTOR_KEY_MPLS, mpls);
        FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
                             FLOW_DISSECTOR_KEY_VLAN, vlan);
+       FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+                            FLOW_DISSECTOR_KEY_CVLAN, cvlan);
        FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
                             FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
        FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
@@ -1058,20 +1099,59 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
        struct cls_fl_head *head = rtnl_dereference(tp->root);
        struct cls_fl_filter *f;
+
+       arg->count = arg->skip;
+
+       while ((f = idr_get_next_ul(&head->handle_idr,
+                                   &arg->cookie)) != NULL) {
+               if (arg->fn(tp, f, arg) < 0) {
+                       arg->stop = 1;
+                       break;
+               }
+               arg->cookie = f->handle + 1;
+               arg->count++;
+       }
+}
+
+static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+                       void *cb_priv, struct netlink_ext_ack *extack)
+{
+       struct cls_fl_head *head = rtnl_dereference(tp->root);
+       struct tc_cls_flower_offload cls_flower = {};
+       struct tcf_block *block = tp->chain->block;
        struct fl_flow_mask *mask;
+       struct cls_fl_filter *f;
+       int err;
 
-       list_for_each_entry_rcu(mask, &head->masks, list) {
-               list_for_each_entry_rcu(f, &mask->filters, list) {
-                       if (arg->count < arg->skip)
-                               goto skip;
-                       if (arg->fn(tp, f, arg) < 0) {
-                               arg->stop = 1;
-                               break;
+       list_for_each_entry(mask, &head->masks, list) {
+               list_for_each_entry(f, &mask->filters, list) {
+                       if (tc_skip_hw(f->flags))
+                               continue;
+
+                       tc_cls_common_offload_init(&cls_flower.common, tp,
+                                                  f->flags, extack);
+                       cls_flower.command = add ?
+                               TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
+                       cls_flower.cookie = (unsigned long)f;
+                       cls_flower.dissector = &mask->dissector;
+                       cls_flower.mask = &f->mkey;
+                       cls_flower.key = &f->key;
+                       cls_flower.exts = &f->exts;
+                       cls_flower.classid = f->res.classid;
+
+                       err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+                       if (err) {
+                               if (add && tc_skip_sw(f->flags))
+                                       return err;
+                               continue;
                        }
-skip:
-                       arg->count++;
+
+                       tc_cls_offload_cnt_update(block, &f->in_hw_count,
+                                                 &f->flags, add);
                }
        }
+
+       return 0;
 }
 
 static int fl_dump_key_val(struct sk_buff *skb,
@@ -1142,6 +1222,7 @@ static int fl_dump_key_ip(struct sk_buff *skb,
 }
 
 static int fl_dump_key_vlan(struct sk_buff *skb,
+                           int vlan_id_key, int vlan_prio_key,
                            struct flow_dissector_key_vlan *vlan_key,
                            struct flow_dissector_key_vlan *vlan_mask)
 {
@@ -1150,13 +1231,13 @@ static int fl_dump_key_vlan(struct sk_buff *skb,
        if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
                return 0;
        if (vlan_mask->vlan_id) {
-               err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
+               err = nla_put_u16(skb, vlan_id_key,
                                  vlan_key->vlan_id);
                if (err)
                        return err;
        }
        if (vlan_mask->vlan_priority) {
-               err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
+               err = nla_put_u8(skb, vlan_prio_key,
                                 vlan_key->vlan_priority);
                if (err)
                        return err;
@@ -1251,9 +1332,30 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
        if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
                goto nla_put_failure;
 
-       if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
+       if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
+                            TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
+               goto nla_put_failure;
+
+       if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
+                            TCA_FLOWER_KEY_CVLAN_PRIO,
+                            &key->cvlan, &mask->cvlan) ||
+           (mask->cvlan.vlan_tpid &&
+            nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+                        key->cvlan.vlan_tpid)))
                goto nla_put_failure;
 
+       if (mask->basic.n_proto) {
+               if (mask->cvlan.vlan_tpid) {
+                       if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
+                                        key->basic.n_proto))
+                               goto nla_put_failure;
+               } else if (mask->vlan.vlan_tpid) {
+                       if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+                                        key->basic.n_proto))
+                               goto nla_put_failure;
+               }
+       }
+
        if ((key->basic.n_proto == htons(ETH_P_IP) ||
             key->basic.n_proto == htons(ETH_P_IPV6)) &&
            (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
@@ -1425,6 +1527,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
        .change         = fl_change,
        .delete         = fl_delete,
        .walk           = fl_walk,
+       .reoffload      = fl_reoffload,
        .dump           = fl_dump,
        .bind_class     = fl_bind_class,
        .owner          = THIS_MODULE,
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 47b207ef77620f8baa530a8bc8c44c35ecf2df78..af16f36ed578910824cac069dad85325e3271e0e 100644 (file)
@@ -21,6 +21,7 @@ struct cls_mall_head {
        struct tcf_result res;
        u32 handle;
        u32 flags;
+       unsigned int in_hw_count;
        struct rcu_work rwork;
 };
 
@@ -95,6 +96,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
                mall_destroy_hw_filter(tp, head, cookie, NULL);
                return err;
        } else if (err > 0) {
+               head->in_hw_count = err;
                tcf_block_offload_inc(block, &head->flags);
        }
 
@@ -235,6 +237,35 @@ static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
        arg->count++;
 }
 
+static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+                         void *cb_priv, struct netlink_ext_ack *extack)
+{
+       struct cls_mall_head *head = rtnl_dereference(tp->root);
+       struct tc_cls_matchall_offload cls_mall = {};
+       struct tcf_block *block = tp->chain->block;
+       int err;
+
+       if (tc_skip_hw(head->flags))
+               return 0;
+
+       tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
+       cls_mall.command = add ?
+               TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
+       cls_mall.exts = &head->exts;
+       cls_mall.cookie = (unsigned long)head;
+
+       err = cb(TC_SETUP_CLSMATCHALL, &cls_mall, cb_priv);
+       if (err) {
+               if (add && tc_skip_sw(head->flags))
+                       return err;
+               return 0;
+       }
+
+       tc_cls_offload_cnt_update(block, &head->in_hw_count, &head->flags, add);
+
+       return 0;
+}
+
 static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
                     struct sk_buff *skb, struct tcmsg *t)
 {
@@ -289,6 +320,7 @@ static struct tcf_proto_ops cls_mall_ops __read_mostly = {
        .change         = mall_change,
        .delete         = mall_delete,
        .walk           = mall_walk,
+       .reoffload      = mall_reoffload,
        .dump           = mall_dump,
        .bind_class     = mall_bind_class,
        .owner          = THIS_MODULE,
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index fb861f90fde6610d7fa4f7b6908742b307a4b9d0..d5d2a6dc39216b0ca28bd11094f0b64fda5c5964 100644 (file)
@@ -62,6 +62,7 @@ struct tc_u_knode {
        struct tc_u32_pcnt __percpu *pf;
 #endif
        u32                     flags;
+       unsigned int            in_hw_count;
 #ifdef CONFIG_CLS_U32_MARK
        u32                     val;
        u32                     mask;
@@ -571,6 +572,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
                u32_remove_hw_knode(tp, n, NULL);
                return err;
        } else if (err > 0) {
+               n->in_hw_count = err;
                tcf_block_offload_inc(block, &n->flags);
        }
 
@@ -1199,6 +1201,114 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
        }
 }
 
+static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
+                              bool add, tc_setup_cb_t *cb, void *cb_priv,
+                              struct netlink_ext_ack *extack)
+{
+       struct tc_cls_u32_offload cls_u32 = {};
+       int err;
+
+       tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
+       cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
+       cls_u32.hnode.divisor = ht->divisor;
+       cls_u32.hnode.handle = ht->handle;
+       cls_u32.hnode.prio = ht->prio;
+
+       err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
+       if (err && add && tc_skip_sw(ht->flags))
+               return err;
+
+       return 0;
+}
+
+static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
+                              bool add, tc_setup_cb_t *cb, void *cb_priv,
+                              struct netlink_ext_ack *extack)
+{
+       struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
+       struct tcf_block *block = tp->chain->block;
+       struct tc_cls_u32_offload cls_u32 = {};
+       int err;
+
+       tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
+       cls_u32.command = add ?
+               TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
+       cls_u32.knode.handle = n->handle;
+
+       if (add) {
+               cls_u32.knode.fshift = n->fshift;
+#ifdef CONFIG_CLS_U32_MARK
+               cls_u32.knode.val = n->val;
+               cls_u32.knode.mask = n->mask;
+#else
+               cls_u32.knode.val = 0;
+               cls_u32.knode.mask = 0;
+#endif
+               cls_u32.knode.sel = &n->sel;
+               cls_u32.knode.exts = &n->exts;
+               if (n->ht_down)
+                       cls_u32.knode.link_handle = ht->handle;
+       }
+
+       err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
+       if (err) {
+               if (add && tc_skip_sw(n->flags))
+                       return err;
+               return 0;
+       }
+
+       tc_cls_offload_cnt_update(block, &n->in_hw_count, &n->flags, add);
+
+       return 0;
+}
+
+static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+                        void *cb_priv, struct netlink_ext_ack *extack)
+{
+       struct tc_u_common *tp_c = tp->data;
+       struct tc_u_hnode *ht;
+       struct tc_u_knode *n;
+       unsigned int h;
+       int err;
+
+       for (ht = rtnl_dereference(tp_c->hlist);
+            ht;
+            ht = rtnl_dereference(ht->next)) {
+               if (ht->prio != tp->prio)
+                       continue;
+
+               /* When adding filters to a new dev, try to offload the
+                * hashtable first. When removing, do the filters before the
+                * hashtable.
+                */
+               if (add && !tc_skip_hw(ht->flags)) {
+                       err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
+                                                 extack);
+                       if (err)
+                               return err;
+               }
+
+               for (h = 0; h <= ht->divisor; h++) {
+                       for (n = rtnl_dereference(ht->ht[h]);
+                            n;
+                            n = rtnl_dereference(n->next)) {
+                               if (tc_skip_hw(n->flags))
+                                       continue;
+
+                               err = u32_reoffload_knode(tp, n, add, cb,
+                                                         cb_priv, extack);
+                               if (err)
+                                       return err;
+                       }
+               }
+
+               if (!add && !tc_skip_hw(ht->flags))
+                       u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
+       }
+
+       return 0;
+}
+
 static void u32_bind_class(void *fh, u32 classid, unsigned long cl)
 {
        struct tc_u_knode *n = fh;
@@ -1336,6 +1446,7 @@ static struct tcf_proto_ops cls_u32_ops __read_mostly = {
        .change         =       u32_change,
        .delete         =       u32_delete,
        .walk           =       u32_walk,
+       .reoffload      =       u32_reoffload,
        .dump           =       u32_dump,
        .bind_class     =       u32_bind_class,
        .owner          =       THIS_MODULE,
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 54eca685420f317b582a88508797bbc6327b75d0..98541c6399db53f5d8ae46aee0f17cad7e0a127e 100644 (file)
@@ -596,12 +596,19 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
-void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
+void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
+                                clockid_t clockid)
 {
-       hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+       hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
        wd->timer.function = qdisc_watchdog;
        wd->qdisc = qdisc;
 }
+EXPORT_SYMBOL(qdisc_watchdog_init_clockid);
+
+void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
+{
+       qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
+}
 EXPORT_SYMBOL(qdisc_watchdog_init);
 
 void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c
index c98a61e980baa68931f7e974582eb1c43ed60cf5..9c4c2bb547d7ea1da26e956a77b23592d467365b 100644 (file)
@@ -21,7 +21,7 @@ static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                             struct sk_buff **to_free)
 {
        qdisc_drop(skb, sch, to_free);
-       return NET_XMIT_SUCCESS;
+       return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
new file mode 100644 (file)
index 0000000..539c949
--- /dev/null
@@ -0,0 +1,3019 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/* COMMON Applications Kept Enhanced (CAKE) discipline
+ *
+ * Copyright (C) 2014-2018 Jonathan Morton <chromatix99@gmail.com>
+ * Copyright (C) 2015-2018 Toke Høiland-Jørgensen <toke@toke.dk>
+ * Copyright (C) 2014-2018 Dave Täht <dave.taht@gmail.com>
+ * Copyright (C) 2015-2018 Sebastian Moeller <moeller0@gmx.de>
+ * (C) 2015-2018 Kevin Darbyshire-Bryant <kevin@darbyshire-bryant.me.uk>
+ * Copyright (C) 2017-2018 Ryan Mounce <ryan@mounce.com.au>
+ *
+ * The CAKE Principles:
+ *                (or, how to have your cake and eat it too)
+ *
+ * This is a combination of several shaping, AQM and FQ techniques into one
+ * easy-to-use package:
+ *
+ * - An overall bandwidth shaper, to move the bottleneck away from dumb CPE
+ *   equipment and bloated MACs.  This operates in deficit mode (as in sch_fq),
+ *   eliminating the need for any sort of burst parameter (eg. token bucket
+ *   depth).  Burst support is limited to that necessary to overcome scheduling
+ *   latency.
+ *
+ * - A Diffserv-aware priority queue, giving more priority to certain classes,
+ *   up to a specified fraction of bandwidth.  Above that bandwidth threshold,
+ *   the priority is reduced to avoid starving other tins.
+ *
+ * - Each priority tin has a separate Flow Queue system, to isolate traffic
+ *   flows from each other.  This prevents a burst on one flow from increasing
+ *   the delay to another.  Flows are distributed to queues using a
+ *   set-associative hash function.
+ *
+ * - Each queue is actively managed by Cobalt, which is a combination of the
+ *   Codel and Blue AQM algorithms.  This serves flows fairly, and signals
+ *   congestion early via ECN (if available) and/or packet drops, to keep
+ *   latency low.  The codel parameters are auto-tuned based on the bandwidth
+ *   setting, as is necessary at low bandwidths.
+ *
+ * The configuration parameters are kept deliberately simple for ease of use.
+ * Everything has sane defaults.  Complete generality of configuration is *not*
+ * a goal.
+ *
+ * The priority queue operates according to a weighted DRR scheme, combined with
+ * a bandwidth tracker which reuses the shaper logic to detect which side of the
+ * bandwidth sharing threshold the tin is operating on.  This determines whether a
+ * priority-based weight (high) or a bandwidth-based weight (low) is used for
+ * that tin in the current pass.
+ *
+ * This qdisc was inspired by Eric Dumazet's fq_codel code, which he kindly
+ * granted us permission to leverage.
+ */
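+
+/* For illustration, a typical configuration (option names as understood by
+ * the iproute2 cake support, assumed here rather than defined in this file):
+ *
+ *   tc qdisc replace dev eth0 root cake bandwidth 100Mbit diffserv4 nat
+ *
+ * shapes to 100 Mbit/s, selects the four-tin Diffserv mapping and enables
+ * NAT-aware flow hashing.
+ */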
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/reciprocal_div.h>
+#include <net/netlink.h>
+#include <linux/version.h>
+#include <linux/if_vlan.h>
+#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
+#include <net/tcp.h>
+#include <net/flow_dissector.h>
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack_core.h>
+#endif
+
+#define CAKE_SET_WAYS (8)
+#define CAKE_MAX_TINS (8)
+#define CAKE_QUEUES (1024)
+#define CAKE_FLOW_MASK 63
+#define CAKE_FLOW_NAT_FLAG 64
+#define CAKE_SPLIT_GSO_THRESHOLD (125000000) /* 1Gbps */
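+
+/* The low bits of flow_mode (CAKE_FLOW_MASK) select the flow-hashing mode;
+ * CAKE_FLOW_NAT_FLAG is carried one bit above that field (descriptive note).
+ */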
+
+/* struct cobalt_params - contains codel and blue parameters
+ * @interval:  codel initial drop rate
+ * @target:     maximum persistent sojourn time & blue update rate
+ * @mtu_time:   serialisation delay of maximum-size packet
+ * @p_inc:      increment of blue drop probability (0.32 fxp)
+ * @p_dec:      decrement of blue drop probability (0.32 fxp)
+ */
+struct cobalt_params {
+       u64     interval;
+       u64     target;
+       u64     mtu_time;
+       u32     p_inc;
+       u32     p_dec;
+};
+
+/* struct cobalt_vars - contains codel and blue variables
+ * @count:             codel dropping frequency
+ * @rec_inv_sqrt:      reciprocal value of sqrt(count) >> 1
+ * @drop_next:         time to drop next packet, or when we dropped last
+ * @blue_timer:                Blue time to next drop
+ * @p_drop:            BLUE drop probability (0.32 fxp)
+ * @dropping:          set if in dropping state
+ * @ecn_marked:                set if marked
+ */
+struct cobalt_vars {
+       u32     count;
+       u32     rec_inv_sqrt;
+       ktime_t drop_next;
+       ktime_t blue_timer;
+       u32     p_drop;
+       bool    dropping;
+       bool    ecn_marked;
+};
+
+enum {
+       CAKE_SET_NONE = 0,
+       CAKE_SET_SPARSE,
+       CAKE_SET_SPARSE_WAIT, /* counted in SPARSE, actually in BULK */
+       CAKE_SET_BULK,
+       CAKE_SET_DECAYING
+};
+
+struct cake_flow {
+       /* this stuff is all needed per-flow at dequeue time */
+       struct sk_buff    *head;
+       struct sk_buff    *tail;
+       struct list_head  flowchain;
+       s32               deficit;
+       u32               dropped;
+       struct cobalt_vars cvars;
+       u16               srchost; /* index into cake_host table */
+       u16               dsthost;
+       u8                set;
+}; /* please try to keep this structure <= 64 bytes */
+
+struct cake_host {
+       u32 srchost_tag;
+       u32 dsthost_tag;
+       u16 srchost_refcnt;
+       u16 dsthost_refcnt;
+};
+
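+/* A heap entry addresses one queue: t is the tin index (3 bits, enough for
+ * CAKE_MAX_TINS = 8) and b the flow index within it (10 bits, enough for
+ * CAKE_QUEUES = 1024).
+ */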
+struct cake_heap_entry {
+       u16 t:3, b:10;
+};
+
+struct cake_tin_data {
+       struct cake_flow flows[CAKE_QUEUES];
+       u32     backlogs[CAKE_QUEUES];
+       u32     tags[CAKE_QUEUES]; /* for set association */
+       u16     overflow_idx[CAKE_QUEUES];
+       struct cake_host hosts[CAKE_QUEUES]; /* for triple isolation */
+       u16     flow_quantum;
+
+       struct cobalt_params cparams;
+       u32     drop_overlimit;
+       u16     bulk_flow_count;
+       u16     sparse_flow_count;
+       u16     decaying_flow_count;
+       u16     unresponsive_flow_count;
+
+       u32     max_skblen;
+
+       struct list_head new_flows;
+       struct list_head old_flows;
+       struct list_head decaying_flows;
+
+       /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
+       ktime_t time_next_packet;
+       u64     tin_rate_ns;
+       u64     tin_rate_bps;
+       u16     tin_rate_shft;
+
+       u16     tin_quantum_prio;
+       u16     tin_quantum_band;
+       s32     tin_deficit;
+       u32     tin_backlog;
+       u32     tin_dropped;
+       u32     tin_ecn_mark;
+
+       u32     packets;
+       u64     bytes;
+
+       u32     ack_drops;
+
+       /* moving averages */
+       u64 avge_delay;
+       u64 peak_delay;
+       u64 base_delay;
+
+       /* hash function stats */
+       u32     way_directs;
+       u32     way_hits;
+       u32     way_misses;
+       u32     way_collisions;
+}; /* number of tins is small, so size of this struct doesn't matter much */
+
+struct cake_sched_data {
+       struct tcf_proto __rcu *filter_list; /* optional external classifier */
+       struct tcf_block *block;
+       struct cake_tin_data *tins;
+
+       struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS];
+       u16             overflow_timeout;
+
+       u16             tin_cnt;
+       u8              tin_mode;
+       u8              flow_mode;
+       u8              ack_filter;
+       u8              atm_mode;
+
+       /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
+       u16             rate_shft;
+       ktime_t         time_next_packet;
+       ktime_t         failsafe_next_packet;
+       u64             rate_ns;
+       u64             rate_bps;
+       u16             rate_flags;
+       s16             rate_overhead;
+       u16             rate_mpu;
+       u64             interval;
+       u64             target;
+
+       /* resource tracking */
+       u32             buffer_used;
+       u32             buffer_max_used;
+       u32             buffer_limit;
+       u32             buffer_config_limit;
+
+       /* indices for dequeue */
+       u16             cur_tin;
+       u16             cur_flow;
+
+       struct qdisc_watchdog watchdog;
+       const u8        *tin_index;
+       const u8        *tin_order;
+
+       /* bandwidth capacity estimate */
+       ktime_t         last_packet_time;
+       ktime_t         avg_window_begin;
+       u64             avg_packet_interval;
+       u64             avg_window_bytes;
+       u64             avg_peak_bandwidth;
+       ktime_t         last_reconfig_time;
+
+       /* packet length stats */
+       u32             avg_netoff;
+       u16             max_netlen;
+       u16             max_adjlen;
+       u16             min_netlen;
+       u16             min_adjlen;
+};
+
+enum {
+       CAKE_FLAG_OVERHEAD         = BIT(0),
+       CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
+       CAKE_FLAG_INGRESS          = BIT(2),
+       CAKE_FLAG_WASH             = BIT(3),
+       CAKE_FLAG_SPLIT_GSO        = BIT(4)
+};
+
+/* COBALT operates the Codel and BLUE algorithms in parallel, in order to
+ * obtain the best features of each.  Codel is excellent on flows which
+ * respond to congestion signals in a TCP-like way.  BLUE is more effective on
+ * unresponsive flows.
+ */
+
+struct cobalt_skb_cb {
+       ktime_t enqueue_time;
+       u32     adjusted_len;
+};
+
+static u64 us_to_ns(u64 us)
+{
+       return us * NSEC_PER_USEC;
+}
+
+static struct cobalt_skb_cb *get_cobalt_cb(const struct sk_buff *skb)
+{
+       qdisc_cb_private_validate(skb, sizeof(struct cobalt_skb_cb));
+       return (struct cobalt_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static ktime_t cobalt_get_enqueue_time(const struct sk_buff *skb)
+{
+       return get_cobalt_cb(skb)->enqueue_time;
+}
+
+static void cobalt_set_enqueue_time(struct sk_buff *skb,
+                                   ktime_t now)
+{
+       get_cobalt_cb(skb)->enqueue_time = now;
+}
+
+static u16 quantum_div[CAKE_QUEUES + 1] = {0};
+
+/* Diffserv lookup tables */
+
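+/* Each table below is indexed by DSCP (0..63) and returns the tin a packet
+ * is assigned to; e.g. in diffserv3, CS1 (DSCP 8) lands in tin 1 (bulk) and
+ * EF (DSCP 46) in tin 2 (voice), while most codepoints stay in tin 0
+ * (best effort).
+ */
+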
+static const u8 precedence[] = {
+       0, 0, 0, 0, 0, 0, 0, 0,
+       1, 1, 1, 1, 1, 1, 1, 1,
+       2, 2, 2, 2, 2, 2, 2, 2,
+       3, 3, 3, 3, 3, 3, 3, 3,
+       4, 4, 4, 4, 4, 4, 4, 4,
+       5, 5, 5, 5, 5, 5, 5, 5,
+       6, 6, 6, 6, 6, 6, 6, 6,
+       7, 7, 7, 7, 7, 7, 7, 7,
+};
+
+static const u8 diffserv8[] = {
+       2, 5, 1, 2, 4, 2, 2, 2,
+       0, 2, 1, 2, 1, 2, 1, 2,
+       5, 2, 4, 2, 4, 2, 4, 2,
+       3, 2, 3, 2, 3, 2, 3, 2,
+       6, 2, 3, 2, 3, 2, 3, 2,
+       6, 2, 2, 2, 6, 2, 6, 2,
+       7, 2, 2, 2, 2, 2, 2, 2,
+       7, 2, 2, 2, 2, 2, 2, 2,
+};
+
+static const u8 diffserv4[] = {
+       0, 2, 0, 0, 2, 0, 0, 0,
+       1, 0, 0, 0, 0, 0, 0, 0,
+       2, 0, 2, 0, 2, 0, 2, 0,
+       2, 0, 2, 0, 2, 0, 2, 0,
+       3, 0, 2, 0, 2, 0, 2, 0,
+       3, 0, 0, 0, 3, 0, 3, 0,
+       3, 0, 0, 0, 0, 0, 0, 0,
+       3, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 diffserv3[] = {
+       0, 0, 0, 0, 2, 0, 0, 0,
+       1, 0, 0, 0, 0, 0, 0, 0,
+       0, 0, 0, 0, 0, 0, 0, 0,
+       0, 0, 0, 0, 0, 0, 0, 0,
+       0, 0, 0, 0, 0, 0, 0, 0,
+       0, 0, 0, 0, 2, 0, 2, 0,
+       2, 0, 0, 0, 0, 0, 0, 0,
+       2, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static const u8 besteffort[] = {
+       0, 0, 0, 0, 0, 0, 0, 0,
+       0, 0, 0, 0, 0, 0, 0, 0,
+       0, 0, 0, 0, 0, 0, 0, 0,
+       0, 0, 0, 0, 0, 0, 0, 0,
+       0, 0, 0, 0, 0, 0, 0, 0,
+       0, 0, 0, 0, 0, 0, 0, 0,
+       0, 0, 0, 0, 0, 0, 0, 0,
+       0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+/* tin priority order for stats dumping */
+
+static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
+static const u8 bulk_order[] = {1, 0, 2, 3};
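+/* e.g. bulk_order serves the Diffserv modes whose bulk tin (1) ranks below
+ * best effort (0), so stats come out in priority order (illustrative note).
+ */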
+
+#define REC_INV_SQRT_CACHE (16)
+static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
+
+/* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots
+ * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
+ *
+ * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
+ */
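+
+/* Worked example, in exact arithmetic: with count = 4 and invsqrt = 0.5
+ * (0x80000000 in Q0.32), invsqrt^2 = 0.25 and
+ * new_invsqrt = (0.5 / 2) * (3 - 4 * 0.25) = 0.5, the fixed point of the
+ * iteration, since 1/sqrt(4) is exactly 0.5.
+ */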
+
+static void cobalt_newton_step(struct cobalt_vars *vars)
+{
+       u32 invsqrt, invsqrt2;
+       u64 val;
+
+       invsqrt = vars->rec_inv_sqrt;
+       invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
+       val = (3LL << 32) - ((u64)vars->count * invsqrt2);
+
+       val >>= 2; /* avoid overflow in following multiply */
+       val = (val * invsqrt) >> (32 - 2 + 1);
+
+       vars->rec_inv_sqrt = val;
+}
+
+static void cobalt_invsqrt(struct cobalt_vars *vars)
+{
+       if (vars->count < REC_INV_SQRT_CACHE)
+               vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count];
+       else
+               cobalt_newton_step(vars);
+}
+
+/* There is a big difference in timing between the accurate values placed in
+ * the cache and the approximations given by a single Newton step for small
+ * count values, particularly when stepping from count 1 to 2 or vice versa.
+ * Above 16, a single Newton step gives sufficient accuracy in either
+ * direction, given the precision stored.
+ *
+ * The magnitude of the error when stepping up to count 2 is such as to give
+ * the value that *should* have been produced at count 4.
+ */
+
+static void cobalt_cache_init(void)
+{
+       struct cobalt_vars v;
+
+       memset(&v, 0, sizeof(v));
+       v.rec_inv_sqrt = ~0U;
+       cobalt_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt;
+
+       for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) {
+               cobalt_newton_step(&v);
+               cobalt_newton_step(&v);
+               cobalt_newton_step(&v);
+               cobalt_newton_step(&v);
+
+               cobalt_rec_inv_sqrt_cache[v.count] = v.rec_inv_sqrt;
+       }
+}
+
+static void cobalt_vars_init(struct cobalt_vars *vars)
+{
+       memset(vars, 0, sizeof(*vars));
+
+       if (!cobalt_rec_inv_sqrt_cache[0]) {
+               cobalt_cache_init();
+               cobalt_rec_inv_sqrt_cache[0] = ~0;
+       }
+}
+
+/* CoDel control_law is t + interval/sqrt(count)
+ * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
+ * both sqrt() and divide operation.
+ */
+static ktime_t cobalt_control(ktime_t t,
+                             u64 interval,
+                             u32 rec_inv_sqrt)
+{
+       return ktime_add_ns(t, reciprocal_scale(interval,
+                                               rec_inv_sqrt));
+}
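+
+/* For illustration: with interval = 100 ms and rec_inv_sqrt = 0x80000000
+ * (0.5 in Q0.32, i.e. count = 4), reciprocal_scale() yields
+ * 100 ms * 0.5 = 50 ms, so the next signalling event lands
+ * interval/sqrt(count) = 50 ms after t.
+ */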
+
+/* Call this when a packet had to be dropped due to queue overflow.  Returns
+ * true if the BLUE state was quiescent before but active after this call.
+ */
+static bool cobalt_queue_full(struct cobalt_vars *vars,
+                             struct cobalt_params *p,
+                             ktime_t now)
+{
+       bool up = false;
+
+       if (ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
+               up = !vars->p_drop;
+               vars->p_drop += p->p_inc;
+               if (vars->p_drop < p->p_inc)
+                       vars->p_drop = ~0;
+               vars->blue_timer = now;
+       }
+       vars->dropping = true;
+       vars->drop_next = now;
+       if (!vars->count)
+               vars->count = 1;
+
+       return up;
+}
+
+/* Call this when the queue was serviced but turned out to be empty.  Returns
+ * true if the BLUE state was active before but quiescent after this call.
+ */
+static bool cobalt_queue_empty(struct cobalt_vars *vars,
+                              struct cobalt_params *p,
+                              ktime_t now)
+{
+       bool down = false;
+
+       if (vars->p_drop &&
+           ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
+               if (vars->p_drop < p->p_dec)
+                       vars->p_drop = 0;
+               else
+                       vars->p_drop -= p->p_dec;
+               vars->blue_timer = now;
+               down = !vars->p_drop;
+       }
+       vars->dropping = false;
+
+       if (vars->count && ktime_to_ns(ktime_sub(now, vars->drop_next)) >= 0) {
+               vars->count--;
+               cobalt_invsqrt(vars);
+               vars->drop_next = cobalt_control(vars->drop_next,
+                                                p->interval,
+                                                vars->rec_inv_sqrt);
+       }
+
+       return down;
+}
+
+/* Call this with a freshly dequeued packet for possible congestion marking.
+ * Returns true as an instruction to drop the packet, false for delivery.
+ */
+static bool cobalt_should_drop(struct cobalt_vars *vars,
+                              struct cobalt_params *p,
+                              ktime_t now,
+                              struct sk_buff *skb,
+                              u32 bulk_flows)
+{
+       bool next_due, over_target, drop = false;
+       ktime_t schedule;
+       u64 sojourn;
+
+/* The 'schedule' variable records, in its sign, whether 'now' is before or
+ * after 'drop_next'.  This allows 'drop_next' to be updated before the next
+ * scheduling decision is actually branched, without destroying that
+ * information.  Similarly, the first 'schedule' value calculated is preserved
+ * in the boolean 'next_due'.
+ *
+ * As for 'drop_next', we take advantage of the fact that 'interval' is both
+ * the delay between first exceeding 'target' and the first signalling event,
+ * *and* the scaling factor for the signalling frequency.  It's therefore very
+ * natural to use a single mechanism for both purposes, and eliminates a
+ * significant amount of reference Codel's spaghetti code.  To help with this,
+ * both the '0' and '1' entries in the invsqrt cache are 0xFFFFFFFF, as close
+ * as possible to 1.0 in fixed-point.
+ */
+
+       sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
+       schedule = ktime_sub(now, vars->drop_next);
+       over_target = sojourn > p->target &&
+                     sojourn > p->mtu_time * bulk_flows * 2 &&
+                     sojourn > p->mtu_time * 4;
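+       /* i.e. a queue only counts as over target if the sojourn time also
+        * exceeds several maximum-packet serialisation delays, which keeps
+        * slow links from signalling on every packet (descriptive note).
+        */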
+       next_due = vars->count && ktime_to_ns(schedule) >= 0;
+
+       vars->ecn_marked = false;
+
+       if (over_target) {
+               if (!vars->dropping) {
+                       vars->dropping = true;
+                       vars->drop_next = cobalt_control(now,
+                                                        p->interval,
+                                                        vars->rec_inv_sqrt);
+               }
+               if (!vars->count)
+                       vars->count = 1;
+       } else if (vars->dropping) {
+               vars->dropping = false;
+       }
+
+       if (next_due && vars->dropping) {
+               /* Use ECN mark if possible, otherwise drop */
+               drop = !(vars->ecn_marked = INET_ECN_set_ce(skb));
+
+               vars->count++;
+               if (!vars->count)
+                       vars->count--;
+               cobalt_invsqrt(vars);
+               vars->drop_next = cobalt_control(vars->drop_next,
+                                                p->interval,
+                                                vars->rec_inv_sqrt);
+               schedule = ktime_sub(now, vars->drop_next);
+       } else {
+               while (next_due) {
+                       vars->count--;
+                       cobalt_invsqrt(vars);
+                       vars->drop_next = cobalt_control(vars->drop_next,
+                                                        p->interval,
+                                                        vars->rec_inv_sqrt);
+                       schedule = ktime_sub(now, vars->drop_next);
+                       next_due = vars->count && ktime_to_ns(schedule) >= 0;
+               }
+       }
+
+       /* Simple BLUE implementation.  Lack of ECN is deliberate. */
+       if (vars->p_drop)
+               drop |= (prandom_u32() < vars->p_drop);
+
+       /* Overload the drop_next field as an activity timeout */
+       if (!vars->count)
+               vars->drop_next = ktime_add_ns(now, p->interval);
+       else if (ktime_to_ns(schedule) > 0 && !drop)
+               vars->drop_next = now;
+
+       return drop;
+}
+
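+/* NAT awareness: rewrite the dissected addresses and ports with the
+ * conntrack tuple, so flows and hosts behind a masquerading gateway are
+ * still told apart. IPv4 only, as the tc_skb_protocol() check enforces.
+ */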
+static void cake_update_flowkeys(struct flow_keys *keys,
+                                const struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+       struct nf_conntrack_tuple tuple = {};
+       bool rev = !skb->_nfct;
+
+       if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+               return;
+
+       if (!nf_ct_get_tuple_skb(&tuple, skb))
+               return;
+
+       keys->addrs.v4addrs.src = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
+       keys->addrs.v4addrs.dst = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;
+
+       if (keys->ports.ports) {
+               keys->ports.src = rev ? tuple.dst.u.all : tuple.src.u.all;
+               keys->ports.dst = rev ? tuple.src.u.all : tuple.dst.u.all;
+       }
+#endif
+}
+
+/* Cake's flow modes overlap in their bit patterns: triple isolate mode sets
+ * the bits of both dual modes, so the masked comparisons below match it as
+ * well as the dual mode they name.
+ */
+
+static bool cake_dsrc(int flow_mode)
+{
+       return (flow_mode & CAKE_FLOW_DUAL_SRC) == CAKE_FLOW_DUAL_SRC;
+}
+
+static bool cake_ddst(int flow_mode)
+{
+       return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
+}
+
+static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
+                    int flow_mode)
+{
+       u32 flow_hash = 0, srchost_hash, dsthost_hash;
+       u16 reduced_hash, srchost_idx, dsthost_idx;
+       struct flow_keys keys, host_keys;
+
+       if (unlikely(flow_mode == CAKE_FLOW_NONE))
+               return 0;
+
+       skb_flow_dissect_flow_keys(skb, &keys,
+                                  FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+
+       if (flow_mode & CAKE_FLOW_NAT_FLAG)
+               cake_update_flowkeys(&keys, skb);
+
+       /* flow_hash_from_keys() sorts the addresses by value, so we have
+        * to preserve their order in a separate data structure to treat
+        * src and dst host addresses as independently selectable.
+        */
+       host_keys = keys;
+       host_keys.ports.ports     = 0;
+       host_keys.basic.ip_proto  = 0;
+       host_keys.keyid.keyid     = 0;
+       host_keys.tags.flow_label = 0;
+
+       switch (host_keys.control.addr_type) {
+       case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+               host_keys.addrs.v4addrs.src = 0;
+               dsthost_hash = flow_hash_from_keys(&host_keys);
+               host_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
+               host_keys.addrs.v4addrs.dst = 0;
+               srchost_hash = flow_hash_from_keys(&host_keys);
+               break;
+
+       case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+               memset(&host_keys.addrs.v6addrs.src, 0,
+                      sizeof(host_keys.addrs.v6addrs.src));
+               dsthost_hash = flow_hash_from_keys(&host_keys);
+               host_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
+               memset(&host_keys.addrs.v6addrs.dst, 0,
+                      sizeof(host_keys.addrs.v6addrs.dst));
+               srchost_hash = flow_hash_from_keys(&host_keys);
+               break;
+
+       default:
+               dsthost_hash = 0;
+               srchost_hash = 0;
+       }
+
+       /* This *must* be after the above switch, since as a
+        * side-effect it sorts the src and dst addresses.
+        */
+       if (flow_mode & CAKE_FLOW_FLOWS)
+               flow_hash = flow_hash_from_keys(&keys);
+
+       if (!(flow_mode & CAKE_FLOW_FLOWS)) {
+               if (flow_mode & CAKE_FLOW_SRC_IP)
+                       flow_hash ^= srchost_hash;
+
+               if (flow_mode & CAKE_FLOW_DST_IP)
+                       flow_hash ^= dsthost_hash;
+       }
+
+       reduced_hash = flow_hash % CAKE_QUEUES;
+
+       /* set-associative hashing */
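+       /* Illustration: with CAKE_QUEUES = 1024 and CAKE_SET_WAYS = 8,
+        * reduced_hash = 525 gives inner_hash = 5 and outer_hash = 520, so
+        * a collision search below is confined to queues 520..527.
+        */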
+       /* fast path if no hash collision (direct lookup succeeds) */
+       if (likely(q->tags[reduced_hash] == flow_hash &&
+                  q->flows[reduced_hash].set)) {
+               q->way_directs++;
+       } else {
+               u32 inner_hash = reduced_hash % CAKE_SET_WAYS;
+               u32 outer_hash = reduced_hash - inner_hash;
+               bool allocate_src = false;
+               bool allocate_dst = false;
+               u32 i, k;
+
+               /* check if any active queue in the set is reserved for
+                * this flow.
+                */
+               for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
+                    i++, k = (k + 1) % CAKE_SET_WAYS) {
+                       if (q->tags[outer_hash + k] == flow_hash) {
+                               if (i)
+                                       q->way_hits++;
+
+                               if (!q->flows[outer_hash + k].set) {
+                                       /* need to increment host refcnts */
+                                       allocate_src = cake_dsrc(flow_mode);
+                                       allocate_dst = cake_ddst(flow_mode);
+                               }
+
+                               goto found;
+                       }
+               }
+
+               /* no queue is reserved for this flow, look for an
+                * empty one.
+                */
+               for (i = 0; i < CAKE_SET_WAYS;
+                        i++, k = (k + 1) % CAKE_SET_WAYS) {
+                       if (!q->flows[outer_hash + k].set) {
+                               q->way_misses++;
+                               allocate_src = cake_dsrc(flow_mode);
+                               allocate_dst = cake_ddst(flow_mode);
+                               goto found;
+                       }
+               }
+
+               /* With no empty queues, default to the original
+                * queue, accept the collision, update the host tags.
+                */
+               q->way_collisions++;
+               q->hosts[q->flows[reduced_hash].srchost].srchost_refcnt--;
+               q->hosts[q->flows[reduced_hash].dsthost].dsthost_refcnt--;
+               allocate_src = cake_dsrc(flow_mode);
+               allocate_dst = cake_ddst(flow_mode);
+found:
+               /* reserve queue for future packets in same flow */
+               reduced_hash = outer_hash + k;
+               q->tags[reduced_hash] = flow_hash;
+
+               if (allocate_src) {
+                       srchost_idx = srchost_hash % CAKE_QUEUES;
+                       inner_hash = srchost_idx % CAKE_SET_WAYS;
+                       outer_hash = srchost_idx - inner_hash;
+                       for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
+                               i++, k = (k + 1) % CAKE_SET_WAYS) {
+                               if (q->hosts[outer_hash + k].srchost_tag ==
+                                   srchost_hash)
+                                       goto found_src;
+                       }
+                       for (i = 0; i < CAKE_SET_WAYS;
+                               i++, k = (k + 1) % CAKE_SET_WAYS) {
+                               if (!q->hosts[outer_hash + k].srchost_refcnt)
+                                       break;
+                       }
+                       q->hosts[outer_hash + k].srchost_tag = srchost_hash;
+found_src:
+                       srchost_idx = outer_hash + k;
+                       q->hosts[srchost_idx].srchost_refcnt++;
+                       q->flows[reduced_hash].srchost = srchost_idx;
+               }
+
+               if (allocate_dst) {
+                       dsthost_idx = dsthost_hash % CAKE_QUEUES;
+                       inner_hash = dsthost_idx % CAKE_SET_WAYS;
+                       outer_hash = dsthost_idx - inner_hash;
+                       for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
+                            i++, k = (k + 1) % CAKE_SET_WAYS) {
+                               if (q->hosts[outer_hash + k].dsthost_tag ==
+                                   dsthost_hash)
+                                       goto found_dst;
+                       }
+                       for (i = 0; i < CAKE_SET_WAYS;
+                            i++, k = (k + 1) % CAKE_SET_WAYS) {
+                               if (!q->hosts[outer_hash + k].dsthost_refcnt)
+                                       break;
+                       }
+                       q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
+found_dst:
+                       dsthost_idx = outer_hash + k;
+                       q->hosts[dsthost_idx].dsthost_refcnt++;
+                       q->flows[reduced_hash].dsthost = dsthost_idx;
+               }
+       }
+
+       return reduced_hash;
+}
+
+/* helper functions: might be changed when/if skb uses a standard list_head */
+/* remove one skb from head of slot queue */
+
+static struct sk_buff *dequeue_head(struct cake_flow *flow)
+{
+       struct sk_buff *skb = flow->head;
+
+       if (skb) {
+               flow->head = skb->next;
+               skb->next = NULL;
+       }
+
+       return skb;
+}
+
+/* add skb to flow queue (tail add) */
+
+static void flow_queue_add(struct cake_flow *flow, struct sk_buff *skb)
+{
+       if (!flow->head)
+               flow->head = skb;
+       else
+               flow->tail->next = skb;
+       flow->tail = skb;
+       skb->next = NULL;
+}
+
+static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
+                                   struct ipv6hdr *buf)
+{
+       unsigned int offset = skb_network_offset(skb);
+       struct iphdr *iph;
+
+       iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
+
+       if (!iph)
+               return NULL;
+
+       if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
+               return skb_header_pointer(skb, offset + iph->ihl * 4,
+                                         sizeof(struct ipv6hdr), buf);
+
+       else if (iph->version == 4)
+               return iph;
+
+       else if (iph->version == 6)
+               return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
+                                         buf);
+
+       return NULL;
+}
+
+static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
+                                     void *buf, unsigned int bufsize)
+{
+       unsigned int offset = skb_network_offset(skb);
+       const struct ipv6hdr *ipv6h;
+       const struct tcphdr *tcph;
+       const struct iphdr *iph;
+       struct ipv6hdr _ipv6h;
+       struct tcphdr _tcph;
+
+       ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+
+       if (!ipv6h)
+               return NULL;
+
+       if (ipv6h->version == 4) {
+               iph = (struct iphdr *)ipv6h;
+               offset += iph->ihl * 4;
+
+               /* special-case 6in4 tunnelling, as that is a common way to get
+                * v6 connectivity in the home
+                */
+               if (iph->protocol == IPPROTO_IPV6) {
+                       ipv6h = skb_header_pointer(skb, offset,
+                                                  sizeof(_ipv6h), &_ipv6h);
+
+                       if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
+                               return NULL;
+
+                       offset += sizeof(struct ipv6hdr);
+
+               } else if (iph->protocol != IPPROTO_TCP) {
+                       return NULL;
+               }
+
+       } else if (ipv6h->version == 6) {
+               if (ipv6h->nexthdr != IPPROTO_TCP)
+                       return NULL;
+
+               offset += sizeof(struct ipv6hdr);
+       } else {
+               return NULL;
+       }
+
+       tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+       if (!tcph)
+               return NULL;
+
+       return skb_header_pointer(skb, offset,
+                                 min(__tcp_hdrlen(tcph), bufsize), buf);
+}
+
+static const void *cake_get_tcpopt(const struct tcphdr *tcph,
+                                  int code, int *oplen)
+{
+       /* inspired by tcp_parse_options in tcp_input.c */
+       int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
+       const u8 *ptr = (const u8 *)(tcph + 1);
+
+       while (length > 0) {
+               int opcode = *ptr++;
+               int opsize;
+
+               if (opcode == TCPOPT_EOL)
+                       break;
+               if (opcode == TCPOPT_NOP) {
+                       length--;
+                       continue;
+               }
+               opsize = *ptr++;
+               if (opsize < 2 || opsize > length)
+                       break;
+
+               if (opcode == code) {
+                       *oplen = opsize;
+                       return ptr;
+               }
+
+               ptr += opsize - 2;
+               length -= opsize;
+       }
+
+       return NULL;
+}
+
+/* Compare two SACK sequences. A sequence is considered greater if it SACKs
+ * more bytes than the other. In the case where both sequences SACK bytes
+ * that the other doesn't, A is considered greater. DSACKs in A also make A
+ * considered greater.
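+ *
+ * Example (illustrative): if A SACKs only [100, 200) and B SACKs
+ * [100, 300), every block in A is covered by B and B covers more bytes,
+ * so the compare returns 1.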
+ *
+ * @return -1, 0 or 1 as normal compare functions
+ */
+static int cake_tcph_sack_compare(const struct tcphdr *tcph_a,
+                                 const struct tcphdr *tcph_b)
+{
+       const struct tcp_sack_block_wire *sack_a, *sack_b;
+       u32 ack_seq_a = ntohl(tcph_a->ack_seq);
+       u32 bytes_a = 0, bytes_b = 0;
+       int oplen_a, oplen_b;
+       bool first = true;
+
+       sack_a = cake_get_tcpopt(tcph_a, TCPOPT_SACK, &oplen_a);
+       sack_b = cake_get_tcpopt(tcph_b, TCPOPT_SACK, &oplen_b);
+
+       /* pointers point to option contents */
+       oplen_a -= TCPOLEN_SACK_BASE;
+       oplen_b -= TCPOLEN_SACK_BASE;
+
+       if (sack_a && oplen_a >= sizeof(*sack_a) &&
+           (!sack_b || oplen_b < sizeof(*sack_b)))
+               return -1;
+       else if (sack_b && oplen_b >= sizeof(*sack_b) &&
+                (!sack_a || oplen_a < sizeof(*sack_a)))
+               return 1;
+       else if ((!sack_a || oplen_a < sizeof(*sack_a)) &&
+                (!sack_b || oplen_b < sizeof(*sack_b)))
+               return 0;
+
+       while (oplen_a >= sizeof(*sack_a)) {
+               const struct tcp_sack_block_wire *sack_tmp = sack_b;
+               u32 start_a = get_unaligned_be32(&sack_a->start_seq);
+               u32 end_a = get_unaligned_be32(&sack_a->end_seq);
+               int oplen_tmp = oplen_b;
+               bool found = false;
+
+               /* DSACK; always considered greater to prevent dropping */
+               if (before(start_a, ack_seq_a))
+                       return -1;
+
+               bytes_a += end_a - start_a;
+
+               while (oplen_tmp >= sizeof(*sack_tmp)) {
+                       u32 start_b = get_unaligned_be32(&sack_tmp->start_seq);
+                       u32 end_b = get_unaligned_be32(&sack_tmp->end_seq);
+
+                       /* first time through we count the total size */
+                       if (first)
+                               bytes_b += end_b - start_b;
+
+                       if (!after(start_b, start_a) && !before(end_b, end_a)) {
+                               found = true;
+                               if (!first)
+                                       break;
+                       }
+                       oplen_tmp -= sizeof(*sack_tmp);
+                       sack_tmp++;
+               }
+
+               if (!found)
+                       return -1;
+
+               oplen_a -= sizeof(*sack_a);
+               sack_a++;
+               first = false;
+       }
+
+       /* If we made it this far, all ranges SACKed by A are covered by B, so
+        * either the SACKs are equal, or B SACKs more bytes.
+        */
+       return bytes_b > bytes_a ? 1 : 0;
+}
+
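+/* Extract TSval and TSecr from the TCP timestamp option, if present and
+ * well-formed; otherwise leave *tsval and *tsecr untouched.
+ */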
+static void cake_tcph_get_tstamp(const struct tcphdr *tcph,
+                                u32 *tsval, u32 *tsecr)
+{
+       const u8 *ptr;
+       int opsize;
+
+       ptr = cake_get_tcpopt(tcph, TCPOPT_TIMESTAMP, &opsize);
+
+       if (ptr && opsize == TCPOLEN_TIMESTAMP) {
+               *tsval = get_unaligned_be32(ptr);
+               *tsecr = get_unaligned_be32(ptr + 4);
+       }
+}
+
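+/* Check whether an ACK is safe to drop in favour of a newer one: it must be
+ * a pure ACK (no flags beyond ACK plus ECE/CWR, which are handled by the
+ * caller), its timestamps must not be newer than tstamp_new/tsecr_new, and
+ * it must carry no option whose loss would discard information (anything
+ * other than MD5SIG, well-formed SACK blocks and older timestamps).
+ */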
+static bool cake_tcph_may_drop(const struct tcphdr *tcph,
+                              u32 tstamp_new, u32 tsecr_new)
+{
+       /* inspired by tcp_parse_options in tcp_input.c */
+       int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
+       const u8 *ptr = (const u8 *)(tcph + 1);
+       u32 tstamp, tsecr;
+
+       /* 3 reserved flags must be unset to avoid future breakage
+        * ACK must be set
+        * ECE/CWR are handled separately
+        * All other flags URG/PSH/RST/SYN/FIN must be unset
+        * 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero)
+        * 0x00C00000 = CWR/ECE (handled separately)
+        * 0x0F3F0000 = 0x0FFF0000 & ~0x00C00000
+        */
+       if (((tcp_flag_word(tcph) &
+             cpu_to_be32(0x0F3F0000)) != TCP_FLAG_ACK))
+               return false;
+
+       while (length > 0) {
+               int opcode = *ptr++;
+               int opsize;
+
+               if (opcode == TCPOPT_EOL)
+                       break;
+               if (opcode == TCPOPT_NOP) {
+                       length--;
+                       continue;
+               }
+               opsize = *ptr++;
+               if (opsize < 2 || opsize > length)
+                       break;
+
+               switch (opcode) {
+               case TCPOPT_MD5SIG: /* doesn't influence state */
+                       break;
+
+               case TCPOPT_SACK: /* stricter checking performed later */
+                       if (opsize % 8 != 2)
+                               return false;
+                       break;
+
+               case TCPOPT_TIMESTAMP:
+                       /* only drop timestamps lower than new */
+                       if (opsize != TCPOLEN_TIMESTAMP)
+                               return false;
+                       tstamp = get_unaligned_be32(ptr);
+                       tsecr = get_unaligned_be32(ptr + 4);
+                       if (after(tstamp, tstamp_new) ||
+                           after(tsecr, tsecr_new))
+                               return false;
+                       break;
+
+               case TCPOPT_MSS:  /* these should only be set on SYN */
+               case TCPOPT_WINDOW:
+               case TCPOPT_SACK_PERM:
+               case TCPOPT_FASTOPEN:
+               case TCPOPT_EXP:
+               default: /* don't drop if any unknown options are present */
+                       return false;
+               }
+
+               ptr += opsize - 2;
+               length -= opsize;
+       }
+
+       return true;
+}
+
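+/* Scan the flow's queue for pure ACKs made redundant by the new ACK at the
+ * tail. In conservative mode an ACK is only dropped once a second eligible
+ * ACK is found behind it; in aggressive mode a single eligible ACK may also
+ * be dropped, provided it immediately precedes the triggering ACK and
+ * carries the same ECE/CWR flags.
+ */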
+static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
+                                      struct cake_flow *flow)
+{
+       bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE;
+       struct sk_buff *elig_ack = NULL, *elig_ack_prev = NULL;
+       struct sk_buff *skb_check, *skb_prev = NULL;
+       const struct ipv6hdr *ipv6h, *ipv6h_check;
+       unsigned char _tcph[64], _tcph_check[64];
+       const struct tcphdr *tcph, *tcph_check;
+       const struct iphdr *iph, *iph_check;
+       struct ipv6hdr _iph, _iph_check;
+       const struct sk_buff *skb;
+       int seglen, num_found = 0;
+       u32 tstamp = 0, tsecr = 0;
+       __be32 elig_flags = 0;
+       int sack_comp;
+
+       /* no other possible ACKs to filter */
+       if (flow->head == flow->tail)
+               return NULL;
+
+       skb = flow->tail;
+       tcph = cake_get_tcphdr(skb, _tcph, sizeof(_tcph));
+       iph = cake_get_iphdr(skb, &_iph);
+       if (!tcph)
+               return NULL;
+
+       cake_tcph_get_tstamp(tcph, &tstamp, &tsecr);
+
+       /* The 'triggering' packet need only have the ACK flag set.
+        * Also check that SYN is not set, as there won't be any previous ACKs.
+        */
+       if ((tcp_flag_word(tcph) &
+            (TCP_FLAG_ACK | TCP_FLAG_SYN)) != TCP_FLAG_ACK)
+               return NULL;
+
+       /* The 'triggering' ACK is at the tail of the queue; we have already
+        * returned if it is the only packet in the flow. Loop through the rest
+        * of the queue looking for pure ACKs with the same 5-tuple as the
+        * triggering one.
+        */
+       for (skb_check = flow->head;
+            skb_check && skb_check != skb;
+            skb_prev = skb_check, skb_check = skb_check->next) {
+               iph_check = cake_get_iphdr(skb_check, &_iph_check);
+               tcph_check = cake_get_tcphdr(skb_check, &_tcph_check,
+                                            sizeof(_tcph_check));
+
+               /* only TCP packets with matching 5-tuple are eligible, and only
+                * drop safe headers
+                */
+               if (!tcph_check || iph->version != iph_check->version ||
+                   tcph_check->source != tcph->source ||
+                   tcph_check->dest != tcph->dest)
+                       continue;
+
+               if (iph_check->version == 4) {
+                       if (iph_check->saddr != iph->saddr ||
+                           iph_check->daddr != iph->daddr)
+                               continue;
+
+                       seglen = ntohs(iph_check->tot_len) -
+                                      (4 * iph_check->ihl);
+               } else if (iph_check->version == 6) {
+                       ipv6h = (struct ipv6hdr *)iph;
+                       ipv6h_check = (struct ipv6hdr *)iph_check;
+
+                       if (ipv6_addr_cmp(&ipv6h_check->saddr, &ipv6h->saddr) ||
+                           ipv6_addr_cmp(&ipv6h_check->daddr, &ipv6h->daddr))
+                               continue;
+
+                       seglen = ntohs(ipv6h_check->payload_len);
+               } else {
+                       WARN_ON(1);  /* shouldn't happen */
+                       continue;
+               }
+
+               /* If the ECE/CWR flags changed from the previous eligible
+                * packet in the same flow, we should no longer be dropping that
+                * previous packet as this would lose information.
+                */
+               if (elig_ack && (tcp_flag_word(tcph_check) &
+                                (TCP_FLAG_ECE | TCP_FLAG_CWR)) != elig_flags) {
+                       elig_ack = NULL;
+                       elig_ack_prev = NULL;
+                       num_found--;
+               }
+
+               /* Check TCP options and flags, don't drop ACKs with segment
+                * data, and don't drop ACKs with a higher cumulative ACK
+                * counter than the triggering packet. Check ACK seqno here to
+                * avoid parsing SACK options of packets we are going to exclude
+                * anyway.
+                */
+               if (!cake_tcph_may_drop(tcph_check, tstamp, tsecr) ||
+                   (seglen - __tcp_hdrlen(tcph_check)) != 0 ||
+                   after(ntohl(tcph_check->ack_seq), ntohl(tcph->ack_seq)))
+                       continue;
+
+               /* Check SACK options. The triggering packet must SACK more data
+                * than the ACK under consideration, or SACK the same range but
+                * have a larger cumulative ACK counter. The latter is a
+                * pathological case, but is contained in the following check
+                * anyway, just to be safe.
+                */
+               sack_comp = cake_tcph_sack_compare(tcph_check, tcph);
+
+               if (sack_comp < 0 ||
+                   (ntohl(tcph_check->ack_seq) == ntohl(tcph->ack_seq) &&
+                    sack_comp == 0))
+                       continue;
+
+               /* At this point we have found an eligible pure ACK to drop; if
+                * we are in aggressive mode, we are done. Otherwise, keep
+                * searching unless this is the second eligible ACK we
+                * found.
+                *
+                * Since we want to drop ACK closest to the head of the queue,
+                * save the first eligible ACK we find, even if we need to loop
+                * again.
+                */
+               if (!elig_ack) {
+                       elig_ack = skb_check;
+                       elig_ack_prev = skb_prev;
+                       elig_flags = (tcp_flag_word(tcph_check)
+                                     & (TCP_FLAG_ECE | TCP_FLAG_CWR));
+               }
+
+               if (num_found++ > 0)
+                       goto found;
+       }
+
+       /* We made it through the queue without finding two eligible ACKs. If
+        * we found a single eligible ACK we can drop it in aggressive mode if
+        * we can guarantee that this does not interfere with ECN flag
+        * information. We ensure this by dropping it only if the enqueued
+        * packet is consecutive with the eligible ACK, and their flags match.
+        */
+       if (elig_ack && aggressive && elig_ack->next == skb &&
+           (elig_flags == (tcp_flag_word(tcph) &
+                           (TCP_FLAG_ECE | TCP_FLAG_CWR))))
+               goto found;
+
+       return NULL;
+
+found:
+       if (elig_ack_prev)
+               elig_ack_prev->next = elig_ack->next;
+       else
+               flow->head = elig_ack->next;
+
+       elig_ack->next = NULL;
+
+       return elig_ack;
+}
+
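+/* Shift-based EWMA: avg += (sample - avg) / 2^shift, computed with the
+ * subtraction and addition truncated separately. E.g. shift == 3,
+ * avg == 100, sample == 200 gives 100 - 12 + 25 = 113.
+ */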
+static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
+{
+       avg -= avg >> shift;
+       avg += sample >> shift;
+       return avg;
+}
+
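+/* Convert a packet length into its estimated on-wire cost: when a manual
+ * overhead is configured, count from the network header (len -= off), then
+ * add the configured overhead, enforce the minimum packet unit, and round
+ * up for ATM (48 payload bytes per 53-byte cell) or PTM framing. E.g. in
+ * ATM mode a 100-byte packet occupies (100 + 47) / 48 = 3 cells = 159 bytes.
+ */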
+static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
+{
+       if (q->rate_flags & CAKE_FLAG_OVERHEAD)
+               len -= off;
+
+       if (q->max_netlen < len)
+               q->max_netlen = len;
+       if (q->min_netlen > len)
+               q->min_netlen = len;
+
+       len += q->rate_overhead;
+
+       if (len < q->rate_mpu)
+               len = q->rate_mpu;
+
+       if (q->atm_mode == CAKE_ATM_ATM) {
+               len += 47;
+               len /= 48;
+               len *= 53;
+       } else if (q->atm_mode == CAKE_ATM_PTM) {
+               /* Add one byte per 64 bytes or part thereof.
+                * This is conservative and easier to calculate than the
+                * precise value.
+                */
+               len += (len + 63) / 64;
+       }
+
+       if (q->max_adjlen < len)
+               q->max_adjlen = len;
+       if (q->min_adjlen > len)
+               q->min_adjlen = len;
+
+       return len;
+}
+
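+/* On-wire cost of an skb. For GSO super-packets, this is estimated as
+ * (segs - 1) full-size segments plus one trailing segment, each charged
+ * the per-packet overhead individually.
+ */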
+static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
+{
+       const struct skb_shared_info *shinfo = skb_shinfo(skb);
+       unsigned int hdr_len, last_len = 0;
+       u32 off = skb_network_offset(skb);
+       u32 len = qdisc_pkt_len(skb);
+       u16 segs = 1;
+
+       q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
+
+       if (!shinfo->gso_size)
+               return cake_calc_overhead(q, len, off);
+
+       /* borrowed from qdisc_pkt_len_init() */
+       hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+
+       /* + transport layer */
+       if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
+                                               SKB_GSO_TCPV6))) {
+               const struct tcphdr *th;
+               struct tcphdr _tcphdr;
+
+               th = skb_header_pointer(skb, skb_transport_offset(skb),
+                                       sizeof(_tcphdr), &_tcphdr);
+               if (likely(th))
+                       hdr_len += __tcp_hdrlen(th);
+       } else {
+               struct udphdr _udphdr;
+
+               if (skb_header_pointer(skb, skb_transport_offset(skb),
+                                      sizeof(_udphdr), &_udphdr))
+                       hdr_len += sizeof(struct udphdr);
+       }
+
+       if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
+               segs = DIV_ROUND_UP(skb->len - hdr_len,
+                                   shinfo->gso_size);
+       else
+               segs = shinfo->gso_segs;
+
+       len = shinfo->gso_size + hdr_len;
+       last_len = skb->len - shinfo->gso_size * (segs - 1);
+
+       return (cake_calc_overhead(q, len, off) * (segs - 1) +
+               cake_calc_overhead(q, last_len, off));
+}
+
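+/* The overflow heap is a binary max-heap over every (tin, flow) pair, keyed
+ * by queue backlog, so cake_drop() can find the longest queue at the root
+ * and repair the heap in O(log n) after each change.
+ */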
+static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
+{
+       struct cake_heap_entry ii = q->overflow_heap[i];
+       struct cake_heap_entry jj = q->overflow_heap[j];
+
+       q->overflow_heap[i] = jj;
+       q->overflow_heap[j] = ii;
+
+       q->tins[ii.t].overflow_idx[ii.b] = j;
+       q->tins[jj.t].overflow_idx[jj.b] = i;
+}
+
+static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i)
+{
+       struct cake_heap_entry ii = q->overflow_heap[i];
+
+       return q->tins[ii.t].backlogs[ii.b];
+}
+
+static void cake_heapify(struct cake_sched_data *q, u16 i)
+{
+       static const u32 a = CAKE_MAX_TINS * CAKE_QUEUES;
+       u32 mb = cake_heap_get_backlog(q, i);
+       u32 m = i;
+
+       while (m < a) {
+               u32 l = m + m + 1;
+               u32 r = l + 1;
+
+               if (l < a) {
+                       u32 lb = cake_heap_get_backlog(q, l);
+
+                       if (lb > mb) {
+                               m  = l;
+                               mb = lb;
+                       }
+               }
+
+               if (r < a) {
+                       u32 rb = cake_heap_get_backlog(q, r);
+
+                       if (rb > mb) {
+                               m  = r;
+                               mb = rb;
+                       }
+               }
+
+               if (m != i) {
+                       cake_heap_swap(q, i, m);
+                       i = m;
+               } else {
+                       break;
+               }
+       }
+}
+
+static void cake_heapify_up(struct cake_sched_data *q, u16 i)
+{
+       while (i > 0 && i < CAKE_MAX_TINS * CAKE_QUEUES) {
+               u16 p = (i - 1) >> 1;
+               u32 ib = cake_heap_get_backlog(q, i);
+               u32 pb = cake_heap_get_backlog(q, p);
+
+               if (ib > pb) {
+                       cake_heap_swap(q, i, p);
+                       i = p;
+               } else {
+                       break;
+               }
+       }
+}
+
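+/* Charge a packet's serialisation time to the tin and global shaper clocks.
+ * The failsafe clock advances by 1.5x the packet duration (i.e. it models
+ * two thirds of the shaped rate) and only for packets actually delivered,
+ * so the scheduler can always unwedge even if the main clock is pushed far
+ * into the future by drops.
+ */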
+static int cake_advance_shaper(struct cake_sched_data *q,
+                              struct cake_tin_data *b,
+                              struct sk_buff *skb,
+                              ktime_t now, bool drop)
+{
+       u32 len = get_cobalt_cb(skb)->adjusted_len;
+
+       /* charge packet bandwidth to this tin
+        * and to the global shaper.
+        */
+       if (q->rate_ns) {
+               u64 tin_dur = (len * b->tin_rate_ns) >> b->tin_rate_shft;
+               u64 global_dur = (len * q->rate_ns) >> q->rate_shft;
+               u64 failsafe_dur = global_dur + (global_dur >> 1);
+
+               if (ktime_before(b->time_next_packet, now))
+                       b->time_next_packet = ktime_add_ns(b->time_next_packet,
+                                                          tin_dur);
+
+               else if (ktime_before(b->time_next_packet,
+                                     ktime_add_ns(now, tin_dur)))
+                       b->time_next_packet = ktime_add_ns(now, tin_dur);
+
+               q->time_next_packet = ktime_add_ns(q->time_next_packet,
+                                                  global_dur);
+               if (!drop)
+                       q->failsafe_next_packet =
+                               ktime_add_ns(q->failsafe_next_packet,
+                                            failsafe_dur);
+       }
+       return len;
+}
+
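+/* Drop one packet from the flow with the largest backlog (the heap root) to
+ * relieve buffer pressure; returns the victim's flow index with the tin in
+ * the upper 16 bits.
+ */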
+static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
+{
+       struct cake_sched_data *q = qdisc_priv(sch);
+       ktime_t now = ktime_get();
+       u32 idx = 0, tin = 0, len;
+       struct cake_heap_entry qq;
+       struct cake_tin_data *b;
+       struct cake_flow *flow;
+       struct sk_buff *skb;
+
+       if (!q->overflow_timeout) {
+               int i;
+               /* Build fresh max-heap */
+               for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2; i >= 0; i--)
+                       cake_heapify(q, i);
+       }
+       q->overflow_timeout = 65535;
+
+       /* select longest queue for pruning */
+       qq  = q->overflow_heap[0];
+       tin = qq.t;
+       idx = qq.b;
+
+       b = &q->tins[tin];
+       flow = &b->flows[idx];
+       skb = dequeue_head(flow);
+       if (unlikely(!skb)) {
+               /* heap has gone wrong, rebuild it next time */
+               q->overflow_timeout = 0;
+               return idx + (tin << 16);
+       }
+
+       if (cobalt_queue_full(&flow->cvars, &b->cparams, now))
+               b->unresponsive_flow_count++;
+
+       len = qdisc_pkt_len(skb);
+       q->buffer_used      -= skb->truesize;
+       b->backlogs[idx]    -= len;
+       b->tin_backlog      -= len;
+       sch->qstats.backlog -= len;
+       qdisc_tree_reduce_backlog(sch, 1, len);
+
+       flow->dropped++;
+       b->tin_dropped++;
+       sch->qstats.drops++;
+
+       if (q->rate_flags & CAKE_FLAG_INGRESS)
+               cake_advance_shaper(q, b, skb, now, true);
+
+       __qdisc_drop(skb, to_free);
+       sch->q.qlen--;
+
+       cake_heapify(q, 0);
+
+       return idx + (tin << 16);
+}
+
+static void cake_wash_diffserv(struct sk_buff *skb)
+{
+       switch (skb->protocol) {
+       case htons(ETH_P_IP):
+               ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+               break;
+       case htons(ETH_P_IPV6):
+               ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+               break;
+       default:
+               break;
+       }
+}
+
+static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+{
+       u8 dscp;
+
+       switch (skb->protocol) {
+       case htons(ETH_P_IP):
+               dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+               if (wash && dscp)
+                       ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+               return dscp;
+
+       case htons(ETH_P_IPV6):
+               dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+               if (wash && dscp)
+                       ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+               return dscp;
+
+       case htons(ETH_P_ARP):
+               return 0x38;  /* CS7 - Net Control */
+
+       default:
+               /* If there is no Diffserv field, treat as best-effort */
+               return 0;
+       }
+}
+
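+/* Choose the tin for a packet: an skb->priority minor number addressed to
+ * this qdisc takes precedence, then the DSCP-to-tin lookup table (unless in
+ * besteffort mode), and tin 0 otherwise.
+ */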
+static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
+                                            struct sk_buff *skb)
+{
+       struct cake_sched_data *q = qdisc_priv(sch);
+       u32 tin;
+
+       if (TC_H_MAJ(skb->priority) == sch->handle &&
+           TC_H_MIN(skb->priority) > 0 &&
+           TC_H_MIN(skb->priority) <= q->tin_cnt) {
+               tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
+
+               if (q->rate_flags & CAKE_FLAG_WASH)
+                       cake_wash_diffserv(skb);
+       } else if (q->tin_mode != CAKE_DIFFSERV_BESTEFFORT) {
+               /* extract the Diffserv Precedence field, if it exists,
+                * and clear the DSCP bits if washing
+                */
+               tin = q->tin_index[cake_handle_diffserv(skb,
+                               q->rate_flags & CAKE_FLAG_WASH)];
+               if (unlikely(tin >= q->tin_cnt))
+                       tin = 0;
+       } else {
+               tin = 0;
+               if (q->rate_flags & CAKE_FLAG_WASH)
+                       cake_wash_diffserv(skb);
+       }
+
+       return &q->tins[tin];
+}
+
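+/* Returns a 1-based flow index inside the selected tin, or 0 if an attached
+ * tc filter stole or dropped the packet. A filter match overrides the
+ * set-associative hash.
+ */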
+static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
+                        struct sk_buff *skb, int flow_mode, int *qerr)
+{
+       struct cake_sched_data *q = qdisc_priv(sch);
+       struct tcf_proto *filter;
+       struct tcf_result res;
+       u32 flow = 0;
+       int result;
+
+       filter = rcu_dereference_bh(q->filter_list);
+       if (!filter)
+               goto hash;
+
+       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+       result = tcf_classify(skb, filter, &res, false);
+
+       if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+               switch (result) {
+               case TC_ACT_STOLEN:
+               case TC_ACT_QUEUED:
+               case TC_ACT_TRAP:
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+                       /* fall through */
+               case TC_ACT_SHOT:
+                       return 0;
+               }
+#endif
+               if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
+                       flow = TC_H_MIN(res.classid);
+       }
+hash:
+       *t = cake_select_tin(sch, skb);
+       return flow ?: cake_hash(*t, skb, flow_mode) + 1;
+}
+
+static void cake_reconfigure(struct Qdisc *sch);
+
+static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+                       struct sk_buff **to_free)
+{
+       struct cake_sched_data *q = qdisc_priv(sch);
+       int len = qdisc_pkt_len(skb);
+       int uninitialized_var(ret);
+       struct sk_buff *ack = NULL;
+       ktime_t now = ktime_get();
+       struct cake_tin_data *b;
+       struct cake_flow *flow;
+       u32 idx;
+
+       /* choose flow to insert into */
+       idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
+       if (idx == 0) {
+               if (ret & __NET_XMIT_BYPASS)
+                       qdisc_qstats_drop(sch);
+               __qdisc_drop(skb, to_free);
+               return ret;
+       }
+       idx--;
+       flow = &b->flows[idx];
+
+       /* ensure shaper state isn't stale */
+       if (!b->tin_backlog) {
+               if (ktime_before(b->time_next_packet, now))
+                       b->time_next_packet = now;
+
+               if (!sch->q.qlen) {
+                       if (ktime_before(q->time_next_packet, now)) {
+                               q->failsafe_next_packet = now;
+                               q->time_next_packet = now;
+                       } else if (ktime_after(q->time_next_packet, now) &&
+                                  ktime_after(q->failsafe_next_packet, now)) {
+                               u64 next =
+                                       min(ktime_to_ns(q->time_next_packet),
+                                           ktime_to_ns(
+                                                  q->failsafe_next_packet));
+                               sch->qstats.overlimits++;
+                               qdisc_watchdog_schedule_ns(&q->watchdog, next);
+                       }
+               }
+       }
+
+       if (unlikely(len > b->max_skblen))
+               b->max_skblen = len;
+
+       if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
+               struct sk_buff *segs, *nskb;
+               netdev_features_t features = netif_skb_features(skb);
+               unsigned int slen = 0;
+
+               segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+               if (IS_ERR_OR_NULL(segs))
+                       return qdisc_drop(skb, sch, to_free);
+
+               while (segs) {
+                       nskb = segs->next;
+                       segs->next = NULL;
+                       qdisc_skb_cb(segs)->pkt_len = segs->len;
+                       cobalt_set_enqueue_time(segs, now);
+                       get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
+                                                                         segs);
+                       flow_queue_add(flow, segs);
+
+                       sch->q.qlen++;
+                       slen += segs->len;
+                       q->buffer_used += segs->truesize;
+                       b->packets++;
+                       segs = nskb;
+               }
+
+               /* stats */
+               b->bytes            += slen;
+               b->backlogs[idx]    += slen;
+               b->tin_backlog      += slen;
+               sch->qstats.backlog += slen;
+               q->avg_window_bytes += slen;
+
+               qdisc_tree_reduce_backlog(sch, 1, len);
+               consume_skb(skb);
+       } else {
+               /* not splitting */
+               cobalt_set_enqueue_time(skb, now);
+               get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
+               flow_queue_add(flow, skb);
+
+               if (q->ack_filter)
+                       ack = cake_ack_filter(q, flow);
+
+               if (ack) {
+                       b->ack_drops++;
+                       sch->qstats.drops++;
+                       b->bytes += qdisc_pkt_len(ack);
+                       len -= qdisc_pkt_len(ack);
+                       q->buffer_used += skb->truesize - ack->truesize;
+                       if (q->rate_flags & CAKE_FLAG_INGRESS)
+                               cake_advance_shaper(q, b, ack, now, true);
+
+                       qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+                       consume_skb(ack);
+               } else {
+                       sch->q.qlen++;
+                       q->buffer_used      += skb->truesize;
+               }
+
+               /* stats */
+               b->packets++;
+               b->bytes            += len;
+               b->backlogs[idx]    += len;
+               b->tin_backlog      += len;
+               sch->qstats.backlog += len;
+               q->avg_window_bytes += len;
+       }
+
+       if (q->overflow_timeout)
+               cake_heapify_up(q, b->overflow_idx[idx]);
+
+       /* incoming bandwidth capacity estimate */
+       if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
+               u64 packet_interval =
+                       ktime_to_ns(ktime_sub(now, q->last_packet_time));
+
+               if (packet_interval > NSEC_PER_SEC)
+                       packet_interval = NSEC_PER_SEC;
+
+               /* filter out short-term bursts, eg. wifi aggregation */
+               q->avg_packet_interval =
+                       cake_ewma(q->avg_packet_interval,
+                                 packet_interval,
+                                 (packet_interval > q->avg_packet_interval ?
+                                         2 : 8));
+
+               q->last_packet_time = now;
+
+               if (packet_interval > q->avg_packet_interval) {
+                       u64 window_interval =
+                               ktime_to_ns(ktime_sub(now,
+                                                     q->avg_window_begin));
+                       u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
+
+                       do_div(b, window_interval);
+                       q->avg_peak_bandwidth =
+                               cake_ewma(q->avg_peak_bandwidth, b,
+                                         b > q->avg_peak_bandwidth ? 2 : 8);
+                       q->avg_window_bytes = 0;
+                       q->avg_window_begin = now;
+
+                       if (ktime_after(now,
+                                       ktime_add_ms(q->last_reconfig_time,
+                                                    250))) {
+                               q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
+                               cake_reconfigure(sch);
+                       }
+               }
+       } else {
+               q->avg_window_bytes = 0;
+               q->last_packet_time = now;
+       }
+
+       /* flowchain */
+       if (!flow->set || flow->set == CAKE_SET_DECAYING) {
+               struct cake_host *srchost = &b->hosts[flow->srchost];
+               struct cake_host *dsthost = &b->hosts[flow->dsthost];
+               u16 host_load = 1;
+
+               if (!flow->set) {
+                       list_add_tail(&flow->flowchain, &b->new_flows);
+               } else {
+                       b->decaying_flow_count--;
+                       list_move_tail(&flow->flowchain, &b->new_flows);
+               }
+               flow->set = CAKE_SET_SPARSE;
+               b->sparse_flow_count++;
+
+               if (cake_dsrc(q->flow_mode))
+                       host_load = max(host_load, srchost->srchost_refcnt);
+
+               if (cake_ddst(q->flow_mode))
+                       host_load = max(host_load, dsthost->dsthost_refcnt);
+
+               flow->deficit = (b->flow_quantum *
+                                quantum_div[host_load]) >> 16;
+       } else if (flow->set == CAKE_SET_SPARSE_WAIT) {
+               /* this flow was empty, accounted as a sparse flow, but is
+                * actually in the bulk rotation.
+                */
+               flow->set = CAKE_SET_BULK;
+               b->sparse_flow_count--;
+               b->bulk_flow_count++;
+       }
+
+       if (q->buffer_used > q->buffer_max_used)
+               q->buffer_max_used = q->buffer_used;
+
+       if (q->buffer_used > q->buffer_limit) {
+               u32 dropped = 0;
+
+               while (q->buffer_used > q->buffer_limit) {
+                       dropped++;
+                       cake_drop(sch, to_free);
+               }
+               b->drop_overlimit += dropped;
+       }
+       return NET_XMIT_SUCCESS;
+}
+
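+/* Remove the head packet of the currently selected (cur_tin, cur_flow)
+ * queue and update all backlog accounting, including the overflow heap.
+ */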
+static struct sk_buff *cake_dequeue_one(struct Qdisc *sch)
+{
+       struct cake_sched_data *q = qdisc_priv(sch);
+       struct cake_tin_data *b = &q->tins[q->cur_tin];
+       struct cake_flow *flow = &b->flows[q->cur_flow];
+       struct sk_buff *skb = NULL;
+       u32 len;
+
+       if (flow->head) {
+               skb = dequeue_head(flow);
+               len = qdisc_pkt_len(skb);
+               b->backlogs[q->cur_flow] -= len;
+               b->tin_backlog           -= len;
+               sch->qstats.backlog      -= len;
+               q->buffer_used           -= skb->truesize;
+               sch->q.qlen--;
+
+               if (q->overflow_timeout)
+                       cake_heapify(q, b->overflow_idx[q->cur_flow]);
+       }
+       return skb;
+}
+
+/* Discard leftover packets from a tin no longer in use. */
+static void cake_clear_tin(struct Qdisc *sch, u16 tin)
+{
+       struct cake_sched_data *q = qdisc_priv(sch);
+       struct sk_buff *skb;
+
+       q->cur_tin = tin;
+       for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++)
+               while (!!(skb = cake_dequeue_one(sch)))
+                       kfree_skb(skb);
+}
+
+static struct sk_buff *cake_dequeue(struct Qdisc *sch)
+{
+       struct cake_sched_data *q = qdisc_priv(sch);
+       struct cake_tin_data *b = &q->tins[q->cur_tin];
+       struct cake_host *srchost, *dsthost;
+       ktime_t now = ktime_get();
+       struct cake_flow *flow;
+       struct list_head *head;
+       bool first_flow = true;
+       struct sk_buff *skb;
+       u16 host_load;
+       u64 delay;
+       u32 len;
+
+begin:
+       if (!sch->q.qlen)
+               return NULL;
+
+       /* global hard shaper */
+       if (ktime_after(q->time_next_packet, now) &&
+           ktime_after(q->failsafe_next_packet, now)) {
+               u64 next = min(ktime_to_ns(q->time_next_packet),
+                              ktime_to_ns(q->failsafe_next_packet));
+
+               sch->qstats.overlimits++;
+               qdisc_watchdog_schedule_ns(&q->watchdog, next);
+               return NULL;
+       }
+
+       /* Choose a class to work on. */
+       if (!q->rate_ns) {
+               /* In unlimited mode, can't rely on shaper timings, just balance
+                * with DRR
+                */
+               bool wrapped = false, empty = true;
+
+               while (b->tin_deficit < 0 ||
+                      !(b->sparse_flow_count + b->bulk_flow_count)) {
+                       if (b->tin_deficit <= 0)
+                               b->tin_deficit += b->tin_quantum_band;
+                       if (b->sparse_flow_count + b->bulk_flow_count)
+                               empty = false;
+
+                       q->cur_tin++;
+                       b++;
+                       if (q->cur_tin >= q->tin_cnt) {
+                               q->cur_tin = 0;
+                               b = q->tins;
+
+                               if (wrapped) {
+                                       /* It's possible for q->qlen to be
+                                        * nonzero when we actually have no
+                                        * packets anywhere.
+                                        */
+                                       if (empty)
+                                               return NULL;
+                               } else {
+                                       wrapped = true;
+                               }
+                       }
+               }
+       } else {
+               /* In shaped mode, choose:
+                * - Highest-priority tin with queue and meeting schedule, or
+                * - The earliest-scheduled tin with queue.
+                */
+               ktime_t best_time = KTIME_MAX;
+               int tin, best_tin = 0;
+
+               for (tin = 0; tin < q->tin_cnt; tin++) {
+                       b = q->tins + tin;
+                       if ((b->sparse_flow_count + b->bulk_flow_count) > 0) {
+                               ktime_t time_to_pkt =
+                                       ktime_sub(b->time_next_packet, now);
+
+                               if (ktime_to_ns(time_to_pkt) <= 0 ||
+                                   ktime_compare(time_to_pkt,
+                                                 best_time) <= 0) {
+                                       best_time = time_to_pkt;
+                                       best_tin = tin;
+                               }
+                       }
+               }
+
+               q->cur_tin = best_tin;
+               b = q->tins + best_tin;
+
+               /* No point in going further if no packets to deliver. */
+               if (unlikely(!(b->sparse_flow_count + b->bulk_flow_count)))
+                       return NULL;
+       }
+
+retry:
+       /* service this class */
+       head = &b->decaying_flows;
+       if (!first_flow || list_empty(head)) {
+               head = &b->new_flows;
+               if (list_empty(head)) {
+                       head = &b->old_flows;
+                       if (unlikely(list_empty(head))) {
+                               head = &b->decaying_flows;
+                               if (unlikely(list_empty(head)))
+                                       goto begin;
+                       }
+               }
+       }
+       flow = list_first_entry(head, struct cake_flow, flowchain);
+       q->cur_flow = flow - b->flows;
+       first_flow = false;
+
+       /* triple isolation (modified DRR++) */
+       srchost = &b->hosts[flow->srchost];
+       dsthost = &b->hosts[flow->dsthost];
+       host_load = 1;
+
+       if (cake_dsrc(q->flow_mode))
+               host_load = max(host_load, srchost->srchost_refcnt);
+
+       if (cake_ddst(q->flow_mode))
+               host_load = max(host_load, dsthost->dsthost_refcnt);
+
+       WARN_ON(host_load > CAKE_QUEUES);
+
+       /* flow isolation (DRR++) */
+       if (flow->deficit <= 0) {
+               /* The shifted prandom_u32() is a way to apply dithering to
+                * avoid accumulating roundoff errors
+                */
+               flow->deficit += (b->flow_quantum * quantum_div[host_load] +
+                                 (prandom_u32() >> 16)) >> 16;
+               list_move_tail(&flow->flowchain, &b->old_flows);
+
+               /* Keep all flows with deficits out of the sparse and decaying
+                * rotations.  No non-empty flow can go into the decaying
+                * rotation, so they can't get deficits.
+                */
+               if (flow->set == CAKE_SET_SPARSE) {
+                       if (flow->head) {
+                               b->sparse_flow_count--;
+                               b->bulk_flow_count++;
+                               flow->set = CAKE_SET_BULK;
+                       } else {
+                               /* we've moved it to the bulk rotation for
+                                * correct deficit accounting but we still want
+                                * to count it as a sparse flow, not a bulk one.
+                                */
+                               flow->set = CAKE_SET_SPARSE_WAIT;
+                       }
+               }
+               goto retry;
+       }
+
+       /* Retrieve a packet via the AQM */
+       while (1) {
+               skb = cake_dequeue_one(sch);
+               if (!skb) {
+                       /* this queue was actually empty */
+                       if (cobalt_queue_empty(&flow->cvars, &b->cparams, now))
+                               b->unresponsive_flow_count--;
+
+                       if (flow->cvars.p_drop || flow->cvars.count ||
+                           ktime_before(now, flow->cvars.drop_next)) {
+                               /* keep in the flowchain until the state has
+                                * decayed to rest
+                                */
+                               list_move_tail(&flow->flowchain,
+                                              &b->decaying_flows);
+                               if (flow->set == CAKE_SET_BULK) {
+                                       b->bulk_flow_count--;
+                                       b->decaying_flow_count++;
+                               } else if (flow->set == CAKE_SET_SPARSE ||
+                                          flow->set == CAKE_SET_SPARSE_WAIT) {
+                                       b->sparse_flow_count--;
+                                       b->decaying_flow_count++;
+                               }
+                               flow->set = CAKE_SET_DECAYING;
+                       } else {
+                               /* remove empty queue from the flowchain */
+                               list_del_init(&flow->flowchain);
+                               if (flow->set == CAKE_SET_SPARSE ||
+                                   flow->set == CAKE_SET_SPARSE_WAIT)
+                                       b->sparse_flow_count--;
+                               else if (flow->set == CAKE_SET_BULK)
+                                       b->bulk_flow_count--;
+                               else
+                                       b->decaying_flow_count--;
+
+                               flow->set = CAKE_SET_NONE;
+                               srchost->srchost_refcnt--;
+                               dsthost->dsthost_refcnt--;
+                       }
+                       goto begin;
+               }
+
+               /* Last packet in queue may be marked, shouldn't be dropped */
+               if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
+                                       (b->bulk_flow_count *
+                                        !!(q->rate_flags &
+                                           CAKE_FLAG_INGRESS))) ||
+                   !flow->head)
+                       break;
+
+               /* drop this packet, get another one */
+               if (q->rate_flags & CAKE_FLAG_INGRESS) {
+                       len = cake_advance_shaper(q, b, skb,
+                                                 now, true);
+                       flow->deficit -= len;
+                       b->tin_deficit -= len;
+               }
+               flow->dropped++;
+               b->tin_dropped++;
+               qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
+               qdisc_qstats_drop(sch);
+               kfree_skb(skb);
+               if (q->rate_flags & CAKE_FLAG_INGRESS)
+                       goto retry;
+       }
+
+       b->tin_ecn_mark += !!flow->cvars.ecn_marked;
+       qdisc_bstats_update(sch, skb);
+
+       /* collect delay stats */
+       delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
+       b->avge_delay = cake_ewma(b->avge_delay, delay, 8);
+       b->peak_delay = cake_ewma(b->peak_delay, delay,
+                                 delay > b->peak_delay ? 2 : 8);
+       b->base_delay = cake_ewma(b->base_delay, delay,
+                                 delay < b->base_delay ? 2 : 8);
+
+       len = cake_advance_shaper(q, b, skb, now, false);
+       flow->deficit -= len;
+       b->tin_deficit -= len;
+
+       if (ktime_after(q->time_next_packet, now) && sch->q.qlen) {
+               u64 next = min(ktime_to_ns(q->time_next_packet),
+                              ktime_to_ns(q->failsafe_next_packet));
+
+               qdisc_watchdog_schedule_ns(&q->watchdog, next);
+       } else if (!sch->q.qlen) {
+               int i;
+
+               for (i = 0; i < q->tin_cnt; i++) {
+                       if (q->tins[i].decaying_flow_count) {
+                               ktime_t next =
+                                       ktime_add_ns(now,
+                                                    q->tins[i].cparams.target);
+
+                               qdisc_watchdog_schedule_ns(&q->watchdog,
+                                                          ktime_to_ns(next));
+                               break;
+                       }
+               }
+       }
+
+       if (q->overflow_timeout)
+               q->overflow_timeout--;
+
+       return skb;
+}
+
+static void cake_reset(struct Qdisc *sch)
+{
+       u32 c;
+
+       for (c = 0; c < CAKE_MAX_TINS; c++)
+               cake_clear_tin(sch, c);
+}
+
+static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
+       [TCA_CAKE_BASE_RATE64]   = { .type = NLA_U64 },
+       [TCA_CAKE_DIFFSERV_MODE] = { .type = NLA_U32 },
+       [TCA_CAKE_ATM]           = { .type = NLA_U32 },
+       [TCA_CAKE_FLOW_MODE]     = { .type = NLA_U32 },
+       [TCA_CAKE_OVERHEAD]      = { .type = NLA_S32 },
+       [TCA_CAKE_RTT]           = { .type = NLA_U32 },
+       [TCA_CAKE_TARGET]        = { .type = NLA_U32 },
+       [TCA_CAKE_AUTORATE]      = { .type = NLA_U32 },
+       [TCA_CAKE_MEMORY]        = { .type = NLA_U32 },
+       [TCA_CAKE_NAT]           = { .type = NLA_U32 },
+       [TCA_CAKE_RAW]           = { .type = NLA_U32 },
+       [TCA_CAKE_WASH]          = { .type = NLA_U32 },
+       [TCA_CAKE_MPU]           = { .type = NLA_U32 },
+       [TCA_CAKE_INGRESS]       = { .type = NLA_U32 },
+       [TCA_CAKE_ACK_FILTER]    = { .type = NLA_U32 },
+};
+
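+/* Worked example: rate = 12,500,000 bytes/s (100 Mbit/s) gives a time per
+ * byte of 80 ns. rate_ns starts as (10^9 << 34) / rate = 80 << 34, which is
+ * normalised below 2^34 by halving seven times, leaving rate_ns = 80 << 27
+ * and rate_shft = 27; (len * rate_ns) >> rate_shft then yields 80 ns/byte.
+ */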
+static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
+                         u64 target_ns, u64 rtt_est_ns)
+{
+       /* convert byte-rate into time-per-byte
+        * so it will always unwedge in reasonable time.
+        */
+       static const u64 MIN_RATE = 64;
+       u32 byte_target = mtu;
+       u64 byte_target_ns;
+       u8  rate_shft = 0;
+       u64 rate_ns = 0;
+
+       b->flow_quantum = 1514;
+       if (rate) {
+               b->flow_quantum = max(min(rate >> 12, 1514ULL), 300ULL);
+               rate_shft = 34;
+               rate_ns = ((u64)NSEC_PER_SEC) << rate_shft;
+               rate_ns = div64_u64(rate_ns, max(MIN_RATE, rate));
+               while (!!(rate_ns >> 34)) {
+                       rate_ns >>= 1;
+                       rate_shft--;
+               }
+       } /* else unlimited, ie. zero delay */
+
+       b->tin_rate_bps  = rate;
+       b->tin_rate_ns   = rate_ns;
+       b->tin_rate_shft = rate_shft;
+
+       byte_target_ns = (byte_target * rate_ns) >> rate_shft;
+
+       b->cparams.target = max((byte_target_ns * 3) / 2, target_ns);
+       b->cparams.interval = max(rtt_est_ns +
+                                    b->cparams.target - target_ns,
+                                    b->cparams.target * 2);
+       b->cparams.mtu_time = byte_target_ns;
+       b->cparams.p_inc = 1 << 24; /* 1/256 */
+       b->cparams.p_dec = 1 << 20; /* 1/4096 */
+}
+
+static int cake_config_besteffort(struct Qdisc *sch)
+{
+       struct cake_sched_data *q = qdisc_priv(sch);
+       struct cake_tin_data *b = &q->tins[0];
+       u32 mtu = psched_mtu(qdisc_dev(sch));
+       u64 rate = q->rate_bps;
+
+       q->tin_cnt = 1;
+
+       q->tin_index = besteffort;
+       q->tin_order = normal_order;
+
+       cake_set_rate(b, rate, mtu,
+                     us_to_ns(q->target), us_to_ns(q->interval));
+       b->tin_quantum_band = 65535;
+       b->tin_quantum_prio = 65535;
+
+       return 0;
+}
+
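+/* Each successive precedence class is shaped to 7/8 of the previous class's
+ * rate, while its priority weight grows by 3/2 and its bandwidth-sharing
+ * weight shrinks by 7/8 per step.
+ */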
+static int cake_config_precedence(struct Qdisc *sch)
+{
+       /* convert high-level (user visible) parameters into internal format */
+       struct cake_sched_data *q = qdisc_priv(sch);
+       u32 mtu = psched_mtu(qdisc_dev(sch));
+       u64 rate = q->rate_bps;
+       u32 quantum1 = 256;
+       u32 quantum2 = 256;
+       u32 i;
+
+       q->tin_cnt = 8;
+       q->tin_index = precedence;
+       q->tin_order = normal_order;
+
+       for (i = 0; i < q->tin_cnt; i++) {
+               struct cake_tin_data *b = &q->tins[i];
+
+               cake_set_rate(b, rate, mtu, us_to_ns(q->target),
+                             us_to_ns(q->interval));
+
+               b->tin_quantum_prio = max_t(u16, 1U, quantum1);
+               b->tin_quantum_band = max_t(u16, 1U, quantum2);
+
+               /* calculate next class's parameters */
+               rate  *= 7;
+               rate >>= 3;
+
+               quantum1  *= 3;
+               quantum1 >>= 1;
+
+               quantum2  *= 7;
+               quantum2 >>= 3;
+       }
+
+       return 0;
+}
+
+/*     List of known Diffserv codepoints:
+ *
+ *     Least Effort (CS1)
+ *     Best Effort (CS0)
+ *     Max Reliability & LLT "Lo" (TOS1)
+ *     Max Throughput (TOS2)
+ *     Min Delay (TOS4)
+ *     LLT "La" (TOS5)
+ *     Assured Forwarding 1 (AF1x) - x3
+ *     Assured Forwarding 2 (AF2x) - x3
+ *     Assured Forwarding 3 (AF3x) - x3
+ *     Assured Forwarding 4 (AF4x) - x3
+ *     Precedence Class 2 (CS2)
+ *     Precedence Class 3 (CS3)
+ *     Precedence Class 4 (CS4)
+ *     Precedence Class 5 (CS5)
+ *     Precedence Class 6 (CS6)
+ *     Precedence Class 7 (CS7)
+ *     Voice Admit (VA)
+ *     Expedited Forwarding (EF)
+ *
+ *     Total 25 codepoints.
+ */
+
+/*     List of traffic classes in RFC 4594:
+ *             (roughly descending order of contended priority)
+ *             (roughly ascending order of uncontended throughput)
+ *
+ *     Network Control (CS6,CS7)      - routing traffic
+ *     Telephony (EF,VA)         - aka. VoIP streams
+ *     Signalling (CS5)               - VoIP setup
+ *     Multimedia Conferencing (AF4x) - aka. video calls
+ *     Realtime Interactive (CS4)     - eg. games
+ *     Multimedia Streaming (AF3x)    - eg. YouTube, NetFlix, Twitch
+ *     Broadcast Video (CS3)
+ *     Low Latency Data (AF2x,TOS4)      - eg. database
+ *     Ops, Admin, Management (CS2,TOS1) - eg. ssh
+ *     Standard Service (CS0 & unrecognised codepoints)
+ *     High Throughput Data (AF1x,TOS2)  - eg. web traffic
+ *     Low Priority Data (CS1)           - eg. BitTorrent
+ *
+ *     Total 12 traffic classes.
+ */
+
+static int cake_config_diffserv8(struct Qdisc *sch)
+{
+/*     Pruned list of traffic classes for typical applications:
+ *
+ *             Network Control          (CS6, CS7)
+ *             Minimum Latency          (EF, VA, CS5, CS4)
+ *             Interactive Shell        (CS2, TOS1)
+ *             Low Latency Transactions (AF2x, TOS4)
+ *             Video Streaming          (AF4x, AF3x, CS3)
+ *             Bog Standard             (CS0 etc.)
+ *             High Throughput          (AF1x, TOS2)
+ *             Background Traffic       (CS1)
+ *
+ *             Total 8 traffic classes.
+ */
+
+       struct cake_sched_data *q = qdisc_priv(sch);
+       u32 mtu = psched_mtu(qdisc_dev(sch));
+       u64 rate = q->rate_bps;
+       u32 quantum1 = 256;
+       u32 quantum2 = 256;
+       u32 i;
+
+       q->tin_cnt = 8;
+
+       /* codepoint to class mapping */
+       q->tin_index = diffserv8;
+       q->tin_order = normal_order;
+
+       /* class characteristics */
+       for (i = 0; i < q->tin_cnt; i++) {
+               struct cake_tin_data *b = &q->tins[i];
+
+               cake_set_rate(b, rate, mtu, us_to_ns(q->target),
+                             us_to_ns(q->interval));
+
+               b->tin_quantum_prio = max_t(u16, 1U, quantum1);
+               b->tin_quantum_band = max_t(u16, 1U, quantum2);
+
+               /* calculate next class's parameters */
+               rate  *= 7;
+               rate >>= 3;
+
+               quantum1  *= 3;
+               quantum1 >>= 1;
+
+               quantum2  *= 7;
+               quantum2 >>= 3;
+       }
+
+       return 0;
+}
+
+static int cake_config_diffserv4(struct Qdisc *sch)
+{
+/*  Further pruned list of traffic classes for four-class system:
+ *
+ *         Latency Sensitive  (CS7, CS6, EF, VA, CS5, CS4)
+ *         Streaming Media    (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1)
+ *         Best Effort        (CS0, AF1x, TOS2, and those not specified)
+ *         Background Traffic (CS1)
+ *
+ *             Total 4 traffic classes.
+ */
+
+       struct cake_sched_data *q = qdisc_priv(sch);
+       u32 mtu = psched_mtu(qdisc_dev(sch));
+       u64 rate = q->rate_bps;
+       u32 quantum = 1024;
+
+       q->tin_cnt = 4;
+
+       /* codepoint to class mapping */
+       q->tin_index = diffserv4;
+       q->tin_order = bulk_order;
+
+       /* class characteristics */
+       cake_set_rate(&q->tins[0], rate, mtu,
+                     us_to_ns(q->target), us_to_ns(q->interval));
+       cake_set_rate(&q->tins[1], rate >> 4, mtu,
+                     us_to_ns(q->target), us_to_ns(q->interval));
+       cake_set_rate(&q->tins[2], rate >> 1, mtu,
+                     us_to_ns(q->target), us_to_ns(q->interval));
+       cake_set_rate(&q->tins[3], rate >> 2, mtu,
+                     us_to_ns(q->target), us_to_ns(q->interval));
+
+       /* priority weights */
+       q->tins[0].tin_quantum_prio = quantum;
+       q->tins[1].tin_quantum_prio = quantum >> 4;
+       q->tins[2].tin_quantum_prio = quantum << 2;
+       q->tins[3].tin_quantum_prio = quantum << 4;
+
+       /* bandwidth-sharing weights */
+       q->tins[0].tin_quantum_band = quantum;
+       q->tins[1].tin_quantum_band = quantum >> 4;
+       q->tins[2].tin_quantum_band = quantum >> 1;
+       q->tins[3].tin_quantum_band = quantum >> 2;
+
+       return 0;
+}
+
+static int cake_config_diffserv3(struct Qdisc *sch)
+{
+/*  Simplified Diffserv structure with 3 tins.
+ *             Low Priority            (CS1)
+ *             Best Effort
+ *             Latency Sensitive       (TOS4, VA, EF, CS6, CS7)
+ */
+       struct cake_sched_data *q = qdisc_priv(sch);
+       u32 mtu = psched_mtu(qdisc_dev(sch));
+       u64 rate = q->rate_bps;
+       u32 quantum = 1024;
+
+       q->tin_cnt = 3;
+
+       /* codepoint to class mapping */
+       q->tin_index = diffserv3;
+       q->tin_order = bulk_order;
+
+       /* class characteristics */
+       cake_set_rate(&q->tins[0], rate, mtu,
+                     us_to_ns(q->target), us_to_ns(q->interval));
+       cake_set_rate(&q->tins[1], rate >> 4, mtu,
+                     us_to_ns(q->target), us_to_ns(q->interval));
+       cake_set_rate(&q->tins[2], rate >> 2, mtu,
+                     us_to_ns(q->target), us_to_ns(q->interval));
+
+       /* priority weights */
+       q->tins[0].tin_quantum_prio = quantum;
+       q->tins[1].tin_quantum_prio = quantum >> 4;
+       q->tins[2].tin_quantum_prio = quantum << 4;
+
+       /* bandwidth-sharing weights */
+       q->tins[0].tin_quantum_band = quantum;
+       q->tins[1].tin_quantum_band = quantum >> 4;
+       q->tins[2].tin_quantum_band = quantum >> 2;
+
+       return 0;
+}
+
+static void cake_reconfigure(struct Qdisc *sch)
+{
+       struct cake_sched_data *q = qdisc_priv(sch);
+       int c, ft;
+
+       switch (q->tin_mode) {
+       case CAKE_DIFFSERV_BESTEFFORT:
+               ft = cake_config_besteffort(sch);
+               break;
+
+       case CAKE_DIFFSERV_PRECEDENCE:
+               ft = cake_config_precedence(sch);
+               break;
+
+       case CAKE_DIFFSERV_DIFFSERV8:
+               ft = cake_config_diffserv8(sch);
+               break;
+
+       case CAKE_DIFFSERV_DIFFSERV4:
+               ft = cake_config_diffserv4(sch);
+               break;
+
+       case CAKE_DIFFSERV_DIFFSERV3:
+       default:
+               ft = cake_config_diffserv3(sch);
+               break;
+       }
+
+       for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) {
+               cake_clear_tin(sch, c);
+               q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time;
+       }
+
+       q->rate_ns   = q->tins[ft].tin_rate_ns;
+       q->rate_shft = q->tins[ft].tin_rate_shft;
+
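+       /* Buffer limit: the explicitly configured value if set, otherwise
+        * four intervals' worth of data at the shaped rate (minimum 4 MB),
+        * or effectively unlimited in unshaped mode; finally capped at
+        * sch->limit MTU-sized packets unless the explicit value is larger.
+        */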
+       if (q->buffer_config_limit) {
+               q->buffer_limit = q->buffer_config_limit;
+       } else if (q->rate_bps) {
+               u64 t = q->rate_bps * q->interval;
+
+               do_div(t, USEC_PER_SEC / 4);
+               q->buffer_limit = max_t(u32, t, 4U << 20);
+       } else {
+               q->buffer_limit = ~0;
+       }
+
+       sch->flags &= ~TCQ_F_CAN_BYPASS;
+
+       q->buffer_limit = min(q->buffer_limit,
+                             max(sch->limit * psched_mtu(qdisc_dev(sch)),
+                                 q->buffer_config_limit));
+}
+
+static int cake_change(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
+{
+       struct cake_sched_data *q = qdisc_priv(sch);
+       struct nlattr *tb[TCA_CAKE_MAX + 1];
+       int err;
+
+       if (!opt)
+               return -EINVAL;
+
+       err = nla_parse_nested(tb, TCA_CAKE_MAX, opt, cake_policy, extack);
+       if (err < 0)
+               return err;
+
+       if (tb[TCA_CAKE_NAT]) {
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+               q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
+               q->flow_mode |= CAKE_FLOW_NAT_FLAG *
+                       !!nla_get_u32(tb[TCA_CAKE_NAT]);
+#else
+               NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
+                                   "No conntrack support in kernel");
+               return -EOPNOTSUPP;
+#endif
+       }
+
+       if (tb[TCA_CAKE_BASE_RATE64])
+               q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
+
+       if (tb[TCA_CAKE_DIFFSERV_MODE])
+               q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]);
+
+       if (tb[TCA_CAKE_WASH]) {
+               if (!!nla_get_u32(tb[TCA_CAKE_WASH]))
+                       q->rate_flags |= CAKE_FLAG_WASH;
+               else
+                       q->rate_flags &= ~CAKE_FLAG_WASH;
+       }
+
+       if (tb[TCA_CAKE_FLOW_MODE])
+               q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) |
+                               (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
+                                       CAKE_FLOW_MASK));
+
+       if (tb[TCA_CAKE_ATM])
+               q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]);
+
+       if (tb[TCA_CAKE_OVERHEAD]) {
+               q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]);
+               q->rate_flags |= CAKE_FLAG_OVERHEAD;
+
+               q->max_netlen = 0;
+               q->max_adjlen = 0;
+               q->min_netlen = ~0;
+               q->min_adjlen = ~0;
+       }
+
+       if (tb[TCA_CAKE_RAW]) {
+               q->rate_flags &= ~CAKE_FLAG_OVERHEAD;
+
+               q->max_netlen = 0;
+               q->max_adjlen = 0;
+               q->min_netlen = ~0;
+               q->min_adjlen = ~0;
+       }
+
+       if (tb[TCA_CAKE_MPU])
+               q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]);
+
+       if (tb[TCA_CAKE_RTT]) {
+               q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
+
+               if (!q->interval)
+                       q->interval = 1;
+       }
+
+       if (tb[TCA_CAKE_TARGET]) {
+               q->target = nla_get_u32(tb[TCA_CAKE_TARGET]);
+
+               if (!q->target)
+                       q->target = 1;
+       }
+
+       if (tb[TCA_CAKE_AUTORATE]) {
+               if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
+                       q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
+               else
+                       q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
+       }
+
+       if (tb[TCA_CAKE_INGRESS]) {
+               if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
+                       q->rate_flags |= CAKE_FLAG_INGRESS;
+               else
+                       q->rate_flags &= ~CAKE_FLAG_INGRESS;
+       }
+
+       if (tb[TCA_CAKE_ACK_FILTER])
+               q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);
+
+       if (tb[TCA_CAKE_MEMORY])
+               q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
+
+       if (q->rate_bps && q->rate_bps <= CAKE_SPLIT_GSO_THRESHOLD)
+               q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
+       else
+               q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
+
+       if (q->tins) {
+               sch_tree_lock(sch);
+               cake_reconfigure(sch);
+               sch_tree_unlock(sch);
+       }
+
+       return 0;
+}
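
One detail worth calling out: the TCA_CAKE_NAT handler above updates the flag branchlessly, multiplying CAKE_FLOW_NAT_FLAG by the 0-or-1 result of !!nla_get_u32(). A minimal standalone illustration of the idiom, with a stand-in flag value:

    #include <assert.h>

    #define NAT_FLAG (1u << 3)      /* stand-in for CAKE_FLOW_NAT_FLAG */

    static unsigned int set_flag_branchless(unsigned int mode, unsigned int on)
    {
            mode &= ~NAT_FLAG;       /* clear the flag first */
            mode |= NAT_FLAG * !!on; /* OR in flag * 0 or flag * 1 */
            return mode;
    }

    int main(void)
    {
            assert(set_flag_branchless(0, 42) == NAT_FLAG);
            assert(set_flag_branchless(NAT_FLAG, 0) == 0);
            return 0;
    }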
+
+static void cake_destroy(struct Qdisc *sch)
+{
+       struct cake_sched_data *q = qdisc_priv(sch);
+
+       qdisc_watchdog_cancel(&q->watchdog);
+       tcf_block_put(q->block);
+       kvfree(q->tins);
+}
+
+static int cake_init(struct Qdisc *sch, struct nlattr *opt,
+                    struct netlink_ext_ack *extack)
+{
+       struct cake_sched_data *q = qdisc_priv(sch);
+       int i, j, err;
+
+       sch->limit = 10240;
+       q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
+       q->flow_mode  = CAKE_FLOW_TRIPLE;
+
+       q->rate_bps = 0; /* unlimited by default */
+
+       q->interval = 100000; /* 100ms default */
+       q->target   =   5000; /* 5ms: codel RFC argues
+                              * for 5 to 10% of interval
+                              */
+
+       q->cur_tin = 0;
+       q->cur_flow  = 0;
+
+       qdisc_watchdog_init(&q->watchdog, sch);
+
+       if (opt) {
+               int err = cake_change(sch, opt, extack);
+
+               if (err)
+                       return err;
+       }
+
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
+       if (err)
+               return err;
+
+       quantum_div[0] = ~0;
+       for (i = 1; i <= CAKE_QUEUES; i++)
+               quantum_div[i] = 65535 / i;
+
+       q->tins = kvzalloc(CAKE_MAX_TINS * sizeof(struct cake_tin_data),
+                          GFP_KERNEL);
+       if (!q->tins)
+               goto nomem;
+
+       for (i = 0; i < CAKE_MAX_TINS; i++) {
+               struct cake_tin_data *b = q->tins + i;
+
+               INIT_LIST_HEAD(&b->new_flows);
+               INIT_LIST_HEAD(&b->old_flows);
+               INIT_LIST_HEAD(&b->decaying_flows);
+               b->sparse_flow_count = 0;
+               b->bulk_flow_count = 0;
+               b->decaying_flow_count = 0;
+
+               for (j = 0; j < CAKE_QUEUES; j++) {
+                       struct cake_flow *flow = b->flows + j;
+                       u32 k = j * CAKE_MAX_TINS + i;
+
+                       INIT_LIST_HEAD(&flow->flowchain);
+                       cobalt_vars_init(&flow->cvars);
+
+                       q->overflow_heap[k].t = i;
+                       q->overflow_heap[k].b = j;
+                       b->overflow_idx[j] = k;
+               }
+       }
+
+       cake_reconfigure(sch);
+       q->avg_peak_bandwidth = q->rate_bps;
+       q->min_netlen = ~0;
+       q->min_adjlen = ~0;
+       return 0;
+
+nomem:
+       cake_destroy(sch);
+       return -ENOMEM;
+}
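
The init loop above flattens each (tin, queue) pair into a single heap slot k = j * CAKE_MAX_TINS + i and records the inverse mapping in overflow_heap[k].t/.b and overflow_idx[j]. A small sketch checking that the layout is a bijection, with the CAKE constants stubbed out as assumptions:

    #include <assert.h>

    #define MAX_TINS 8      /* stand-in for CAKE_MAX_TINS */
    #define QUEUES   1024   /* stand-in for CAKE_QUEUES */

    int main(void)
    {
            for (unsigned int i = 0; i < MAX_TINS; i++)
                    for (unsigned int j = 0; j < QUEUES; j++) {
                            unsigned int k = j * MAX_TINS + i;

                            /* recover tin and queue from the flat index */
                            assert(k % MAX_TINS == i);
                            assert(k / MAX_TINS == j);
                    }
            return 0;
    }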
+
+static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+       struct cake_sched_data *q = qdisc_priv(sch);
+       struct nlattr *opts;
+
+       opts = nla_nest_start(skb, TCA_OPTIONS);
+       if (!opts)
+               goto nla_put_failure;
+
+       if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps,
+                             TCA_CAKE_PAD))
+               goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE,
+                       q->flow_mode & CAKE_FLOW_MASK))
+               goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval))
+               goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target))
+               goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
+               goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
+                       !!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
+               goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_CAKE_INGRESS,
+                       !!(q->rate_flags & CAKE_FLAG_INGRESS)))
+               goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
+               goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_CAKE_NAT,
+                       !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
+               goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode))
+               goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_CAKE_WASH,
+                       !!(q->rate_flags & CAKE_FLAG_WASH)))
+               goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead))
+               goto nla_put_failure;
+
+       if (!(q->rate_flags & CAKE_FLAG_OVERHEAD))
+               if (nla_put_u32(skb, TCA_CAKE_RAW, 0))
+                       goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode))
+               goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu))
+               goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO,
+                       !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
+               goto nla_put_failure;
+
+       return nla_nest_end(skb, opts);
+
+nla_put_failure:
+       return -1;
+}
+
+static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+       struct nlattr *stats = nla_nest_start(d->skb, TCA_STATS_APP);
+       struct cake_sched_data *q = qdisc_priv(sch);
+       struct nlattr *tstats, *ts;
+       int i;
+
+       if (!stats)
+               return -1;
+
+#define PUT_STAT_U32(attr, data) do {                                 \
+               if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
+                       goto nla_put_failure;                          \
+       } while (0)
+#define PUT_STAT_U64(attr, data) do {                                 \
+               if (nla_put_u64_64bit(d->skb, TCA_CAKE_STATS_ ## attr, \
+                                       data, TCA_CAKE_STATS_PAD)) \
+                       goto nla_put_failure;                          \
+       } while (0)
+
+       PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth);
+       PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit);
+       PUT_STAT_U32(MEMORY_USED, q->buffer_max_used);
+       PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16));
+       PUT_STAT_U32(MAX_NETLEN, q->max_netlen);
+       PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen);
+       PUT_STAT_U32(MIN_NETLEN, q->min_netlen);
+       PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen);
+
+#undef PUT_STAT_U32
+#undef PUT_STAT_U64
+
+       tstats = nla_nest_start(d->skb, TCA_CAKE_STATS_TIN_STATS);
+       if (!tstats)
+               goto nla_put_failure;
+
+#define PUT_TSTAT_U32(attr, data) do {                                 \
+               if (nla_put_u32(d->skb, TCA_CAKE_TIN_STATS_ ## attr, data)) \
+                       goto nla_put_failure;                           \
+       } while (0)
+#define PUT_TSTAT_U64(attr, data) do {                                 \
+               if (nla_put_u64_64bit(d->skb, TCA_CAKE_TIN_STATS_ ## attr, \
+                                       data, TCA_CAKE_TIN_STATS_PAD))  \
+                       goto nla_put_failure;                           \
+       } while (0)
+
+       for (i = 0; i < q->tin_cnt; i++) {
+               struct cake_tin_data *b = &q->tins[q->tin_order[i]];
+
+               ts = nla_nest_start(d->skb, i + 1);
+               if (!ts)
+                       goto nla_put_failure;
+
+               PUT_TSTAT_U64(THRESHOLD_RATE64, b->tin_rate_bps);
+               PUT_TSTAT_U64(SENT_BYTES64, b->bytes);
+               PUT_TSTAT_U32(BACKLOG_BYTES, b->tin_backlog);
+
+               PUT_TSTAT_U32(TARGET_US,
+                             ktime_to_us(ns_to_ktime(b->cparams.target)));
+               PUT_TSTAT_U32(INTERVAL_US,
+                             ktime_to_us(ns_to_ktime(b->cparams.interval)));
+
+               PUT_TSTAT_U32(SENT_PACKETS, b->packets);
+               PUT_TSTAT_U32(DROPPED_PACKETS, b->tin_dropped);
+               PUT_TSTAT_U32(ECN_MARKED_PACKETS, b->tin_ecn_mark);
+               PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, b->ack_drops);
+
+               PUT_TSTAT_U32(PEAK_DELAY_US,
+                             ktime_to_us(ns_to_ktime(b->peak_delay)));
+               PUT_TSTAT_U32(AVG_DELAY_US,
+                             ktime_to_us(ns_to_ktime(b->avge_delay)));
+               PUT_TSTAT_U32(BASE_DELAY_US,
+                             ktime_to_us(ns_to_ktime(b->base_delay)));
+
+               PUT_TSTAT_U32(WAY_INDIRECT_HITS, b->way_hits);
+               PUT_TSTAT_U32(WAY_MISSES, b->way_misses);
+               PUT_TSTAT_U32(WAY_COLLISIONS, b->way_collisions);
+
+               PUT_TSTAT_U32(SPARSE_FLOWS, b->sparse_flow_count +
+                                           b->decaying_flow_count);
+               PUT_TSTAT_U32(BULK_FLOWS, b->bulk_flow_count);
+               PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, b->unresponsive_flow_count);
+               PUT_TSTAT_U32(MAX_SKBLEN, b->max_skblen);
+
+               PUT_TSTAT_U32(FLOW_QUANTUM, b->flow_quantum);
+               nla_nest_end(d->skb, ts);
+       }
+
+#undef PUT_TSTAT_U32
+#undef PUT_TSTAT_U64
+
+       nla_nest_end(d->skb, tstats);
+       return nla_nest_end(d->skb, stats);
+
+nla_put_failure:
+       nla_nest_cancel(d->skb, stats);
+       return -1;
+}
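
The PUT_STAT_* and PUT_TSTAT_* helpers above are wrapped in do { ... } while (0) so each use expands to exactly one statement; without that wrapper, a macro whose body ends in an if (as these do) would capture a following else. A minimal illustration of the pattern outside the kernel, with a stand-in for the nla_put call:

    #include <stdio.h>

    static int nla_put_stub(int x) { return x < 0; }    /* stand-in */

    /* Safe: behaves like a single statement wherever it appears. */
    #define PUT_STAT(x) do { if (nla_put_stub(x)) goto fail; } while (0)

    int main(void)
    {
            int v = 7;

            if (v)
                    PUT_STAT(v);    /* safe even directly before an else */
            else
                    puts("skipped");
            puts("ok");
            return 0;
    fail:
            puts("put failed");
            return 1;
    }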
+
+static struct Qdisc *cake_leaf(struct Qdisc *sch, unsigned long arg)
+{
+       return NULL;
+}
+
+static unsigned long cake_find(struct Qdisc *sch, u32 classid)
+{
+       return 0;
+}
+
+static unsigned long cake_bind(struct Qdisc *sch, unsigned long parent,
+                              u32 classid)
+{
+       return 0;
+}
+
+static void cake_unbind(struct Qdisc *q, unsigned long cl)
+{
+}
+
+static struct tcf_block *cake_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                       struct netlink_ext_ack *extack)
+{
+       struct cake_sched_data *q = qdisc_priv(sch);
+
+       if (cl)
+               return NULL;
+       return q->block;
+}
+
+static int cake_dump_class(struct Qdisc *sch, unsigned long cl,
+                          struct sk_buff *skb, struct tcmsg *tcm)
+{
+       tcm->tcm_handle |= TC_H_MIN(cl);
+       return 0;
+}
+
+static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+                                struct gnet_dump *d)
+{
+       struct cake_sched_data *q = qdisc_priv(sch);
+       const struct cake_flow *flow = NULL;
+       struct gnet_stats_queue qs = { 0 };
+       struct nlattr *stats;
+       u32 idx = cl - 1;
+
+       if (idx < CAKE_QUEUES * q->tin_cnt) {
+               const struct cake_tin_data *b =
+                       &q->tins[q->tin_order[idx / CAKE_QUEUES]];
+               const struct sk_buff *skb;
+
+               flow = &b->flows[idx % CAKE_QUEUES];
+
+               if (flow->head) {
+                       sch_tree_lock(sch);
+                       skb = flow->head;
+                       while (skb) {
+                               qs.qlen++;
+                               skb = skb->next;
+                       }
+                       sch_tree_unlock(sch);
+               }
+               qs.backlog = b->backlogs[idx % CAKE_QUEUES];
+               qs.drops = flow->dropped;
+       }
+       if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
+               return -1;
+       if (flow) {
+               ktime_t now = ktime_get();
+
+               stats = nla_nest_start(d->skb, TCA_STATS_APP);
+               if (!stats)
+                       return -1;
+
+#define PUT_STAT_U32(attr, data) do {                                 \
+               if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
+                       goto nla_put_failure;                          \
+       } while (0)
+#define PUT_STAT_S32(attr, data) do {                                 \
+               if (nla_put_s32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
+                       goto nla_put_failure;                          \
+       } while (0)
+
+               PUT_STAT_S32(DEFICIT, flow->deficit);
+               PUT_STAT_U32(DROPPING, flow->cvars.dropping);
+               PUT_STAT_U32(COBALT_COUNT, flow->cvars.count);
+               PUT_STAT_U32(P_DROP, flow->cvars.p_drop);
+               if (flow->cvars.p_drop) {
+                       PUT_STAT_S32(BLUE_TIMER_US,
+                                    ktime_to_us(
+                                            ktime_sub(now,
+                                                    flow->cvars.blue_timer)));
+               }
+               if (flow->cvars.dropping) {
+                       PUT_STAT_S32(DROP_NEXT_US,
+                                    ktime_to_us(
+                                            ktime_sub(now,
+                                                      flow->cvars.drop_next)));
+               }
+
+               if (nla_nest_end(d->skb, stats) < 0)
+                       return -1;
+       }
+
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(d->skb, stats);
+       return -1;
+}
+
+static void cake_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+       struct cake_sched_data *q = qdisc_priv(sch);
+       unsigned int i, j;
+
+       if (arg->stop)
+               return;
+
+       for (i = 0; i < q->tin_cnt; i++) {
+               struct cake_tin_data *b = &q->tins[q->tin_order[i]];
+
+               for (j = 0; j < CAKE_QUEUES; j++) {
+                       if (list_empty(&b->flows[j].flowchain) ||
+                           arg->count < arg->skip) {
+                               arg->count++;
+                               continue;
+                       }
+                       if (arg->fn(sch, i * CAKE_QUEUES + j + 1, arg) < 0) {
+                               arg->stop = 1;
+                               break;
+                       }
+                       arg->count++;
+               }
+       }
+}
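
cake_walk() above hands out class ids as i * CAKE_QUEUES + j + 1 — 1-based, since 0 is the "not found" return of cake_find() — and cake_dump_class_stats() inverts this via idx = cl - 1, idx / CAKE_QUEUES and idx % CAKE_QUEUES. A round-trip sketch with CAKE_QUEUES stubbed:

    #include <assert.h>

    #define QUEUES 1024     /* stand-in for CAKE_QUEUES */

    int main(void)
    {
            for (unsigned long i = 0; i < 4; i++)                /* tins  */
                    for (unsigned long j = 0; j < QUEUES; j++) { /* flows */
                            unsigned long cl  = i * QUEUES + j + 1;
                            unsigned long idx = cl - 1;

                            assert(idx / QUEUES == i);
                            assert(idx % QUEUES == j);
                    }
            return 0;
    }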
+
+static const struct Qdisc_class_ops cake_class_ops = {
+       .leaf           =       cake_leaf,
+       .find           =       cake_find,
+       .tcf_block      =       cake_tcf_block,
+       .bind_tcf       =       cake_bind,
+       .unbind_tcf     =       cake_unbind,
+       .dump           =       cake_dump_class,
+       .dump_stats     =       cake_dump_class_stats,
+       .walk           =       cake_walk,
+};
+
+static struct Qdisc_ops cake_qdisc_ops __read_mostly = {
+       .cl_ops         =       &cake_class_ops,
+       .id             =       "cake",
+       .priv_size      =       sizeof(struct cake_sched_data),
+       .enqueue        =       cake_enqueue,
+       .dequeue        =       cake_dequeue,
+       .peek           =       qdisc_peek_dequeued,
+       .init           =       cake_init,
+       .reset          =       cake_reset,
+       .destroy        =       cake_destroy,
+       .change         =       cake_change,
+       .dump           =       cake_dump,
+       .dump_stats     =       cake_dump_stats,
+       .owner          =       THIS_MODULE,
+};
+
+static int __init cake_module_init(void)
+{
+       return register_qdisc(&cake_qdisc_ops);
+}
+
+static void __exit cake_module_exit(void)
+{
+       unregister_qdisc(&cake_qdisc_ops);
+}
+
+module_init(cake_module_init)
+module_exit(cake_module_exit)
+MODULE_AUTHOR("Jonathan Morton");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("The CAKE shaper.");
diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
new file mode 100644 (file)
index 0000000..1538d6f
--- /dev/null
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* net/sched/sch_etf.c  Earliest TxTime First queueing discipline.
+ *
+ * Authors:    Jesus Sanchez-Palencia <jesus.sanchez-palencia@intel.com>
+ *             Vinicius Costa Gomes <vinicius.gomes@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/errqueue.h>
+#include <linux/rbtree.h>
+#include <linux/skbuff.h>
+#include <linux/posix-timers.h>
+#include <net/netlink.h>
+#include <net/sch_generic.h>
+#include <net/pkt_sched.h>
+#include <net/sock.h>
+
+#define DEADLINE_MODE_IS_ON(x) ((x)->flags & TC_ETF_DEADLINE_MODE_ON)
+#define OFFLOAD_IS_ON(x) ((x)->flags & TC_ETF_OFFLOAD_ON)
+
+struct etf_sched_data {
+       bool offload;
+       bool deadline_mode;
+       int clockid;
+       int queue;
+       s32 delta; /* in ns */
+       ktime_t last; /* The txtime of the last skb sent to the netdevice. */
+       struct rb_root head;
+       struct qdisc_watchdog watchdog;
+       ktime_t (*get_time)(void);
+};
+
+static const struct nla_policy etf_policy[TCA_ETF_MAX + 1] = {
+       [TCA_ETF_PARMS] = { .len = sizeof(struct tc_etf_qopt) },
+};
+
+static inline int validate_input_params(struct tc_etf_qopt *qopt,
+                                       struct netlink_ext_ack *extack)
+{
+       /* Check that the params comply with the following rules:
+        *      * Clockid and delta must be valid.
+        *
+        *      * Dynamic clockids are not supported.
+        *
+        *      * Delta must be a non-negative integer.
+        *
+        * Also note that for the HW offload case, we must
+        * expect that system clocks have been synchronized to the PHC.
+        */
+       if (qopt->clockid < 0) {
+               NL_SET_ERR_MSG(extack, "Dynamic clockids are not supported");
+               return -EOPNOTSUPP;
+       }
+
+       if (qopt->clockid != CLOCK_TAI) {
+               NL_SET_ERR_MSG(extack, "Invalid clockid. CLOCK_TAI must be used");
+               return -EINVAL;
+       }
+
+       if (qopt->delta < 0) {
+               NL_SET_ERR_MSG(extack, "Delta must be positive");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static bool is_packet_valid(struct Qdisc *sch, struct sk_buff *nskb)
+{
+       struct etf_sched_data *q = qdisc_priv(sch);
+       ktime_t txtime = nskb->tstamp;
+       struct sock *sk = nskb->sk;
+       ktime_t now;
+
+       if (!sk)
+               return false;
+
+       if (!sock_flag(sk, SOCK_TXTIME))
+               return false;
+
+       /* We don't perform crosstimestamping.
+        * Drop if packet's clockid differs from qdisc's.
+        */
+       if (sk->sk_clockid != q->clockid)
+               return false;
+
+       if (sk->sk_txtime_deadline_mode != q->deadline_mode)
+               return false;
+
+       now = q->get_time();
+       if (ktime_before(txtime, now) || ktime_before(txtime, q->last))
+               return false;
+
+       return true;
+}
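
is_packet_valid() admits a packet only if its socket opted in to SOCK_TXTIME with a clockid and deadline-mode setting matching the qdisc's, and if its txtime is neither in the past nor behind the last transmitted packet. A userspace sketch of the corresponding opt-in and of stamping a launch time on an outgoing packet — assuming kernel/libc headers of the same vintage that export SO_TXTIME, SCM_TXTIME and struct sock_txtime; error handling trimmed:

    #include <linux/net_tstamp.h>   /* struct sock_txtime, SOF_TXTIME_* */
    #include <stdint.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <time.h>

    static int enable_txtime(int fd)
    {
            struct sock_txtime cfg = {
                    .clockid = CLOCK_TAI,   /* must match the qdisc */
                    .flags   = SOF_TXTIME_REPORT_ERRORS,
            };

            return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
    }

    static int send_at(int fd, const void *buf, size_t len, uint64_t txtime_ns)
    {
            char control[CMSG_SPACE(sizeof(txtime_ns))] = { 0 };
            struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
            struct msghdr msg = {
                    .msg_iov = &iov, .msg_iovlen = 1,
                    .msg_control = control, .msg_controllen = sizeof(control),
            };
            struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

            cm->cmsg_level = SOL_SOCKET;
            cm->cmsg_type  = SCM_TXTIME;    /* carries the u64 launch time */
            cm->cmsg_len   = CMSG_LEN(sizeof(txtime_ns));
            memcpy(CMSG_DATA(cm), &txtime_ns, sizeof(txtime_ns));

            return (int)sendmsg(fd, &msg, 0);
    }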
+
+static struct sk_buff *etf_peek_timesortedlist(struct Qdisc *sch)
+{
+       struct etf_sched_data *q = qdisc_priv(sch);
+       struct rb_node *p;
+
+       p = rb_first(&q->head);
+       if (!p)
+               return NULL;
+
+       return rb_to_skb(p);
+}
+
+static void reset_watchdog(struct Qdisc *sch)
+{
+       struct etf_sched_data *q = qdisc_priv(sch);
+       struct sk_buff *skb = etf_peek_timesortedlist(sch);
+       ktime_t next;
+
+       if (!skb)
+               return;
+
+       next = ktime_sub_ns(skb->tstamp, q->delta);
+       qdisc_watchdog_schedule_ns(&q->watchdog, ktime_to_ns(next));
+}
+
+static void report_sock_error(struct sk_buff *skb, u32 err, u8 code)
+{
+       struct sock_exterr_skb *serr;
+       struct sk_buff *clone;
+       ktime_t txtime = skb->tstamp;
+
+       if (!skb->sk || !(skb->sk->sk_txtime_report_errors))
+               return;
+
+       clone = skb_clone(skb, GFP_ATOMIC);
+       if (!clone)
+               return;
+
+       serr = SKB_EXT_ERR(clone);
+       serr->ee.ee_errno = err;
+       serr->ee.ee_origin = SO_EE_ORIGIN_TXTIME;
+       serr->ee.ee_type = 0;
+       serr->ee.ee_code = code;
+       serr->ee.ee_pad = 0;
+       serr->ee.ee_data = (txtime >> 32); /* high part of tstamp */
+       serr->ee.ee_info = txtime; /* low part of tstamp */
+
+       if (sock_queue_err_skb(skb->sk, clone))
+               kfree_skb(clone);
+}
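
On failure, report_sock_error() splits the missed txtime across ee_data (high 32 bits) and ee_info (low 32 bits) under origin SO_EE_ORIGIN_TXTIME. A sketch of draining and decoding that error queue on an IPv4 UDP socket — assuming headers that define SO_EE_ORIGIN_TXTIME and a socket with IP_RECVERR enabled:

    #include <linux/errqueue.h>     /* struct sock_extended_err */
    #include <netinet/in.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/socket.h>

    static void drain_txtime_errors(int fd)
    {
            char control[256];
            struct msghdr msg = {
                    .msg_control = control,
                    .msg_controllen = sizeof(control),
            };
            struct cmsghdr *cm;

            if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
                    return;

            for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                    struct sock_extended_err *ee;
                    uint64_t txtime;

                    if (cm->cmsg_level != SOL_IP || cm->cmsg_type != IP_RECVERR)
                            continue;
                    ee = (struct sock_extended_err *)CMSG_DATA(cm);
                    if (ee->ee_origin != SO_EE_ORIGIN_TXTIME)
                            continue;

                    /* reassemble the tstamp exactly as it was split above */
                    txtime = ((uint64_t)ee->ee_data << 32) | ee->ee_info;
                    printf("txtime %llu: errno %u code %u\n",
                           (unsigned long long)txtime, ee->ee_errno, ee->ee_code);
            }
    }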
+
+static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
+                                     struct sk_buff **to_free)
+{
+       struct etf_sched_data *q = qdisc_priv(sch);
+       struct rb_node **p = &q->head.rb_node, *parent = NULL;
+       ktime_t txtime = nskb->tstamp;
+
+       if (!is_packet_valid(sch, nskb)) {
+               report_sock_error(nskb, EINVAL,
+                                 SO_EE_CODE_TXTIME_INVALID_PARAM);
+               return qdisc_drop(nskb, sch, to_free);
+       }
+
+       while (*p) {
+               struct sk_buff *skb;
+
+               parent = *p;
+               skb = rb_to_skb(parent);
+               if (ktime_after(txtime, skb->tstamp))
+                       p = &parent->rb_right;
+               else
+                       p = &parent->rb_left;
+       }
+       rb_link_node(&nskb->rbnode, parent, p);
+       rb_insert_color(&nskb->rbnode, &q->head);
+
+       qdisc_qstats_backlog_inc(sch, nskb);
+       sch->q.qlen++;
+
+       /* Now we may need to re-arm the qdisc watchdog for the next packet. */
+       reset_watchdog(sch);
+
+       return NET_XMIT_SUCCESS;
+}
+
+static void timesortedlist_erase(struct Qdisc *sch, struct sk_buff *skb,
+                                bool drop)
+{
+       struct etf_sched_data *q = qdisc_priv(sch);
+
+       rb_erase(&skb->rbnode, &q->head);
+
+       /* The rbnode field in the skb re-uses these fields; now that
+        * we are done with the rbnode, reset them.
+        */
+       skb->next = NULL;
+       skb->prev = NULL;
+       skb->dev = qdisc_dev(sch);
+
+       qdisc_qstats_backlog_dec(sch, skb);
+
+       if (drop) {
+               struct sk_buff *to_free = NULL;
+
+               report_sock_error(skb, ECANCELED, SO_EE_CODE_TXTIME_MISSED);
+
+               qdisc_drop(skb, sch, &to_free);
+               kfree_skb_list(to_free);
+               qdisc_qstats_overlimit(sch);
+       } else {
+               qdisc_bstats_update(sch, skb);
+
+               q->last = skb->tstamp;
+       }
+
+       sch->q.qlen--;
+}
+
+static struct sk_buff *etf_dequeue_timesortedlist(struct Qdisc *sch)
+{
+       struct etf_sched_data *q = qdisc_priv(sch);
+       struct sk_buff *skb;
+       ktime_t now, next;
+
+       skb = etf_peek_timesortedlist(sch);
+       if (!skb)
+               return NULL;
+
+       now = q->get_time();
+
+       /* Drop if packet has expired while in queue. */
+       if (ktime_before(skb->tstamp, now)) {
+               timesortedlist_erase(sch, skb, true);
+               skb = NULL;
+               goto out;
+       }
+
+       /* When in deadline mode, dequeue as soon as possible and change
+        * the txtime from the deadline to the current time.
+        */
+       if (q->deadline_mode) {
+               timesortedlist_erase(sch, skb, false);
+               skb->tstamp = now;
+               goto out;
+       }
+
+       next = ktime_sub_ns(skb->tstamp, q->delta);
+
+       /* Dequeue only if now is within the [txtime - delta, txtime] range. */
+       if (ktime_after(now, next))
+               timesortedlist_erase(sch, skb, false);
+       else
+               skb = NULL;
+
+out:
+       /* Now we may need to re-arm the qdisc watchdog for the next packet. */
+       reset_watchdog(sch);
+
+       return skb;
+}
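
Taken together with the expiry drop above it, the dequeue releases a packet only inside the launch window (txtime - delta, txtime]. The same test in plain signed-nanosecond arithmetic, with ktime_t reduced to int64_t for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    /* all values in nanoseconds on the qdisc's reference clock */
    static bool ready_to_launch(int64_t now, int64_t txtime, int64_t delta)
    {
            if (txtime < now)               /* expired: the dequeue drops it */
                    return false;
            return now > txtime - delta;    /* inside the launch window */
    }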
+
+static void etf_disable_offload(struct net_device *dev,
+                               struct etf_sched_data *q)
+{
+       struct tc_etf_qopt_offload etf = { };
+       const struct net_device_ops *ops;
+       int err;
+
+       if (!q->offload)
+               return;
+
+       ops = dev->netdev_ops;
+       if (!ops->ndo_setup_tc)
+               return;
+
+       etf.queue = q->queue;
+       etf.enable = 0;
+
+       err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETF, &etf);
+       if (err < 0)
+               pr_warn("Couldn't disable ETF offload for queue %d\n",
+                       etf.queue);
+}
+
+static int etf_enable_offload(struct net_device *dev, struct etf_sched_data *q,
+                             struct netlink_ext_ack *extack)
+{
+       const struct net_device_ops *ops = dev->netdev_ops;
+       struct tc_etf_qopt_offload etf = { };
+       int err;
+
+       if (q->offload)
+               return 0;
+
+       if (!ops->ndo_setup_tc) {
+               NL_SET_ERR_MSG(extack, "Specified device does not support ETF offload");
+               return -EOPNOTSUPP;
+       }
+
+       etf.queue = q->queue;
+       etf.enable = 1;
+
+       err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETF, &etf);
+       if (err < 0) {
+               NL_SET_ERR_MSG(extack, "Specified device failed to setup ETF hardware offload");
+               return err;
+       }
+
+       return 0;
+}
+
+static int etf_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
+{
+       struct etf_sched_data *q = qdisc_priv(sch);
+       struct net_device *dev = qdisc_dev(sch);
+       struct nlattr *tb[TCA_ETF_MAX + 1];
+       struct tc_etf_qopt *qopt;
+       int err;
+
+       if (!opt) {
+               NL_SET_ERR_MSG(extack,
+                              "Missing ETF qdisc options which are mandatory");
+               return -EINVAL;
+       }
+
+       err = nla_parse_nested(tb, TCA_ETF_MAX, opt, etf_policy, extack);
+       if (err < 0)
+               return err;
+
+       if (!tb[TCA_ETF_PARMS]) {
+               NL_SET_ERR_MSG(extack, "Missing mandatory ETF parameters");
+               return -EINVAL;
+       }
+
+       qopt = nla_data(tb[TCA_ETF_PARMS]);
+
+       pr_debug("delta %d clockid %d offload %s deadline %s\n",
+                qopt->delta, qopt->clockid,
+                OFFLOAD_IS_ON(qopt) ? "on" : "off",
+                DEADLINE_MODE_IS_ON(qopt) ? "on" : "off");
+
+       err = validate_input_params(qopt, extack);
+       if (err < 0)
+               return err;
+
+       q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
+
+       if (OFFLOAD_IS_ON(qopt)) {
+               err = etf_enable_offload(dev, q, extack);
+               if (err < 0)
+                       return err;
+       }
+
+       /* Everything went OK, save the parameters used. */
+       q->delta = qopt->delta;
+       q->clockid = qopt->clockid;
+       q->offload = OFFLOAD_IS_ON(qopt);
+       q->deadline_mode = DEADLINE_MODE_IS_ON(qopt);
+
+       switch (q->clockid) {
+       case CLOCK_REALTIME:
+               q->get_time = ktime_get_real;
+               break;
+       case CLOCK_MONOTONIC:
+               q->get_time = ktime_get;
+               break;
+       case CLOCK_BOOTTIME:
+               q->get_time = ktime_get_boottime;
+               break;
+       case CLOCK_TAI:
+               q->get_time = ktime_get_clocktai;
+               break;
+       default:
+               NL_SET_ERR_MSG(extack, "Clockid is not supported");
+               return -EOPNOTSUPP;
+       }
+
+       qdisc_watchdog_init_clockid(&q->watchdog, sch, q->clockid);
+
+       return 0;
+}
+
+static void timesortedlist_clear(struct Qdisc *sch)
+{
+       struct etf_sched_data *q = qdisc_priv(sch);
+       struct rb_node *p = rb_first(&q->head);
+
+       while (p) {
+               struct sk_buff *skb = rb_to_skb(p);
+
+               p = rb_next(p);
+
+               rb_erase(&skb->rbnode, &q->head);
+               rtnl_kfree_skbs(skb, skb);
+               sch->q.qlen--;
+       }
+}
+
+static void etf_reset(struct Qdisc *sch)
+{
+       struct etf_sched_data *q = qdisc_priv(sch);
+
+       /* Only cancel watchdog if it's been initialized. */
+       if (q->watchdog.qdisc == sch)
+               qdisc_watchdog_cancel(&q->watchdog);
+
+       /* No matter which mode we are in, it's safe to clear both lists. */
+       timesortedlist_clear(sch);
+       __qdisc_reset_queue(&sch->q);
+
+       sch->qstats.backlog = 0;
+       sch->q.qlen = 0;
+
+       q->last = 0;
+}
+
+static void etf_destroy(struct Qdisc *sch)
+{
+       struct etf_sched_data *q = qdisc_priv(sch);
+       struct net_device *dev = qdisc_dev(sch);
+
+       /* Only cancel watchdog if it's been initialized. */
+       if (q->watchdog.qdisc == sch)
+               qdisc_watchdog_cancel(&q->watchdog);
+
+       etf_disable_offload(dev, q);
+}
+
+static int etf_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+       struct etf_sched_data *q = qdisc_priv(sch);
+       struct tc_etf_qopt opt = { };
+       struct nlattr *nest;
+
+       nest = nla_nest_start(skb, TCA_OPTIONS);
+       if (!nest)
+               goto nla_put_failure;
+
+       opt.delta = q->delta;
+       opt.clockid = q->clockid;
+       if (q->offload)
+               opt.flags |= TC_ETF_OFFLOAD_ON;
+
+       if (q->deadline_mode)
+               opt.flags |= TC_ETF_DEADLINE_MODE_ON;
+
+       if (nla_put(skb, TCA_ETF_PARMS, sizeof(opt), &opt))
+               goto nla_put_failure;
+
+       return nla_nest_end(skb, nest);
+
+nla_put_failure:
+       nla_nest_cancel(skb, nest);
+       return -1;
+}
+
+static struct Qdisc_ops etf_qdisc_ops __read_mostly = {
+       .id             =       "etf",
+       .priv_size      =       sizeof(struct etf_sched_data),
+       .enqueue        =       etf_enqueue_timesortedlist,
+       .dequeue        =       etf_dequeue_timesortedlist,
+       .peek           =       etf_peek_timesortedlist,
+       .init           =       etf_init,
+       .reset          =       etf_reset,
+       .destroy        =       etf_destroy,
+       .dump           =       etf_dump,
+       .owner          =       THIS_MODULE,
+};
+
+static int __init etf_module_init(void)
+{
+       return register_qdisc(&etf_qdisc_ops);
+}
+
+static void __exit etf_module_exit(void)
+{
+       unregister_qdisc(&etf_qdisc_ops);
+}
+module_init(etf_module_init)
+module_exit(etf_module_exit)
+MODULE_LICENSE("GPL");
index 3ae9877ea2057d0ba517c84d38f6ba6a79ff6ef8..3278a76f6861576ba7e42cf9f91a62f96443cb3a 100644 (file)
@@ -1385,8 +1385,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
                if (next_time == 0 || next_time > q->root.cl_cfmin)
                        next_time = q->root.cl_cfmin;
        }
-       WARN_ON(next_time == 0);
-       qdisc_watchdog_schedule(&q->watchdog, next_time);
+       if (next_time)
+               qdisc_watchdog_schedule(&q->watchdog, next_time);
 }
 
 static int
index 2a4ab7caf5534b11e8976d242c0066c699bb7fd6..43c4bfe625a917e1447b08f1875351f8d22ec2c8 100644 (file)
@@ -126,7 +126,6 @@ struct htb_class {
 
        union {
                struct htb_class_leaf {
-                       struct list_head drop_list;
                        int             deficit[TC_HTB_MAXDEPTH];
                        struct Qdisc    *q;
                } leaf;
@@ -171,7 +170,6 @@ struct htb_sched {
        struct qdisc_watchdog   watchdog;
 
        s64                     now;    /* cached dequeue time */
-       struct list_head        drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
 
        /* time of nearest event per level (row) */
        s64                     near_ev_cache[TC_HTB_MAXDEPTH];
@@ -562,8 +560,6 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
        if (!cl->prio_activity) {
                cl->prio_activity = 1 << cl->prio;
                htb_activate_prios(q, cl);
-               list_add_tail(&cl->un.leaf.drop_list,
-                             q->drops + cl->prio);
        }
 }
 
@@ -579,7 +575,6 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 
        htb_deactivate_prios(q, cl);
        cl->prio_activity = 0;
-       list_del_init(&cl->un.leaf.drop_list);
 }
 
 static void htb_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
@@ -981,7 +976,6 @@ static void htb_reset(struct Qdisc *sch)
                        else {
                                if (cl->un.leaf.q)
                                        qdisc_reset(cl->un.leaf.q);
-                               INIT_LIST_HEAD(&cl->un.leaf.drop_list);
                        }
                        cl->prio_activity = 0;
                        cl->cmode = HTB_CAN_SEND;
@@ -993,8 +987,6 @@ static void htb_reset(struct Qdisc *sch)
        sch->qstats.backlog = 0;
        memset(q->hlevel, 0, sizeof(q->hlevel));
        memset(q->row_mask, 0, sizeof(q->row_mask));
-       for (i = 0; i < TC_HTB_NUMPRIO; i++)
-               INIT_LIST_HEAD(q->drops + i);
 }
 
 static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
@@ -1024,7 +1016,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
        struct nlattr *tb[TCA_HTB_MAX + 1];
        struct tc_htb_glob *gopt;
        int err;
-       int i;
 
        qdisc_watchdog_init(&q->watchdog, sch);
        INIT_WORK(&q->work, htb_work_func);
@@ -1050,8 +1041,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
        err = qdisc_class_hash_init(&q->clhash);
        if (err < 0)
                return err;
-       for (i = 0; i < TC_HTB_NUMPRIO; i++)
-               INIT_LIST_HEAD(q->drops + i);
 
        qdisc_skb_head_init(&q->direct_queue);
 
@@ -1224,7 +1213,6 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
 
        parent->level = 0;
        memset(&parent->un.inner, 0, sizeof(parent->un.inner));
-       INIT_LIST_HEAD(&parent->un.leaf.drop_list);
        parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
        parent->tokens = parent->buffer;
        parent->ctokens = parent->cbuffer;
@@ -1418,7 +1406,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                }
 
                cl->children = 0;
-               INIT_LIST_HEAD(&cl->un.leaf.drop_list);
                RB_CLEAR_NODE(&cl->pq_node);
 
                for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
index 7d6801fc5340eff65b81037519ada115cbc23e20..ad18a205241690070aff0f459c59c099ff8f3e2a 100644 (file)
                 Fabio Ludovici <fabio.ludovici at yahoo.it>
 */
 
+struct disttable {
+       u32  size;
+       s16 table[0];
+};
+
 struct netem_sched_data {
        /* internal t(ime)fifo qdisc uses t_root and sch->limit */
        struct rb_root t_root;
@@ -99,10 +104,7 @@ struct netem_sched_data {
                u32 rho;
        } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
 
-       struct disttable {
-               u32  size;
-               s16 table[0];
-       } *delay_dist;
+       struct disttable *delay_dist;
 
        enum  {
                CLG_RANDOM,
@@ -142,6 +144,7 @@ struct netem_sched_data {
                s32 bytes_left;
        } slot;
 
+       struct disttable *slot_dist;
 };
 
 /* Time stamp put into socket buffer control block
@@ -180,7 +183,7 @@ static u32 get_crandom(struct crndstate *state)
        u64 value, rho;
        unsigned long answer;
 
-       if (state->rho == 0)    /* no correlation */
+       if (!state || state->rho == 0)  /* no correlation */
                return prandom_u32();
 
        value = prandom_u32();
@@ -601,10 +604,19 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 static void get_slot_next(struct netem_sched_data *q, u64 now)
 {
-       q->slot.slot_next = now + q->slot_config.min_delay +
-               (prandom_u32() *
-                       (q->slot_config.max_delay -
-                               q->slot_config.min_delay) >> 32);
+       s64 next_delay;
+
+       if (!q->slot_dist)
+               next_delay = q->slot_config.min_delay +
+                               (prandom_u32() *
+                                (q->slot_config.max_delay -
+                                 q->slot_config.min_delay) >> 32);
+       else
+               next_delay = tabledist(q->slot_config.dist_delay,
+                                      (s32)(q->slot_config.dist_jitter),
+                                      NULL, q->slot_dist);
+
+       q->slot.slot_next = now + next_delay;
        q->slot.packets_left = q->slot_config.max_packets;
        q->slot.bytes_left = q->slot_config.max_bytes;
 }
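
Both the fallback branch kept above and the old inline computation scale prandom_u32() onto [min_delay, max_delay) with a widening multiply and a 32-bit right shift, avoiding a division in the fast path. The idiom in isolation (exact when the range fits in 32 bits):

    #include <stdint.h>

    /* Map a uniform 32-bit random value r onto [lo, hi): the product
     * r * (hi - lo) occupies 64 bits, and its top 32 bits are r/2^32
     * of the range.
     */
    static int64_t scale_delay(uint32_t r, int64_t lo, int64_t hi)
    {
            return lo + (int64_t)(((uint64_t)r * (uint64_t)(hi - lo)) >> 32);
    }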
@@ -721,9 +733,9 @@ static void dist_free(struct disttable *d)
  * signed 16 bit values.
  */
 
-static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
+static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
+                         const struct nlattr *attr)
 {
-       struct netem_sched_data *q = qdisc_priv(sch);
        size_t n = nla_len(attr)/sizeof(__s16);
        const __s16 *data = nla_data(attr);
        spinlock_t *root_lock;
@@ -744,7 +756,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
        root_lock = qdisc_root_sleeping_lock(sch);
 
        spin_lock_bh(root_lock);
-       swap(q->delay_dist, d);
+       swap(*tbl, d);
        spin_unlock_bh(root_lock);
 
        dist_free(d);
@@ -762,7 +774,8 @@ static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
                q->slot_config.max_bytes = INT_MAX;
        q->slot.packets_left = q->slot_config.max_packets;
        q->slot.bytes_left = q->slot_config.max_bytes;
-       if (q->slot_config.min_delay | q->slot_config.max_delay)
+       if (q->slot_config.min_delay | q->slot_config.max_delay |
+           q->slot_config.dist_jitter)
                q->slot.slot_next = ktime_get_ns();
        else
                q->slot.slot_next = 0;
@@ -926,16 +939,17 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
        }
 
        if (tb[TCA_NETEM_DELAY_DIST]) {
-               ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
-               if (ret) {
-                       /* recover clg and loss_model, in case of
-                        * q->clg and q->loss_model were modified
-                        * in get_loss_clg()
-                        */
-                       q->clg = old_clg;
-                       q->loss_model = old_loss_model;
-                       return ret;
-               }
+               ret = get_dist_table(sch, &q->delay_dist,
+                                    tb[TCA_NETEM_DELAY_DIST]);
+               if (ret)
+                       goto get_table_failure;
+       }
+
+       if (tb[TCA_NETEM_SLOT_DIST]) {
+               ret = get_dist_table(sch, &q->slot_dist,
+                                    tb[TCA_NETEM_SLOT_DIST]);
+               if (ret)
+                       goto get_table_failure;
        }
 
        sch->limit = qopt->limit;
@@ -983,6 +997,15 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
                get_slot(q, tb[TCA_NETEM_SLOT]);
 
        return ret;
+
+get_table_failure:
+       /* Recover clg and loss_model, in case q->clg and
+        * q->loss_model were modified in get_loss_clg().
+        */
+       q->clg = old_clg;
+       q->loss_model = old_loss_model;
+       return ret;
 }
 
 static int netem_init(struct Qdisc *sch, struct nlattr *opt,
@@ -1011,6 +1034,7 @@ static void netem_destroy(struct Qdisc *sch)
        if (q->qdisc)
                qdisc_destroy(q->qdisc);
        dist_free(q->delay_dist);
+       dist_free(q->slot_dist);
 }
 
 static int dump_loss_model(const struct netem_sched_data *q,
@@ -1127,7 +1151,8 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (dump_loss_model(q, skb) != 0)
                goto nla_put_failure;
 
-       if (q->slot_config.min_delay | q->slot_config.max_delay) {
+       if (q->slot_config.min_delay | q->slot_config.max_delay |
+           q->slot_config.dist_jitter) {
                slot = q->slot_config;
                if (slot.max_packets == INT_MAX)
                        slot.max_packets = 0;
index 5d5a16204d50516eca7d5322a60aac6511178c38..297d9cf960b928532aa2769c47f76fdb5f64efbf 100644 (file)
@@ -115,6 +115,9 @@ static struct sctp_association *sctp_association_init(
        /* Initialize path max retrans value. */
        asoc->pathmaxrxt = sp->pathmaxrxt;
 
+       asoc->flowlabel = sp->flowlabel;
+       asoc->dscp = sp->dscp;
+
        /* Initialize default path MTU. */
        asoc->pathmtu = sp->pathmtu;
 
@@ -647,6 +650,18 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
        peer->sackdelay = asoc->sackdelay;
        peer->sackfreq = asoc->sackfreq;
 
+       if (addr->sa.sa_family == AF_INET6) {
+               __be32 info = addr->v6.sin6_flowinfo;
+
+               if (info) {
+                       peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK);
+                       peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+               } else {
+                       peer->flowlabel = asoc->flowlabel;
+               }
+       }
+       peer->dscp = asoc->dscp;
+
        /* Enable/disable heartbeat, SACK delay, and path MTU discovery
         * based on association setting.
         */
index 79daa98208c391c780440144d69bc7be875c3476..bfb9f812e2ef9fa605b08dc1f534781573c3abf8 100644 (file)
@@ -237,7 +237,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
        /* Account for a different sized first fragment */
        if (msg_len >= first_len) {
                msg->can_delay = 0;
-               SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
+               if (msg_len > first_len)
+                       SCTP_INC_STATS(sock_net(asoc->base.sk),
+                                      SCTP_MIB_FRAGUSRMSGS);
        } else {
                /* Which may be the only one... */
                first_len = msg_len;
index ba8a6e6c36fae998b5590a803c90c28d8302d063..9bbc5f92c941948ee22d1a6095245c08bbd64244 100644 (file)
@@ -56,6 +56,7 @@
 #include <net/sctp/sm.h>
 #include <net/sctp/checksum.h>
 #include <net/net_namespace.h>
+#include <linux/rhashtable.h>
 
 /* Forward declarations for internal helpers. */
 static int sctp_rcv_ootb(struct sk_buff *);
index 7339918a805d93db8a94fed627f99962e07e3267..fc6c5e4bffa540069f70cf33bda2942d7143fcd4 100644 (file)
@@ -209,12 +209,17 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
        struct sock *sk = skb->sk;
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct flowi6 *fl6 = &transport->fl.u.ip6;
+       __u8 tclass = np->tclass;
        int res;
 
        pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb,
                 skb->len, &fl6->saddr, &fl6->daddr);
 
-       IP6_ECN_flow_xmit(sk, fl6->flowlabel);
+       if (transport->dscp & SCTP_DSCP_SET_MASK)
+               tclass = transport->dscp & SCTP_DSCP_VAL_MASK;
+
+       if (INET_ECN_is_capable(tclass))
+               IP6_ECN_flow_xmit(sk, fl6->flowlabel);
 
        if (!(transport->param_flags & SPP_PMTUD_ENABLE))
                skb->ignore_df = 1;
@@ -223,7 +228,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
 
        rcu_read_lock();
        res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
-                      np->tclass);
+                      tclass);
        rcu_read_unlock();
        return res;
 }
@@ -254,6 +259,17 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
                fl6->flowi6_oif = daddr->v6.sin6_scope_id;
        else if (asoc)
                fl6->flowi6_oif = asoc->base.sk->sk_bound_dev_if;
+       if (t->flowlabel & SCTP_FLOWLABEL_SET_MASK)
+               fl6->flowlabel = htonl(t->flowlabel & SCTP_FLOWLABEL_VAL_MASK);
+
+       if (np->sndflow && (fl6->flowlabel & IPV6_FLOWLABEL_MASK)) {
+               struct ip6_flowlabel *flowlabel;
+
+               flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
+               if (!flowlabel)
+                       goto out;
+               fl6_sock_release(flowlabel);
+       }
 
        pr_debug("%s: dst=%pI6 ", __func__, &fl6->daddr);
 
@@ -1010,7 +1026,7 @@ static const struct proto_ops inet6_seqpacket_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = sctp_getname,
-       .poll_mask         = sctp_poll_mask,
+       .poll              = sctp_poll,
        .ioctl             = inet6_ioctl,
        .listen            = sctp_inet_listen,
        .shutdown          = inet_shutdown,
index 5dffbc4930086699cefa10f704de5fd2068169c8..e948db29ab539a588e8526d2f4fc22428a9f4685 100644 (file)
@@ -426,13 +426,16 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
        struct dst_entry *dst = NULL;
        union sctp_addr *daddr = &t->ipaddr;
        union sctp_addr dst_saddr;
+       __u8 tos = inet_sk(sk)->tos;
 
+       if (t->dscp & SCTP_DSCP_SET_MASK)
+               tos = t->dscp & SCTP_DSCP_VAL_MASK;
        memset(fl4, 0x0, sizeof(struct flowi4));
        fl4->daddr  = daddr->v4.sin_addr.s_addr;
        fl4->fl4_dport = daddr->v4.sin_port;
        fl4->flowi4_proto = IPPROTO_SCTP;
        if (asoc) {
-               fl4->flowi4_tos = RT_CONN_FLAGS(asoc->base.sk);
+               fl4->flowi4_tos = RT_CONN_FLAGS_TOS(asoc->base.sk, tos);
                fl4->flowi4_oif = asoc->base.sk->sk_bound_dev_if;
                fl4->fl4_sport = htons(asoc->base.bind_addr.port);
        }
@@ -495,7 +498,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
                fl4->fl4_sport = laddr->a.v4.sin_port;
                flowi4_update_output(fl4,
                                     asoc->base.sk->sk_bound_dev_if,
-                                    RT_CONN_FLAGS(asoc->base.sk),
+                                    RT_CONN_FLAGS_TOS(asoc->base.sk, tos),
                                     daddr->v4.sin_addr.s_addr,
                                     laddr->a.v4.sin_addr.s_addr);
 
@@ -971,16 +974,21 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
                               struct sctp_transport *transport)
 {
        struct inet_sock *inet = inet_sk(skb->sk);
+       __u8 dscp = inet->tos;
 
        pr_debug("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", __func__, skb,
-                skb->len, &transport->fl.u.ip4.saddr, &transport->fl.u.ip4.daddr);
+                skb->len, &transport->fl.u.ip4.saddr,
+                &transport->fl.u.ip4.daddr);
+
+       if (transport->dscp & SCTP_DSCP_SET_MASK)
+               dscp = transport->dscp & SCTP_DSCP_VAL_MASK;
 
        inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ?
                         IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
 
        SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS);
 
-       return ip_queue_xmit(&inet->sk, skb, &transport->fl);
+       return __ip_queue_xmit(&inet->sk, skb, &transport->fl, dscp);
 }
 
 static struct sctp_af sctp_af_inet;
@@ -1016,7 +1024,7 @@ static const struct proto_ops inet_seqpacket_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet_getname,      /* Semantics are different.  */
-       .poll_mask         = sctp_poll_mask,
+       .poll              = sctp_poll,
        .ioctl             = inet_ioctl,
        .listen            = sctp_inet_listen,
        .shutdown          = inet_shutdown,     /* Looks harmless.  */
index d20f7addee19ecb794fa85f9ed73e8b40784a095..502c0d7cb105e27306df08155527d147e4fdac5f 100644 (file)
@@ -66,6 +66,7 @@
 #include <linux/slab.h>
 #include <linux/file.h>
 #include <linux/compat.h>
+#include <linux/rhashtable.h>
 
 #include <net/ip.h>
 #include <net/icmp.h>
@@ -1696,6 +1697,7 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags,
        struct sctp_association *asoc;
        enum sctp_scope scope;
        struct cmsghdr *cmsg;
+       __be32 flowinfo = 0;
        struct sctp_af *af;
        int err;
 
@@ -1780,6 +1782,9 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags,
        if (!cmsgs->addrs_msg)
                return 0;
 
+       if (daddr->sa.sa_family == AF_INET6)
+               flowinfo = daddr->v6.sin6_flowinfo;
+
        /* sendv addr list parse */
        for_each_cmsghdr(cmsg, cmsgs->addrs_msg) {
                struct sctp_transport *transport;
@@ -1812,6 +1817,7 @@ static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags,
                        }
 
                        dlen = sizeof(struct in6_addr);
+                       daddr->v6.sin6_flowinfo = flowinfo;
                        daddr->v6.sin6_family = AF_INET6;
                        daddr->v6.sin6_port = htons(asoc->peer.port);
                        memcpy(&daddr->v6.sin6_addr, CMSG_DATA(cmsg), dlen);
@@ -2392,6 +2398,8 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
  *     uint32_t                spp_pathmtu;
  *     uint32_t                spp_sackdelay;
  *     uint32_t                spp_flags;
+ *     uint32_t                spp_ipv6_flowlabel;
+ *     uint8_t                 spp_dscp;
  * };
  *
  *   spp_assoc_id    - (one-to-many style socket) This is filled in the
@@ -2471,6 +2479,45 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
  *                     also that this field is mutually exclusive to
  *                     SPP_SACKDELAY_ENABLE, setting both will have undefined
  *                     results.
+ *
+ *                     SPP_IPV6_FLOWLABEL:  Setting this flag enables the
+ *                     setting of the IPV6 flow label value.  The value is
+ *                     contained in the spp_ipv6_flowlabel field.
+ *                     Upon retrieval, this flag will be set to indicate that
+ *                     the spp_ipv6_flowlabel field has a valid value returned.
+ *                     If a specific destination address is set (in the
+ *                     spp_address field), then the value returned is that of
+ *                     the address.  If just an association is specified (and
+ *                     no address), then the association's default flow label
+ *                     is returned.  If neither an association nor a destination
+ *                     is specified, then the socket's default flow label is
+ *                     returned.  For non-IPv6 sockets, this flag will be left
+ *                     cleared.
+ *
+ *                     SPP_DSCP:  Setting this flag enables the setting of the
+ *                     Differentiated Services Code Point (DSCP) value
+ *                     associated with either the association or a specific
+ *                     address.  The value is obtained in the spp_dscp field.
+ *                     Upon retrieval, this flag will be set to indicate that
+ *                     the spp_dscp field has a valid value returned.  If a
+ *                     specific destination address is set when called (in the
+ *                     spp_address field), then that specific destination
+ *                     address's DSCP value is returned.  If just an association
+ *                     is specified, then the association's default DSCP is
+ *                     returned.  If neither an association nor a destination is
+ *                     specified, then the socket's default DSCP is returned.
+ *
+ *   spp_ipv6_flowlabel
+ *                   - This field is used in conjunction with the
+ *                     SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label.
+ *                     The 20 least significant bits are used for the flow
+ *                     label.  This setting has precedence over any IPv6-layer
+ *                     setting.
+ *
+ *   spp_dscp        - This field is used in conjunction with the SPP_DSCP flag
+ *                     and contains the DSCP.  The 6 most significant bits are
+ *                     used for the DSCP.  This setting has precedence over any
+ *                     IPv4- or IPv6- layer setting.
  */
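
A hypothetical application-side sketch of the SPP_DSCP case just described, setting a per-association DSCP through SCTP_PEER_ADDR_PARAMS — assuming libc/lksctp headers new enough to expose the spp_dscp field and SPP_DSCP flag added by this patch; error handling trimmed:

    #include <netinet/in.h>
    #include <netinet/sctp.h>   /* struct sctp_paddrparams, SCTP_PEER_ADDR_PARAMS */
    #include <stdint.h>
    #include <string.h>

    static int set_assoc_dscp(int fd, sctp_assoc_t assoc_id, uint8_t dscp)
    {
            struct sctp_paddrparams p;

            memset(&p, 0, sizeof(p));
            p.spp_assoc_id = assoc_id;   /* no address: applies to the assoc */
            p.spp_flags    = SPP_DSCP;
            p.spp_dscp     = dscp << 2;  /* 6 most significant bits carry DSCP */

            return setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
                              &p, sizeof(p));
    }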
 static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
                                       struct sctp_transport   *trans,
@@ -2610,6 +2657,51 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
                }
        }
 
+       if (params->spp_flags & SPP_IPV6_FLOWLABEL) {
+               if (trans && trans->ipaddr.sa.sa_family == AF_INET6) {
+                       trans->flowlabel = params->spp_ipv6_flowlabel &
+                                          SCTP_FLOWLABEL_VAL_MASK;
+                       trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+               } else if (asoc) {
+                       list_for_each_entry(trans,
+                                           &asoc->peer.transport_addr_list,
+                                           transports) {
+                               if (trans->ipaddr.sa.sa_family != AF_INET6)
+                                       continue;
+                               trans->flowlabel = params->spp_ipv6_flowlabel &
+                                                  SCTP_FLOWLABEL_VAL_MASK;
+                               trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+                       }
+                       asoc->flowlabel = params->spp_ipv6_flowlabel &
+                                         SCTP_FLOWLABEL_VAL_MASK;
+                       asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+               } else if (sctp_opt2sk(sp)->sk_family == AF_INET6) {
+                       sp->flowlabel = params->spp_ipv6_flowlabel &
+                                       SCTP_FLOWLABEL_VAL_MASK;
+                       sp->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+               }
+       }
+
+       if (params->spp_flags & SPP_DSCP) {
+               if (trans) {
+                       trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
+                       trans->dscp |= SCTP_DSCP_SET_MASK;
+               } else if (asoc) {
+                       list_for_each_entry(trans,
+                                           &asoc->peer.transport_addr_list,
+                                           transports) {
+                               trans->dscp = params->spp_dscp &
+                                             SCTP_DSCP_VAL_MASK;
+                               trans->dscp |= SCTP_DSCP_SET_MASK;
+                       }
+                       asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
+                       asoc->dscp |= SCTP_DSCP_SET_MASK;
+               } else {
+                       sp->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
+                       sp->dscp |= SCTP_DSCP_SET_MASK;
+               }
+       }
+
        return 0;
 }
 
@@ -2624,11 +2716,18 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
        int error;
        int hb_change, pmtud_change, sackdelay_change;
 
-       if (optlen != sizeof(struct sctp_paddrparams))
+       if (optlen == sizeof(params)) {
+               if (copy_from_user(&params, optval, optlen))
+                       return -EFAULT;
+       } else if (optlen == ALIGN(offsetof(struct sctp_paddrparams,
+                                           spp_ipv6_flowlabel), 4)) {
+               if (copy_from_user(&params, optval, optlen))
+                       return -EFAULT;
+               if (params.spp_flags & (SPP_DSCP | SPP_IPV6_FLOWLABEL))
+                       return -EINVAL;
+       } else {
                return -EINVAL;
-
-       if (copy_from_user(&params, optval, optlen))
-               return -EFAULT;
+       }
 
        /* Validate flags and value parameters. */
        hb_change        = params.spp_flags & SPP_HB;
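
The length check above keeps old binaries working: either the full extended structure or the legacy size, i.e. everything up to (but not including) spp_ipv6_flowlabel rounded up to 4 bytes, is accepted; anything else is -EINVAL, and the legacy size must not request the new flags. A sketch of the same computation, assuming the extended layout (offsetof from <stddef.h>):

    #define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    size_t full_len   = sizeof(struct sctp_paddrparams);
    size_t legacy_len = ALIGN(offsetof(struct sctp_paddrparams,
                                       spp_ipv6_flowlabel), 4);
    /* optlen == full_len:   new fields honoured
     * optlen == legacy_len: accepted, but SPP_DSCP/SPP_IPV6_FLOWLABEL rejected
     * anything else:        -EINVAL
     */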
@@ -4169,6 +4268,28 @@ static int sctp_setsockopt_interleaving_supported(struct sock *sk,
        return retval;
 }
 
+static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval,
+                                     unsigned int optlen)
+{
+       int val;
+
+       if (!sctp_style(sk, TCP))
+               return -EOPNOTSUPP;
+
+       if (sctp_sk(sk)->ep->base.bind_addr.port)
+               return -EFAULT;
+
+       if (optlen < sizeof(int))
+               return -EINVAL;
+
+       if (get_user(val, (int __user *)optval))
+               return -EFAULT;
+
+       sctp_sk(sk)->reuse = !!val;
+
+       return 0;
+}
+
 /* API 6.2 setsockopt(), getsockopt()
  *
  * Applications use setsockopt() and getsockopt() to set or retrieve
@@ -4363,6 +4484,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
                retval = sctp_setsockopt_interleaving_supported(sk, optval,
                                                                optlen);
                break;
+       case SCTP_REUSE_PORT:
+               retval = sctp_setsockopt_reuse_port(sk, optval, optlen);
+               break;
        default:
                retval = -ENOPROTOOPT;
                break;
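
From userspace the new option is a boolean on TCP-style (one-to-one) sockets only, and it has to be set before a port is bound; sctp_setsockopt_reuse_port() above returns -EOPNOTSUPP for UDP-style sockets and -EFAULT once bind_addr.port is set. A hedged sketch, assuming SCTP_REUSE_PORT is exported by the uapi headers of this series:

    int fd = socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);  /* TCP-style */
    int one = 1;

    if (setsockopt(fd, IPPROTO_SCTP, SCTP_REUSE_PORT, &one, sizeof(one)) < 0)
        perror("SCTP_REUSE_PORT");
    /* bind()/listen() follow; together with the sctp_get_port_local() changes
     * below, this lets several such sockets share a port while none listens.
     */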
@@ -5427,6 +5551,45 @@ static int sctp_getsockopt_peeloff_flags(struct sock *sk, int len,
  *                     also that this field is mutually exclusive to
  *                     SPP_SACKDELAY_ENABLE, setting both will have undefined
  *                     results.
+ *
+ *                     SPP_IPV6_FLOWLABEL:  Setting this flag enables the
+ *                     setting of the IPV6 flow label value.  The value is
+ *                     contained in the spp_ipv6_flowlabel field.
+ *                     Upon retrieval, this flag will be set to indicate that
+ *                     the spp_ipv6_flowlabel field has a valid value returned.
+ *                     If a specific destination address is set (in the
+ *                     spp_address field), then the value returned is that of
+ *                     the address.  If just an association is specified (and
+ *                     no address), then the association's default flow label
+ *                     is returned.  If neither an association nor a destination
+ *                     is specified, then the socket's default flow label is
+ *                     returned.  For non-IPv6 sockets, this flag will be left
+ *                     cleared.
+ *
+ *                     SPP_DSCP:  Setting this flag enables the setting of the
+ *                     Differentiated Services Code Point (DSCP) value
+ *                     associated with either the association or a specific
+ *                     address.  The value is obtained in the spp_dscp field.
+ *                     Upon retrieval, this flag will be set to indicate that
+ *                     the spp_dscp field has a valid value returned.  If a
+ *                     specific destination address is set when called (in the
+ *                     spp_address field), then that specific destination
+ *                     address's DSCP value is returned.  If just an association
+ *                     is specified, then the association's default DSCP is
+ *                     returned.  If neither an association nor a destination is
+ *                     specified, then the socket's default DSCP is returned.
+ *
+ *   spp_ipv6_flowlabel
+ *                   - This field is used in conjunction with the
+ *                     SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label.
+ *                     The 20 least significant bits are used for the flow
+ *                     label.  This setting has precedence over any IPv6-layer
+ *                     setting.
+ *
+ *   spp_dscp        - This field is used in conjunction with the SPP_DSCP flag
+ *                     and contains the DSCP.  The 6 most significant bits are
+ *                     used for the DSCP.  This setting has precedence over any
+ *                     IPv4- or IPv6- layer setting.
  */
 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
                                            char __user *optval, int __user *optlen)
@@ -5436,9 +5599,15 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
        struct sctp_association *asoc = NULL;
        struct sctp_sock        *sp = sctp_sk(sk);
 
-       if (len < sizeof(struct sctp_paddrparams))
+       if (len >= sizeof(params))
+               len = sizeof(params);
+       else if (len >= ALIGN(offsetof(struct sctp_paddrparams,
+                                      spp_ipv6_flowlabel), 4))
+               len = ALIGN(offsetof(struct sctp_paddrparams,
+                                    spp_ipv6_flowlabel), 4);
+       else
                return -EINVAL;
-       len = sizeof(struct sctp_paddrparams);
+
        if (copy_from_user(&params, optval, len))
                return -EFAULT;
 
@@ -5473,6 +5642,15 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
 
                /*draft-11 doesn't say what to return in spp_flags*/
                params.spp_flags      = trans->param_flags;
+               if (trans->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
+                       params.spp_ipv6_flowlabel = trans->flowlabel &
+                                                   SCTP_FLOWLABEL_VAL_MASK;
+                       params.spp_flags |= SPP_IPV6_FLOWLABEL;
+               }
+               if (trans->dscp & SCTP_DSCP_SET_MASK) {
+                       params.spp_dscp = trans->dscp & SCTP_DSCP_VAL_MASK;
+                       params.spp_flags |= SPP_DSCP;
+               }
        } else if (asoc) {
                /* Fetch association values. */
                params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
@@ -5482,6 +5660,15 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
 
                /*draft-11 doesn't say what to return in spp_flags*/
                params.spp_flags      = asoc->param_flags;
+               if (asoc->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
+                       params.spp_ipv6_flowlabel = asoc->flowlabel &
+                                                   SCTP_FLOWLABEL_VAL_MASK;
+                       params.spp_flags |= SPP_IPV6_FLOWLABEL;
+               }
+               if (asoc->dscp & SCTP_DSCP_SET_MASK) {
+                       params.spp_dscp = asoc->dscp & SCTP_DSCP_VAL_MASK;
+                       params.spp_flags |= SPP_DSCP;
+               }
        } else {
                /* Fetch socket values. */
                params.spp_hbinterval = sp->hbinterval;
@@ -5491,6 +5678,15 @@ static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
 
                /*draft-11 doesn't say what to return in spp_flags*/
                params.spp_flags      = sp->param_flags;
+               if (sp->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
+                       params.spp_ipv6_flowlabel = sp->flowlabel &
+                                                   SCTP_FLOWLABEL_VAL_MASK;
+                       params.spp_flags |= SPP_IPV6_FLOWLABEL;
+               }
+               if (sp->dscp & SCTP_DSCP_SET_MASK) {
+                       params.spp_dscp = sp->dscp & SCTP_DSCP_VAL_MASK;
+                       params.spp_flags |= SPP_DSCP;
+               }
        }
 
        if (copy_to_user(optval, &params, len))
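
Retrieval follows the three scopes described in the comment block above: a transport if spp_address is filled in, otherwise the association's defaults, otherwise the socket's. A short sketch, reusing fd and assoc_id from the earlier example (<stdio.h> assumed):

    struct sctp_paddrparams p;
    socklen_t len = sizeof(p);

    memset(&p, 0, sizeof(p));
    p.spp_assoc_id = assoc_id;             /* no spp_address: assoc scope */
    if (getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS, &p, &len) == 0) {
        if (p.spp_flags & SPP_DSCP)
            printf("dscp %u\n", p.spp_dscp >> 2);
        if (p.spp_flags & SPP_IPV6_FLOWLABEL)
            printf("flowlabel 0x%x\n", p.spp_ipv6_flowlabel & 0xfffff);
    }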
@@ -7196,6 +7392,26 @@ static int sctp_getsockopt_interleaving_supported(struct sock *sk, int len,
        return retval;
 }
 
+static int sctp_getsockopt_reuse_port(struct sock *sk, int len,
+                                     char __user *optval,
+                                     int __user *optlen)
+{
+       int val;
+
+       if (len < sizeof(int))
+               return -EINVAL;
+
+       len = sizeof(int);
+       val = sctp_sk(sk)->reuse;
+       if (put_user(len, optlen))
+               return -EFAULT;
+
+       if (copy_to_user(optval, &val, len))
+               return -EFAULT;
+
+       return 0;
+}
+
 static int sctp_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen)
 {
@@ -7391,6 +7607,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
                retval = sctp_getsockopt_interleaving_supported(sk, len, optval,
                                                                optlen);
                break;
+       case SCTP_REUSE_PORT:
+               retval = sctp_getsockopt_reuse_port(sk, len, optval, optlen);
+               break;
        default:
                retval = -ENOPROTOOPT;
                break;
@@ -7428,6 +7647,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
 
 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
 {
+       bool reuse = (sk->sk_reuse || sctp_sk(sk)->reuse);
        struct sctp_bind_hashbucket *head; /* hash list */
        struct sctp_bind_bucket *pp;
        unsigned short snum;
@@ -7500,13 +7720,11 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                 * used by other socket (pp->owner not empty); that other
                 * socket is going to be sk2.
                 */
-               int reuse = sk->sk_reuse;
                struct sock *sk2;
 
                pr_debug("%s: found a possible match\n", __func__);
 
-               if (pp->fastreuse && sk->sk_reuse &&
-                       sk->sk_state != SCTP_SS_LISTENING)
+               if (pp->fastreuse && reuse && sk->sk_state != SCTP_SS_LISTENING)
                        goto success;
 
                /* Run through the list of sockets bound to the port
@@ -7524,7 +7742,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                        ep2 = sctp_sk(sk2)->ep;
 
                        if (sk == sk2 ||
-                           (reuse && sk2->sk_reuse &&
+                           (reuse && (sk2->sk_reuse || sctp_sk(sk2)->reuse) &&
                             sk2->sk_state != SCTP_SS_LISTENING))
                                continue;
 
@@ -7548,12 +7766,12 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
         * SO_REUSEADDR on this socket -sk-).
         */
        if (hlist_empty(&pp->owner)) {
-               if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING)
+               if (reuse && sk->sk_state != SCTP_SS_LISTENING)
                        pp->fastreuse = 1;
                else
                        pp->fastreuse = 0;
        } else if (pp->fastreuse &&
-               (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING))
+                  (!reuse || sk->sk_state == SCTP_SS_LISTENING))
                pp->fastreuse = 0;
 
        /* We are set, so fill up all the data in the hash table
@@ -7684,7 +7902,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
                err = 0;
                sctp_unhash_endpoint(ep);
                sk->sk_state = SCTP_SS_CLOSED;
-               if (sk->sk_reuse)
+               if (sk->sk_reuse || sctp_sk(sk)->reuse)
                        sctp_sk(sk)->bind_hash->fastreuse = 1;
                goto out;
        }
@@ -7717,12 +7935,14 @@ int sctp_inet_listen(struct socket *sock, int backlog)
  * here, again, by modeling the current TCP/UDP code.  We don't have
  * a good way to test with it yet.
  */
-__poll_t sctp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct sctp_sock *sp = sctp_sk(sk);
        __poll_t mask;
 
+       poll_wait(file, sk_sleep(sk), wait);
+
        sock_rps_record_flow(sk);
 
        /* A TCP-style listening socket becomes readable when the accept queue
@@ -8549,6 +8769,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
        newsk->sk_no_check_tx = sk->sk_no_check_tx;
        newsk->sk_no_check_rx = sk->sk_no_check_rx;
        newsk->sk_reuse = sk->sk_reuse;
+       sctp_sk(newsk)->reuse = sp->reuse;
 
        newsk->sk_shutdown = sk->sk_shutdown;
        newsk->sk_destruct = sctp_destruct_sock;
diff --git a/net/smc/Makefile b/net/smc/Makefile
index 188104654b545b4a2c28495d0fca9cf9b020743b..4df96b4b8130908a0bf16a2a0d997ddc51bef3d9 100644
@@ -1,4 +1,4 @@
 obj-$(CONFIG_SMC)      += smc.o
 obj-$(CONFIG_SMC_DIAG) += smc_diag.o
 smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o
-smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o
+smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index da7f02edcd374c44437e34a2705f410317ea536d..f3fdf3714f8b871398f13b92a5ed4385291f8878 100644
@@ -23,6 +23,7 @@
 #include <linux/workqueue.h>
 #include <linux/in.h>
 #include <linux/sched/signal.h>
+#include <linux/if_vlan.h>
 
 #include <net/sock.h>
 #include <net/tcp.h>
@@ -35,6 +36,7 @@
 #include "smc_cdc.h"
 #include "smc_core.h"
 #include "smc_ib.h"
+#include "smc_ism.h"
 #include "smc_pnet.h"
 #include "smc_tx.h"
 #include "smc_rx.h"
@@ -45,6 +47,7 @@ static DEFINE_MUTEX(smc_create_lgr_pending);  /* serialize link group
                                                 */
 
 static void smc_tcp_listen_work(struct work_struct *);
+static void smc_connect_work(struct work_struct *);
 
 static void smc_set_keepalive(struct sock *sk, int val)
 {
@@ -122,6 +125,12 @@ static int smc_release(struct socket *sock)
                goto out;
 
        smc = smc_sk(sk);
+
+       /* cleanup for a dangling non-blocking connect */
+       flush_work(&smc->connect_work);
+       kfree(smc->connect_info);
+       smc->connect_info = NULL;
+
        if (sk->sk_state == SMC_LISTEN)
                /* smc_close_non_accepted() is called and acquires
                 * sock lock for child sockets again
@@ -186,6 +195,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
        sk->sk_protocol = protocol;
        smc = smc_sk(sk);
        INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
+       INIT_WORK(&smc->connect_work, smc_connect_work);
        INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
        INIT_LIST_HEAD(&smc->accept_q);
        spin_lock_init(&smc->accept_q_lock);
@@ -372,8 +382,8 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc)
        return 0;
 }
 
-static void smc_conn_save_peer_info(struct smc_sock *smc,
-                                   struct smc_clc_msg_accept_confirm *clc)
+static void smcr_conn_save_peer_info(struct smc_sock *smc,
+                                    struct smc_clc_msg_accept_confirm *clc)
 {
        int bufsize = smc_uncompress_bufsize(clc->rmbe_size);
 
@@ -384,6 +394,28 @@ static void smc_conn_save_peer_info(struct smc_sock *smc,
        smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
 }
 
+static void smcd_conn_save_peer_info(struct smc_sock *smc,
+                                    struct smc_clc_msg_accept_confirm *clc)
+{
+       int bufsize = smc_uncompress_bufsize(clc->dmbe_size);
+
+       smc->conn.peer_rmbe_idx = clc->dmbe_idx;
+       smc->conn.peer_token = clc->token;
+       /* msg header takes up space in the buffer */
+       smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
+       atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
+       smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
+}
+
+static void smc_conn_save_peer_info(struct smc_sock *smc,
+                                   struct smc_clc_msg_accept_confirm *clc)
+{
+       if (smc->conn.lgr->is_smcd)
+               smcd_conn_save_peer_info(smc, clc);
+       else
+               smcr_conn_save_peer_info(smc, clc);
+}
+
 static void smc_link_save_peer_info(struct smc_link *link,
                                    struct smc_clc_msg_accept_confirm *clc)
 {
@@ -450,15 +482,51 @@ static int smc_check_rdma(struct smc_sock *smc, struct smc_ib_device **ibdev,
        return reason_code;
 }
 
+/* check if there is an ISM device available for this connection. */
+/* called for connect and listen */
+static int smc_check_ism(struct smc_sock *smc, struct smcd_dev **ismdev)
+{
+       /* Find ISM device with same PNETID as connecting interface  */
+       smc_pnet_find_ism_resource(smc->clcsock->sk, ismdev);
+       if (!(*ismdev))
+               return SMC_CLC_DECL_CNFERR; /* configuration error */
+       return 0;
+}
+
+/* Check for VLAN ID and register it on ISM device just for CLC handshake */
+static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
+                                     struct smcd_dev *ismdev,
+                                     unsigned short vlan_id)
+{
+       if (vlan_id && smc_ism_get_vlan(ismdev, vlan_id))
+               return SMC_CLC_DECL_CNFERR;
+       return 0;
+}
+
+/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
+ * used, the VLAN ID will be registered again during the connection setup.
+ */
+static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd,
+                                       struct smcd_dev *ismdev,
+                                       unsigned short vlan_id)
+{
+       if (!is_smcd)
+               return 0;
+       if (vlan_id && smc_ism_put_vlan(ismdev, vlan_id))
+               return SMC_CLC_DECL_CNFERR;
+       return 0;
+}
+
 /* CLC handshake during connect */
-static int smc_connect_clc(struct smc_sock *smc,
+static int smc_connect_clc(struct smc_sock *smc, int smc_type,
                           struct smc_clc_msg_accept_confirm *aclc,
-                          struct smc_ib_device *ibdev, u8 ibport)
+                          struct smc_ib_device *ibdev, u8 ibport,
+                          struct smcd_dev *ismdev)
 {
        int rc = 0;
 
        /* do inband token exchange */
-       rc = smc_clc_send_proposal(smc, ibdev, ibport);
+       rc = smc_clc_send_proposal(smc, smc_type, ibdev, ibport, ismdev);
        if (rc)
                return rc;
        /* receive SMC Accept CLC message */
@@ -475,8 +543,8 @@ static int smc_connect_rdma(struct smc_sock *smc,
        int reason_code = 0;
 
        mutex_lock(&smc_create_lgr_pending);
-       local_contact = smc_conn_create(smc, ibdev, ibport, &aclc->lcl,
-                                       aclc->hdr.flag);
+       local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev,
+                                       ibport, &aclc->lcl, NULL, 0);
        if (local_contact < 0) {
                if (local_contact == -ENOMEM)
                        reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
@@ -491,7 +559,7 @@ static int smc_connect_rdma(struct smc_sock *smc,
        smc_conn_save_peer_info(smc, aclc);
 
        /* create send buffer and rmb */
-       if (smc_buf_create(smc))
+       if (smc_buf_create(smc, false))
                return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact);
 
        if (local_contact == SMC_FIRST_CONTACT)
@@ -538,11 +606,50 @@ static int smc_connect_rdma(struct smc_sock *smc,
        return 0;
 }
 
+/* setup for ISM connection of client */
+static int smc_connect_ism(struct smc_sock *smc,
+                          struct smc_clc_msg_accept_confirm *aclc,
+                          struct smcd_dev *ismdev)
+{
+       int local_contact = SMC_FIRST_CONTACT;
+       int rc = 0;
+
+       mutex_lock(&smc_create_lgr_pending);
+       local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0,
+                                       NULL, ismdev, aclc->gid);
+       if (local_contact < 0)
+               return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0);
+
+       /* Create send and receive buffers */
+       if (smc_buf_create(smc, true))
+               return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact);
+
+       smc_conn_save_peer_info(smc, aclc);
+       smc_close_init(smc);
+       smc_rx_init(smc);
+       smc_tx_init(smc);
+
+       rc = smc_clc_send_confirm(smc);
+       if (rc)
+               return smc_connect_abort(smc, rc, local_contact);
+       mutex_unlock(&smc_create_lgr_pending);
+
+       smc_copy_sock_settings_to_clc(smc);
+       if (smc->sk.sk_state == SMC_INIT)
+               smc->sk.sk_state = SMC_ACTIVE;
+
+       return 0;
+}
+
 /* perform steps before actually connecting */
 static int __smc_connect(struct smc_sock *smc)
 {
+       bool ism_supported = false, rdma_supported = false;
        struct smc_clc_msg_accept_confirm aclc;
        struct smc_ib_device *ibdev;
+       struct smcd_dev *ismdev;
+       unsigned short vlan;
+       int smc_type;
        int rc = 0;
        u8 ibport;
 
@@ -559,23 +666,84 @@ static int __smc_connect(struct smc_sock *smc)
        if (using_ipsec(smc))
                return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC);
 
-       /* check if a RDMA device is available; if not, fall back */
-       if (smc_check_rdma(smc, &ibdev, &ibport))
+       /* check for VLAN ID */
+       if (smc_vlan_by_tcpsk(smc->clcsock, &vlan))
+               return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR);
+
+       /* check if there is an ism device available */
+       if (!smc_check_ism(smc, &ismdev) &&
+           !smc_connect_ism_vlan_setup(smc, ismdev, vlan)) {
+               /* ISM is supported for this connection */
+               ism_supported = true;
+               smc_type = SMC_TYPE_D;
+       }
+
+       /* check if there is a rdma device available */
+       if (!smc_check_rdma(smc, &ibdev, &ibport)) {
+               /* RDMA is supported for this connection */
+               rdma_supported = true;
+               if (ism_supported)
+                       smc_type = SMC_TYPE_B; /* both */
+               else
+                       smc_type = SMC_TYPE_R; /* only RDMA */
+       }
+
+       /* if neither ISM nor RDMA are supported, fallback */
+       if (!rdma_supported && !ism_supported)
                return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR);
 
        /* perform CLC handshake */
-       rc = smc_connect_clc(smc, &aclc, ibdev, ibport);
-       if (rc)
+       rc = smc_connect_clc(smc, smc_type, &aclc, ibdev, ibport, ismdev);
+       if (rc) {
+               smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan);
                return smc_connect_decline_fallback(smc, rc);
+       }
 
-       /* connect using rdma */
-       rc = smc_connect_rdma(smc, &aclc, ibdev, ibport);
-       if (rc)
+       /* depending on previous steps, connect using rdma or ism */
+       if (rdma_supported && aclc.hdr.path == SMC_TYPE_R)
+               rc = smc_connect_rdma(smc, &aclc, ibdev, ibport);
+       else if (ism_supported && aclc.hdr.path == SMC_TYPE_D)
+               rc = smc_connect_ism(smc, &aclc, ismdev);
+       else
+               rc = SMC_CLC_DECL_CNFERR;
+       if (rc) {
+               smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan);
                return smc_connect_decline_fallback(smc, rc);
+       }
 
+       smc_connect_ism_vlan_cleanup(smc, ism_supported, ismdev, vlan);
        return 0;
 }
 
+static void smc_connect_work(struct work_struct *work)
+{
+       struct smc_sock *smc = container_of(work, struct smc_sock,
+                                           connect_work);
+       int rc;
+
+       lock_sock(&smc->sk);
+       rc = kernel_connect(smc->clcsock, &smc->connect_info->addr,
+                           smc->connect_info->alen, smc->connect_info->flags);
+       if (smc->clcsock->sk->sk_err) {
+               smc->sk.sk_err = smc->clcsock->sk->sk_err;
+               goto out;
+       }
+       if (rc < 0) {
+               smc->sk.sk_err = -rc;
+               goto out;
+       }
+
+       rc = __smc_connect(smc);
+       if (rc < 0)
+               smc->sk.sk_err = -rc;
+
+out:
+       smc->sk.sk_state_change(&smc->sk);
+       kfree(smc->connect_info);
+       smc->connect_info = NULL;
+       release_sock(&smc->sk);
+}
+
 static int smc_connect(struct socket *sock, struct sockaddr *addr,
                       int alen, int flags)
 {
@@ -605,15 +773,32 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
 
        smc_copy_sock_settings_to_clc(smc);
        tcp_sk(smc->clcsock->sk)->syn_smc = 1;
-       rc = kernel_connect(smc->clcsock, addr, alen, flags);
-       if (rc)
-               goto out;
+       if (flags & O_NONBLOCK) {
+               if (smc->connect_info) {
+                       rc = -EALREADY;
+                       goto out;
+               }
+               smc->connect_info = kzalloc(alen + 2 * sizeof(int), GFP_KERNEL);
+               if (!smc->connect_info) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+               smc->connect_info->alen = alen;
+               smc->connect_info->flags = flags ^ O_NONBLOCK;
+               memcpy(&smc->connect_info->addr, addr, alen);
+               schedule_work(&smc->connect_work);
+               rc = -EINPROGRESS;
+       } else {
+               rc = kernel_connect(smc->clcsock, addr, alen, flags);
+               if (rc)
+                       goto out;
 
-       rc = __smc_connect(smc);
-       if (rc < 0)
-               goto out;
-       else
-               rc = 0; /* success cases including fallback */
+               rc = __smc_connect(smc);
+               if (rc < 0)
+                       goto out;
+               else
+                       rc = 0; /* success cases including fallback */
+       }
 
 out:
        release_sock(sk);
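
With connect_work in place, a non-blocking connect() on an AF_SMC socket returns -EINPROGRESS while the TCP connect and CLC handshake run in the worker, and sk_state_change() wakes any poller when it finishes. The usual TCP idiom therefore carries over; a sketch assuming AF_SMC from linux/socket.h, SMCPROTO_SMC (see the defines in smc.h below), and a prepared sockaddr in addr:

    /* needs <sys/socket.h>, <fcntl.h>, <poll.h>, <errno.h> */
    int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);

    fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
    if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 &&
        errno == EINPROGRESS) {
        struct pollfd pfd = { .fd = fd, .events = POLLOUT };
        int err = 0;
        socklen_t elen = sizeof(err);

        poll(&pfd, 1, -1);      /* woken by sk_state_change() in the worker */
        getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &elen);
        /* err == 0: connected (native SMC or TCP fallback), else failed */
    }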
@@ -894,7 +1079,8 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
                                int *local_contact)
 {
        /* allocate connection / link group */
-       *local_contact = smc_conn_create(new_smc, ibdev, ibport, &pclc->lcl, 0);
+       *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport,
+                                        &pclc->lcl, NULL, 0);
        if (*local_contact < 0) {
                if (*local_contact == -ENOMEM)
                        return SMC_CLC_DECL_MEM;/* insufficient memory*/
@@ -902,8 +1088,46 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
        }
 
        /* create send buffer and rmb */
-       if (smc_buf_create(new_smc))
+       if (smc_buf_create(new_smc, false))
+               return SMC_CLC_DECL_MEM;
+
+       return 0;
+}
+
+/* listen worker: initialize connection and buffers for SMC-D */
+static int smc_listen_ism_init(struct smc_sock *new_smc,
+                              struct smc_clc_msg_proposal *pclc,
+                              struct smcd_dev *ismdev,
+                              int *local_contact)
+{
+       struct smc_clc_msg_smcd *pclc_smcd;
+
+       pclc_smcd = smc_get_clc_msg_smcd(pclc);
+       *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, NULL,
+                                        ismdev, pclc_smcd->gid);
+       if (*local_contact < 0) {
+               if (*local_contact == -ENOMEM)
+                       return SMC_CLC_DECL_MEM;/* insufficient memory*/
+               return SMC_CLC_DECL_INTERR; /* other error */
+       }
+
+       /* Check if peer can be reached via ISM device */
+       if (smc_ism_cantalk(new_smc->conn.lgr->peer_gid,
+                           new_smc->conn.lgr->vlan_id,
+                           new_smc->conn.lgr->smcd)) {
+               if (*local_contact == SMC_FIRST_CONTACT)
+                       smc_lgr_forget(new_smc->conn.lgr);
+               smc_conn_free(&new_smc->conn);
+               return SMC_CLC_DECL_CNFERR;
+       }
+
+       /* Create send and receive buffers */
+       if (smc_buf_create(new_smc, true)) {
+               if (*local_contact == SMC_FIRST_CONTACT)
+                       smc_lgr_forget(new_smc->conn.lgr);
+               smc_conn_free(&new_smc->conn);
                return SMC_CLC_DECL_MEM;
+       }
 
        return 0;
 }
@@ -966,6 +1190,8 @@ static void smc_listen_work(struct work_struct *work)
        struct smc_clc_msg_accept_confirm cclc;
        struct smc_clc_msg_proposal *pclc;
        struct smc_ib_device *ibdev;
+       bool ism_supported = false;
+       struct smcd_dev *ismdev;
        u8 buf[SMC_CLC_MAX_LEN];
        int local_contact = 0;
        int reason_code = 0;
@@ -1006,12 +1232,21 @@ static void smc_listen_work(struct work_struct *work)
        smc_rx_init(new_smc);
        smc_tx_init(new_smc);
 
+       /* check if ISM is available */
+       if ((pclc->hdr.path == SMC_TYPE_D || pclc->hdr.path == SMC_TYPE_B) &&
+           !smc_check_ism(new_smc, &ismdev) &&
+           !smc_listen_ism_init(new_smc, pclc, ismdev, &local_contact)) {
+               ism_supported = true;
+       }
+
        /* check if RDMA is available */
-       if (smc_check_rdma(new_smc, &ibdev, &ibport) ||
-           smc_listen_rdma_check(new_smc, pclc) ||
-           smc_listen_rdma_init(new_smc, pclc, ibdev, ibport,
-                                &local_contact) ||
-           smc_listen_rdma_reg(new_smc, local_contact)) {
+       if (!ism_supported &&
+           ((pclc->hdr.path != SMC_TYPE_R && pclc->hdr.path != SMC_TYPE_B) ||
+            smc_check_rdma(new_smc, &ibdev, &ibport) ||
+            smc_listen_rdma_check(new_smc, pclc) ||
+            smc_listen_rdma_init(new_smc, pclc, ibdev, ibport,
+                                 &local_contact) ||
+            smc_listen_rdma_reg(new_smc, local_contact))) {
                /* SMC not supported, decline */
                mutex_unlock(&smc_create_lgr_pending);
                smc_listen_decline(new_smc, SMC_CLC_DECL_CNFERR, local_contact);
@@ -1036,7 +1271,8 @@ static void smc_listen_work(struct work_struct *work)
        }
 
        /* finish worker */
-       smc_listen_rdma_finish(new_smc, &cclc, local_contact);
+       if (!ism_supported)
+               smc_listen_rdma_finish(new_smc, &cclc, local_contact);
        smc_conn_save_peer_info(new_smc, &cclc);
        mutex_unlock(&smc_create_lgr_pending);
        smc_listen_out_connected(new_smc);
@@ -1273,39 +1509,23 @@ static __poll_t smc_accept_poll(struct sock *parent)
        return mask;
 }
 
-static __poll_t smc_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t smc_poll(struct file *file, struct socket *sock,
+                            poll_table *wait)
 {
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
        struct smc_sock *smc;
-       int rc;
 
        if (!sk)
                return EPOLLNVAL;
 
        smc = smc_sk(sock->sk);
-       sock_hold(sk);
-       lock_sock(sk);
        if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
                /* delegate to CLC child sock */
-               release_sock(sk);
-               mask = smc->clcsock->ops->poll_mask(smc->clcsock, events);
-               lock_sock(sk);
+               mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
                sk->sk_err = smc->clcsock->sk->sk_err;
-               if (sk->sk_err) {
+               if (sk->sk_err)
                        mask |= EPOLLERR;
-               } else {
-                       /* if non-blocking connect finished ... */
-                       if (sk->sk_state == SMC_INIT &&
-                           mask & EPOLLOUT &&
-                           smc->clcsock->sk->sk_state != TCP_CLOSE) {
-                               rc = __smc_connect(smc);
-                               if (rc < 0)
-                                       mask |= EPOLLERR;
-                               /* success cases including fallback */
-                               mask |= EPOLLOUT | EPOLLWRNORM;
-                       }
-               }
        } else {
                if (sk->sk_err)
                        mask |= EPOLLERR;
@@ -1334,8 +1554,6 @@ static __poll_t smc_poll_mask(struct socket *sock, __poll_t events)
                        mask |= EPOLLPRI;
 
        }
-       release_sock(sk);
-       sock_put(sk);
 
        return mask;
 }
@@ -1619,7 +1837,7 @@ static const struct proto_ops smc_sock_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = smc_accept,
        .getname        = smc_getname,
-       .poll_mask      = smc_poll_mask,
+       .poll           = smc_poll,
        .ioctl          = smc_ioctl,
        .listen         = smc_listen,
        .shutdown       = smc_shutdown,
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 51ae1f10d81aa9390e76e392096e3f93c15b65fe..be20acd7b5aba34161edec7eaad18456abcb8ffb 100644
@@ -21,8 +21,6 @@
 #define SMCPROTO_SMC           0       /* SMC protocol, IPv4 */
 #define SMCPROTO_SMC6          1       /* SMC protocol, IPv6 */
 
-#define SMC_MAX_PORTS          2       /* Max # of ports */
-
 extern struct proto smc_proto;
 extern struct proto smc_proto6;
 
@@ -185,6 +183,17 @@ struct smc_connection {
        spinlock_t              acurs_lock;     /* protect cursors */
 #endif
        struct work_struct      close_work;     /* peer sent some closing */
+       struct tasklet_struct   rx_tsklet;      /* Receiver tasklet for SMC-D */
+       u8                      rx_off;         /* receive offset:
+                                                * 0 for SMC-R, 32 for SMC-D
+                                                */
+       u64                     peer_token;     /* SMC-D token of peer */
+};
+
+struct smc_connect_info {
+       int                     flags;
+       int                     alen;
+       struct sockaddr         addr;
 };
 
 struct smc_sock {                              /* smc sock container */
@@ -192,6 +201,8 @@ struct smc_sock {                           /* smc sock container */
        struct socket           *clcsock;       /* internal tcp socket */
        struct smc_connection   conn;           /* smc connection */
        struct smc_sock         *listen_smc;    /* listen parent */
+       struct smc_connect_info *connect_info;  /* connect address & flags */
+       struct work_struct      connect_work;   /* handle non-blocking connect*/
        struct work_struct      tcp_listen_work;/* handle tcp socket accepts */
        struct work_struct      smc_listen_work;/* prepare new accept socket */
        struct list_head        accept_q;       /* sockets to be accepted */
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index a7e8d63fc8aebe61c094c232d4b6f31439eed3e2..621d8cca570ba38568295fdaaef0380830ab0990 100644
@@ -117,7 +117,7 @@ int smc_cdc_msg_send(struct smc_connection *conn,
        return rc;
 }
 
-int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
+static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
 {
        struct smc_cdc_tx_pend *pend;
        struct smc_wr_buf *wr_buf;
@@ -130,6 +130,21 @@ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
        return smc_cdc_msg_send(conn, wr_buf, pend);
 }
 
+int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
+{
+       int rc;
+
+       if (conn->lgr->is_smcd) {
+               spin_lock_bh(&conn->send_lock);
+               rc = smcd_cdc_msg_send(conn);
+               spin_unlock_bh(&conn->send_lock);
+       } else {
+               rc = smcr_cdc_get_slot_and_msg_send(conn);
+       }
+
+       return rc;
+}
+
 static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend,
                              unsigned long data)
 {
@@ -157,6 +172,45 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
                                (unsigned long)conn);
 }
 
+/* Send a SMC-D CDC header.
+ * This increments the free space available in our send buffer.
+ * Also update the confirmed receive buffer with what was sent to the peer.
+ */
+int smcd_cdc_msg_send(struct smc_connection *conn)
+{
+       struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+       struct smcd_cdc_msg cdc;
+       int rc, diff;
+
+       memset(&cdc, 0, sizeof(cdc));
+       cdc.common.type = SMC_CDC_MSG_TYPE;
+       cdc.prod_wrap = conn->local_tx_ctrl.prod.wrap;
+       cdc.prod_count = conn->local_tx_ctrl.prod.count;
+
+       cdc.cons_wrap = conn->local_tx_ctrl.cons.wrap;
+       cdc.cons_count = conn->local_tx_ctrl.cons.count;
+       cdc.prod_flags = conn->local_tx_ctrl.prod_flags;
+       cdc.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
+       rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
+       if (rc)
+               return rc;
+       smc_curs_write(&conn->rx_curs_confirmed,
+                      smc_curs_read(&conn->local_tx_ctrl.cons, conn), conn);
+       /* Calculate transmitted data and increment free send buffer space */
+       diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
+                            &conn->tx_curs_sent);
+       /* increased by confirmed number of bytes */
+       smp_mb__before_atomic();
+       atomic_add(diff, &conn->sndbuf_space);
+       /* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
+       smp_mb__after_atomic();
+       smc_curs_write(&conn->tx_curs_fin,
+                      smc_curs_read(&conn->tx_curs_sent, conn), conn);
+
+       smc_tx_sndbuf_nonfull(smc);
+       return rc;
+}
+
 /********************************* receive ***********************************/
 
 static inline bool smc_cdc_before(u16 seq1, u16 seq2)
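
smcd_cdc_msg_send() advances tx_curs_fin to tx_curs_sent after the write and credits the distance between the two cursors back to sndbuf_space. A simplified, hypothetical model of that ring arithmetic (the real smc_curs_diff() works on (wrap, count) cursor pairs, not flat offsets):

    /* Distance 'sent' has moved ahead of 'fin' in a ring of buf_len bytes. */
    static unsigned int curs_diff(unsigned int buf_len,
                                  unsigned int fin, unsigned int sent)
    {
        if (sent >= fin)
            return sent - fin;           /* no wrap between the cursors */
        return buf_len - fin + sent;     /* sent wrapped past the end */
    }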
@@ -178,7 +232,7 @@ static void smc_cdc_handle_urg_data_arrival(struct smc_sock *smc,
        if (!sock_flag(&smc->sk, SOCK_URGINLINE))
                /* we'll skip the urgent byte, so don't account for it */
                (*diff_prod)--;
-       base = (char *)conn->rmb_desc->cpu_addr;
+       base = (char *)conn->rmb_desc->cpu_addr + conn->rx_off;
        if (conn->urg_curs.count)
                conn->urg_rx_byte = *(base + conn->urg_curs.count - 1);
        else
@@ -276,6 +330,34 @@ static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
        sock_put(&smc->sk); /* no free sk in softirq-context */
 }
 
+/* Schedule a tasklet for this connection. Triggered from the ISM device IRQ
+ * handler to indicate update in the DMBE.
+ *
+ * Context:
+ * - tasklet context
+ */
+static void smcd_cdc_rx_tsklet(unsigned long data)
+{
+       struct smc_connection *conn = (struct smc_connection *)data;
+       struct smcd_cdc_msg cdc;
+       struct smc_sock *smc;
+
+       if (!conn)
+               return;
+
+       memcpy(&cdc, conn->rmb_desc->cpu_addr, sizeof(cdc));
+       smc = container_of(conn, struct smc_sock, conn);
+       smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
+}
+
+/* Initialize receive tasklet. Called from ISM device IRQ handler to start
+ * receiver side.
+ */
+void smcd_cdc_rx_init(struct smc_connection *conn)
+{
+       tasklet_init(&conn->rx_tsklet, smcd_cdc_rx_tsklet, (unsigned long)conn);
+}
+
 /***************************** init, exit, misc ******************************/
 
 static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf)
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index f60082fee5b8750c92a6c5f9dfa69d9a2d151ce6..8fbce4fee3e4a106244d6de7f71c7d4b7ec330f5 100644
@@ -50,6 +50,20 @@ struct smc_cdc_msg {
        u8                              reserved[18];
 } __packed;                                    /* format defined in RFC7609 */
 
+/* CDC message for SMC-D */
+struct smcd_cdc_msg {
+       struct smc_wr_rx_hdr common;    /* Type = 0xFE */
+       u8 res1[7];
+       u16 prod_wrap;
+       u32 prod_count;
+       u8 res2[2];
+       u16 cons_wrap;
+       u32 cons_count;
+       struct smc_cdc_producer_flags   prod_flags;
+       struct smc_cdc_conn_state_flags conn_state_flags;
+       u8 res3[8];
+} __packed;
+
 static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
 {
        return conn->local_rx_ctrl.conn_state_flags.peer_conn_abort ||
@@ -204,9 +218,9 @@ static inline void smc_cdc_cursor_to_host(union smc_host_cursor *local,
        smc_curs_write(local, smc_curs_read(&temp, conn), conn);
 }
 
-static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
-                                      struct smc_cdc_msg *peer,
-                                      struct smc_connection *conn)
+static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local,
+                                       struct smc_cdc_msg *peer,
+                                       struct smc_connection *conn)
 {
        local->common.type = peer->common.type;
        local->len = peer->len;
@@ -218,6 +232,27 @@ static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
        local->conn_state_flags = peer->conn_state_flags;
 }
 
+static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local,
+                                       struct smcd_cdc_msg *peer)
+{
+       local->prod.wrap = peer->prod_wrap;
+       local->prod.count = peer->prod_count;
+       local->cons.wrap = peer->cons_wrap;
+       local->cons.count = peer->cons_count;
+       local->prod_flags = peer->prod_flags;
+       local->conn_state_flags = peer->conn_state_flags;
+}
+
+static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
+                                      struct smc_cdc_msg *peer,
+                                      struct smc_connection *conn)
+{
+       if (conn->lgr->is_smcd)
+               smcd_cdc_msg_to_host(local, (struct smcd_cdc_msg *)peer);
+       else
+               smcr_cdc_msg_to_host(local, peer, conn);
+}
+
 struct smc_cdc_tx_pend;
 
 int smc_cdc_get_free_slot(struct smc_connection *conn,
@@ -227,6 +262,8 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
 int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
                     struct smc_cdc_tx_pend *pend);
 int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
+int smcd_cdc_msg_send(struct smc_connection *conn);
 int smc_cdc_init(void) __init;
+void smcd_cdc_rx_init(struct smc_connection *conn);
 
 #endif /* SMC_CDC_H */
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 717449b1da0b73d924488d43cd04ed0871607d1b..038d70ef78923c054283e3a8cef48cbc628335aa 100644
 #include "smc_core.h"
 #include "smc_clc.h"
 #include "smc_ib.h"
+#include "smc_ism.h"
+
+#define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
+#define SMCD_CLC_ACCEPT_CONFIRM_LEN 48
 
 /* eye catcher "SMCR" EBCDIC for CLC messages */
 static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
+/* eye catcher "SMCD" EBCDIC for CLC messages */
+static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'};
 
 /* check if received message has a correct header length and contains valid
  * heading and trailing eyecatchers
@@ -38,10 +44,14 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
        struct smc_clc_msg_decline *dclc;
        struct smc_clc_msg_trail *trl;
 
-       if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)))
+       if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
+           memcmp(clcm->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
                return false;
        switch (clcm->type) {
        case SMC_CLC_PROPOSAL:
+               if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
+                   clcm->path != SMC_TYPE_B)
+                       return false;
                pclc = (struct smc_clc_msg_proposal *)clcm;
                pclc_prfx = smc_clc_proposal_get_prefix(pclc);
                if (ntohs(pclc->hdr.length) !=
@@ -56,10 +66,16 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
                break;
        case SMC_CLC_ACCEPT:
        case SMC_CLC_CONFIRM:
+               if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D)
+                       return false;
                clc = (struct smc_clc_msg_accept_confirm *)clcm;
-               if (ntohs(clc->hdr.length) != sizeof(*clc))
+               if ((clcm->path == SMC_TYPE_R &&
+                    ntohs(clc->hdr.length) != SMCR_CLC_ACCEPT_CONFIRM_LEN) ||
+                   (clcm->path == SMC_TYPE_D &&
+                    ntohs(clc->hdr.length) != SMCD_CLC_ACCEPT_CONFIRM_LEN))
                        return false;
-               trl = &clc->trl;
+               trl = (struct smc_clc_msg_trail *)
+                       ((u8 *)clc + ntohs(clc->hdr.length) - sizeof(*trl));
                break;
        case SMC_CLC_DECLINE:
                dclc = (struct smc_clc_msg_decline *)clcm;
@@ -70,7 +86,8 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
        default:
                return false;
        }
-       if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)))
+       if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
+           memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
                return false;
        return true;
 }
@@ -295,6 +312,9 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
        datlen = ntohs(clcm->length);
        if ((len < sizeof(struct smc_clc_msg_hdr)) ||
            (datlen > buflen) ||
+           (clcm->version != SMC_CLC_V1) ||
+           (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
+            clcm->path != SMC_TYPE_B) ||
            ((clcm->type != SMC_CLC_DECLINE) &&
             (clcm->type != expected_type))) {
                smc->sk.sk_err = EPROTO;
@@ -356,17 +376,18 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
 }
 
 /* send CLC PROPOSAL message across internal TCP socket */
-int smc_clc_send_proposal(struct smc_sock *smc,
-                         struct smc_ib_device *smcibdev,
-                         u8 ibport)
+int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
+                         struct smc_ib_device *ibdev, u8 ibport,
+                         struct smcd_dev *ismdev)
 {
        struct smc_clc_ipv6_prefix ipv6_prfx[SMC_CLC_MAX_V6_PREFIX];
        struct smc_clc_msg_proposal_prefix pclc_prfx;
+       struct smc_clc_msg_smcd pclc_smcd;
        struct smc_clc_msg_proposal pclc;
        struct smc_clc_msg_trail trl;
        int len, i, plen, rc;
        int reason_code = 0;
-       struct kvec vec[4];
+       struct kvec vec[5];
        struct msghdr msg;
 
        /* retrieve ip prefixes for CLC proposal msg */
@@ -381,18 +402,34 @@ int smc_clc_send_proposal(struct smc_sock *smc,
        memset(&pclc, 0, sizeof(pclc));
        memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
        pclc.hdr.type = SMC_CLC_PROPOSAL;
-       pclc.hdr.length = htons(plen);
        pclc.hdr.version = SMC_CLC_V1;          /* SMC version */
-       memcpy(pclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
-       memcpy(&pclc.lcl.gid, &smcibdev->gid[ibport - 1], SMC_GID_SIZE);
-       memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1], ETH_ALEN);
-       pclc.iparea_offset = htons(0);
+       pclc.hdr.path = smc_type;
+       if (smc_type == SMC_TYPE_R || smc_type == SMC_TYPE_B) {
+               /* add SMC-R specifics */
+               memcpy(pclc.lcl.id_for_peer, local_systemid,
+                      sizeof(local_systemid));
+               memcpy(&pclc.lcl.gid, &ibdev->gid[ibport - 1], SMC_GID_SIZE);
+               memcpy(&pclc.lcl.mac, &ibdev->mac[ibport - 1], ETH_ALEN);
+               pclc.iparea_offset = htons(0);
+       }
+       if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) {
+               /* add SMC-D specifics */
+               memset(&pclc_smcd, 0, sizeof(pclc_smcd));
+               plen += sizeof(pclc_smcd);
+               pclc.iparea_offset = htons(SMC_CLC_PROPOSAL_MAX_OFFSET);
+               pclc_smcd.gid = ismdev->local_gid;
+       }
+       pclc.hdr.length = htons(plen);
 
        memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
        memset(&msg, 0, sizeof(msg));
        i = 0;
        vec[i].iov_base = &pclc;
        vec[i++].iov_len = sizeof(pclc);
+       if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) {
+               vec[i].iov_base = &pclc_smcd;
+               vec[i++].iov_len = sizeof(pclc_smcd);
+       }
        vec[i].iov_base = &pclc_prfx;
        vec[i++].iov_len = sizeof(pclc_prfx);
        if (pclc_prfx.ipv6_prefixes_cnt > 0) {
@@ -428,35 +465,56 @@ int smc_clc_send_confirm(struct smc_sock *smc)
        struct kvec vec;
        int len;
 
-       link = &conn->lgr->lnk[SMC_SINGLE_LINK];
        /* send SMC Confirm CLC msg */
        memset(&cclc, 0, sizeof(cclc));
-       memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
        cclc.hdr.type = SMC_CLC_CONFIRM;
-       cclc.hdr.length = htons(sizeof(cclc));
        cclc.hdr.version = SMC_CLC_V1;          /* SMC version */
-       memcpy(cclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
-       memcpy(&cclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1],
-              SMC_GID_SIZE);
-       memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
-       hton24(cclc.qpn, link->roce_qp->qp_num);
-       cclc.rmb_rkey =
-               htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
-       cclc.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */
-       cclc.rmbe_alert_token = htonl(conn->alert_token_local);
-       cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
-       cclc.rmbe_size = conn->rmbe_size_short;
-       cclc.rmb_dma_addr = cpu_to_be64(
-               (u64)sg_dma_address(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
-       hton24(cclc.psn, link->psn_initial);
-
-       memcpy(cclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+       if (smc->conn.lgr->is_smcd) {
+               /* SMC-D specific settings */
+               memcpy(cclc.hdr.eyecatcher, SMCD_EYECATCHER,
+                      sizeof(SMCD_EYECATCHER));
+               cclc.hdr.path = SMC_TYPE_D;
+               cclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
+               cclc.gid = conn->lgr->smcd->local_gid;
+               cclc.token = conn->rmb_desc->token;
+               cclc.dmbe_size = conn->rmbe_size_short;
+               cclc.dmbe_idx = 0;
+               memcpy(&cclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
+               memcpy(cclc.smcd_trl.eyecatcher, SMCD_EYECATCHER,
+                      sizeof(SMCD_EYECATCHER));
+       } else {
+               /* SMC-R specific settings */
+               link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+               memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER,
+                      sizeof(SMC_EYECATCHER));
+               cclc.hdr.path = SMC_TYPE_R;
+               cclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
+               memcpy(cclc.lcl.id_for_peer, local_systemid,
+                      sizeof(local_systemid));
+               memcpy(&cclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1],
+                      SMC_GID_SIZE);
+               memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1],
+                      ETH_ALEN);
+               hton24(cclc.qpn, link->roce_qp->qp_num);
+               cclc.rmb_rkey =
+                       htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
+               cclc.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */
+               cclc.rmbe_alert_token = htonl(conn->alert_token_local);
+               cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
+               cclc.rmbe_size = conn->rmbe_size_short;
+               cclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
+                               (conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
+               hton24(cclc.psn, link->psn_initial);
+               memcpy(cclc.smcr_trl.eyecatcher, SMC_EYECATCHER,
+                      sizeof(SMC_EYECATCHER));
+       }
 
        memset(&msg, 0, sizeof(msg));
        vec.iov_base = &cclc;
-       vec.iov_len = sizeof(cclc);
-       len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, sizeof(cclc));
-       if (len < sizeof(cclc)) {
+       vec.iov_len = ntohs(cclc.hdr.length);
+       len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
+                            ntohs(cclc.hdr.length));
+       if (len < ntohs(cclc.hdr.length)) {
                if (len >= 0) {
                        reason_code = -ENETUNREACH;
                        smc->sk.sk_err = -reason_code;
@@ -479,35 +537,58 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
        int rc = 0;
        int len;
 
-       link = &conn->lgr->lnk[SMC_SINGLE_LINK];
        memset(&aclc, 0, sizeof(aclc));
-       memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
        aclc.hdr.type = SMC_CLC_ACCEPT;
-       aclc.hdr.length = htons(sizeof(aclc));
        aclc.hdr.version = SMC_CLC_V1;          /* SMC version */
        if (srv_first_contact)
                aclc.hdr.flag = 1;
-       memcpy(aclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
-       memcpy(&aclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1],
-              SMC_GID_SIZE);
-       memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
-       hton24(aclc.qpn, link->roce_qp->qp_num);
-       aclc.rmb_rkey =
-               htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
-       aclc.rmbe_idx = 1;                      /* as long as 1 RMB = 1 RMBE */
-       aclc.rmbe_alert_token = htonl(conn->alert_token_local);
-       aclc.qp_mtu = link->path_mtu;
-       aclc.rmbe_size = conn->rmbe_size_short,
-       aclc.rmb_dma_addr = cpu_to_be64(
-               (u64)sg_dma_address(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
-       hton24(aclc.psn, link->psn_initial);
-       memcpy(aclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+
+       if (new_smc->conn.lgr->is_smcd) {
+               /* SMC-D specific settings */
+               aclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
+               memcpy(aclc.hdr.eyecatcher, SMCD_EYECATCHER,
+                      sizeof(SMCD_EYECATCHER));
+               aclc.hdr.path = SMC_TYPE_D;
+               aclc.gid = conn->lgr->smcd->local_gid;
+               aclc.token = conn->rmb_desc->token;
+               aclc.dmbe_size = conn->rmbe_size_short;
+               aclc.dmbe_idx = 0;
+               memcpy(&aclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
+               memcpy(aclc.smcd_trl.eyecatcher, SMCD_EYECATCHER,
+                      sizeof(SMCD_EYECATCHER));
+       } else {
+               /* SMC-R specific settings */
+               aclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
+               memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER,
+                      sizeof(SMC_EYECATCHER));
+               aclc.hdr.path = SMC_TYPE_R;
+               link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+               memcpy(aclc.lcl.id_for_peer, local_systemid,
+                      sizeof(local_systemid));
+               memcpy(&aclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1],
+                      SMC_GID_SIZE);
+               memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1],
+                      ETH_ALEN);
+               hton24(aclc.qpn, link->roce_qp->qp_num);
+               aclc.rmb_rkey =
+                       htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
+               aclc.rmbe_idx = 1;              /* as long as 1 RMB = 1 RMBE */
+               aclc.rmbe_alert_token = htonl(conn->alert_token_local);
+               aclc.qp_mtu = link->path_mtu;
+               aclc.rmbe_size = conn->rmbe_size_short,
+               aclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
+                               (conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
+               hton24(aclc.psn, link->psn_initial);
+               memcpy(aclc.smcr_trl.eyecatcher, SMC_EYECATCHER,
+                      sizeof(SMC_EYECATCHER));
+       }
 
        memset(&msg, 0, sizeof(msg));
        vec.iov_base = &aclc;
-       vec.iov_len = sizeof(aclc);
-       len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1, sizeof(aclc));
-       if (len < sizeof(aclc)) {
+       vec.iov_len = ntohs(aclc.hdr.length);
+       len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1,
+                            ntohs(aclc.hdr.length));
+       if (len < ntohs(aclc.hdr.length)) {
                if (len >= 0)
                        new_smc->sk.sk_err = EPROTO;
                else
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index 41ff9ea96139ced3b2a6760af2f310d40e6f58a7..100e988ad1a854941939b830757bbb407ab96f89 100644
@@ -23,6 +23,9 @@
 #define SMC_CLC_DECLINE                0x04
 
 #define SMC_CLC_V1             0x1             /* SMC version                */
+#define SMC_TYPE_R             0               /* SMC-R only                 */
+#define SMC_TYPE_D             1               /* SMC-D only                 */
+#define SMC_TYPE_B             3               /* SMC-R and SMC-D            */
 #define CLC_WAIT_TIME          (6 * HZ)        /* max. wait time on clcsock  */
 #define SMC_CLC_DECL_MEM       0x01010000  /* insufficient memory resources  */
 #define SMC_CLC_DECL_TIMEOUT   0x02000000  /* timeout                        */
@@ -42,9 +45,11 @@ struct smc_clc_msg_hdr {     /* header1 of clc messages */
 #if defined(__BIG_ENDIAN_BITFIELD)
        u8 version : 4,
           flag    : 1,
-          rsvd    : 3;
+          rsvd    : 1,
+          path    : 2;
 #elif defined(__LITTLE_ENDIAN_BITFIELD)
-       u8 rsvd    : 3,
+       u8 path    : 2,
+          rsvd    : 1,
           flag    : 1,
           version : 4;
 #endif
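
Both bitfield orders above place the new two-bit path field in the low-order bits of the first CLC header byte. As an illustrative pair of helpers (not part of the patch) for the SMC_TYPE_* values — note that SMC_TYPE_R is 0, so a bit-mask test against it cannot work; equality comparisons are needed, with SMC_TYPE_B covering both variants:

    /* illustrative helpers, not part of the patch: which variants
     * does a CLC header advertise?
     */
    static inline bool clc_path_has_smcd(u8 path)
    {
            return path == SMC_TYPE_D || path == SMC_TYPE_B;
    }

    static inline bool clc_path_has_smcr(u8 path)
    {
            return path == SMC_TYPE_R || path == SMC_TYPE_B;
    }
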
@@ -77,6 +82,11 @@ struct smc_clc_msg_proposal_prefix { /* prefix part of clc proposal message*/
        u8 ipv6_prefixes_cnt;   /* number of IPv6 prefixes in prefix array */
 } __aligned(4);
 
+struct smc_clc_msg_smcd {      /* SMC-D GID information */
+       u64 gid;                /* ISM GID of requestor */
+       u8 res[32];
+};
+
 struct smc_clc_msg_proposal {  /* clc proposal message sent by Linux */
        struct smc_clc_msg_hdr hdr;
        struct smc_clc_msg_local lcl;
@@ -94,23 +104,45 @@ struct smc_clc_msg_proposal {      /* clc proposal message sent by Linux */
 
 struct smc_clc_msg_accept_confirm {    /* clc accept / confirm message */
        struct smc_clc_msg_hdr hdr;
-       struct smc_clc_msg_local lcl;
-       u8 qpn[3];              /* QP number */
-       __be32 rmb_rkey;        /* RMB rkey */
-       u8 rmbe_idx;            /* Index of RMBE in RMB */
-       __be32 rmbe_alert_token;/* unique connection id */
+       union {
+               struct { /* SMC-R */
+                       struct smc_clc_msg_local lcl;
+                       u8 qpn[3];              /* QP number */
+                       __be32 rmb_rkey;        /* RMB rkey */
+                       u8 rmbe_idx;            /* Index of RMBE in RMB */
+                       __be32 rmbe_alert_token;/* unique connection id */
 #if defined(__BIG_ENDIAN_BITFIELD)
-       u8 rmbe_size : 4,       /* RMBE buf size (compressed notation) */
-          qp_mtu   : 4;        /* QP mtu */
+                       u8 rmbe_size : 4,       /* buf size (compressed) */
+                          qp_mtu   : 4;        /* QP mtu */
 #elif defined(__LITTLE_ENDIAN_BITFIELD)
-       u8 qp_mtu   : 4,
-          rmbe_size : 4;
+                       u8 qp_mtu   : 4,
+                          rmbe_size : 4;
 #endif
-       u8 reserved;
-       __be64 rmb_dma_addr;    /* RMB virtual address */
-       u8 reserved2;
-       u8 psn[3];              /* initial packet sequence number */
-       struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */
+                       u8 reserved;
+                       __be64 rmb_dma_addr;    /* RMB virtual address */
+                       u8 reserved2;
+                       u8 psn[3];              /* packet sequence number */
+                       struct smc_clc_msg_trail smcr_trl;
+                                               /* eye catcher "SMCR" EBCDIC */
+               } __packed;
+               struct { /* SMC-D */
+                       u64 gid;                /* Sender GID */
+                       u64 token;              /* DMB token */
+                       u8 dmbe_idx;            /* DMBE index */
+#if defined(__BIG_ENDIAN_BITFIELD)
+                       u8 dmbe_size : 4,       /* buf size (compressed) */
+                          reserved3 : 4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+                       u8 reserved3 : 4,
+                          dmbe_size : 4;
+#endif
+                       u16 reserved4;
+                       u32 linkid;             /* Link identifier */
+                       u32 reserved5[3];
+                       struct smc_clc_msg_trail smcd_trl;
+                                               /* eye catcher "SMCD" EBCDIC */
+               } __packed;
+       };
 } __packed;                    /* format defined in RFC7609 */
 
 struct smc_clc_msg_decline {   /* clc decline message */
@@ -129,13 +161,26 @@ smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc)
               ((u8 *)pclc + sizeof(*pclc) + ntohs(pclc->iparea_offset));
 }
 
+/* get SMC-D info from proposal message */
+static inline struct smc_clc_msg_smcd *
+smc_get_clc_msg_smcd(struct smc_clc_msg_proposal *prop)
+{
+       if (ntohs(prop->iparea_offset) != sizeof(struct smc_clc_msg_smcd))
+               return NULL;
+
+       return (struct smc_clc_msg_smcd *)(prop + 1);
+}
+
+struct smcd_dev;
+
 int smc_clc_prfx_match(struct socket *clcsock,
                       struct smc_clc_msg_proposal_prefix *prop);
 int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
                     u8 expected_type);
 int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info);
-int smc_clc_send_proposal(struct smc_sock *smc, struct smc_ib_device *smcibdev,
-                         u8 ibport);
+int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
+                         struct smc_ib_device *smcibdev, u8 ibport,
+                         struct smcd_dev *ismdev);
 int smc_clc_send_confirm(struct smc_sock *smc);
 int smc_clc_send_accept(struct smc_sock *smc, int srv_first_contact);
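
smc_get_clc_msg_smcd() above relies on the SMC-D GID block being appended directly behind the fixed proposal struct, with iparea_offset doubling as its size. A hypothetical caller (the helper name is made up):

    /* hypothetical caller: extract the peer ISM GID from a received
     * proposal; returns 0 when no SMC-D block was appended
     */
    static u64 proposal_peer_gid(struct smc_clc_msg_proposal *pclc)
    {
            struct smc_clc_msg_smcd *info = smc_get_clc_msg_smcd(pclc);

            return info ? info->gid : 0;
    }
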
 
index add82b0266f303ac943ecf8786e1b61e401a9b36..66741e61a3b07244f43791e21f3d16ef3990cc2d 100644 (file)
@@ -25,6 +25,7 @@
 #include "smc_llc.h"
 #include "smc_cdc.h"
 #include "smc_close.h"
+#include "smc_ism.h"
 
 #define SMC_LGR_NUM_INCR               256
 #define SMC_LGR_FREE_DELAY_SERV                (600 * HZ)
@@ -46,8 +47,8 @@ static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
         * otherwise there is a risk of out-of-sync link groups.
         */
        mod_delayed_work(system_wq, &lgr->free_work,
-                        lgr->role == SMC_CLNT ? SMC_LGR_FREE_DELAY_CLNT :
-                                                SMC_LGR_FREE_DELAY_SERV);
+                        (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
+                        SMC_LGR_FREE_DELAY_CLNT : SMC_LGR_FREE_DELAY_SERV);
 }
 
 /* Register connection's alert token in our lookup structure.
@@ -153,16 +154,18 @@ static void smc_lgr_free_work(struct work_struct *work)
 free:
        spin_unlock_bh(&smc_lgr_list.lock);
        if (!delayed_work_pending(&lgr->free_work)) {
-               if (lgr->lnk[SMC_SINGLE_LINK].state != SMC_LNK_INACTIVE)
+               if (!lgr->is_smcd &&
+                   lgr->lnk[SMC_SINGLE_LINK].state != SMC_LNK_INACTIVE)
                        smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
                smc_lgr_free(lgr);
        }
 }
 
 /* create a new SMC link group */
-static int smc_lgr_create(struct smc_sock *smc,
+static int smc_lgr_create(struct smc_sock *smc, bool is_smcd,
                          struct smc_ib_device *smcibdev, u8 ibport,
-                         char *peer_systemid, unsigned short vlan_id)
+                         char *peer_systemid, unsigned short vlan_id,
+                         struct smcd_dev *smcismdev, u64 peer_gid)
 {
        struct smc_link_group *lgr;
        struct smc_link *lnk;
@@ -170,17 +173,23 @@ static int smc_lgr_create(struct smc_sock *smc,
        int rc = 0;
        int i;
 
+       if (is_smcd && vlan_id) {
+               rc = smc_ism_get_vlan(smcismdev, vlan_id);
+               if (rc)
+                       goto out;
+       }
+
        lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
        if (!lgr) {
                rc = -ENOMEM;
                goto out;
        }
-       lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
+       lgr->is_smcd = is_smcd;
        lgr->sync_err = 0;
-       memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN);
        lgr->vlan_id = vlan_id;
        rwlock_init(&lgr->sndbufs_lock);
        rwlock_init(&lgr->rmbs_lock);
+       rwlock_init(&lgr->conns_lock);
        for (i = 0; i < SMC_RMBE_SIZES; i++) {
                INIT_LIST_HEAD(&lgr->sndbufs[i]);
                INIT_LIST_HEAD(&lgr->rmbs[i]);
@@ -189,36 +198,44 @@ static int smc_lgr_create(struct smc_sock *smc,
        memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
        INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
        lgr->conns_all = RB_ROOT;
-
-       lnk = &lgr->lnk[SMC_SINGLE_LINK];
-       /* initialize link */
-       lnk->state = SMC_LNK_ACTIVATING;
-       lnk->link_id = SMC_SINGLE_LINK;
-       lnk->smcibdev = smcibdev;
-       lnk->ibport = ibport;
-       lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
-       if (!smcibdev->initialized)
-               smc_ib_setup_per_ibdev(smcibdev);
-       get_random_bytes(rndvec, sizeof(rndvec));
-       lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16);
-       rc = smc_llc_link_init(lnk);
-       if (rc)
-               goto free_lgr;
-       rc = smc_wr_alloc_link_mem(lnk);
-       if (rc)
-               goto clear_llc_lnk;
-       rc = smc_ib_create_protection_domain(lnk);
-       if (rc)
-               goto free_link_mem;
-       rc = smc_ib_create_queue_pair(lnk);
-       if (rc)
-               goto dealloc_pd;
-       rc = smc_wr_create_link(lnk);
-       if (rc)
-               goto destroy_qp;
-
+       if (is_smcd) {
+               /* SMC-D specific settings */
+               lgr->peer_gid = peer_gid;
+               lgr->smcd = smcismdev;
+       } else {
+               /* SMC-R specific settings */
+               lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
+               memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN);
+
+               lnk = &lgr->lnk[SMC_SINGLE_LINK];
+               /* initialize link */
+               lnk->state = SMC_LNK_ACTIVATING;
+               lnk->link_id = SMC_SINGLE_LINK;
+               lnk->smcibdev = smcibdev;
+               lnk->ibport = ibport;
+               lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
+               if (!smcibdev->initialized)
+                       smc_ib_setup_per_ibdev(smcibdev);
+               get_random_bytes(rndvec, sizeof(rndvec));
+               lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
+                       (rndvec[2] << 16);
+               rc = smc_llc_link_init(lnk);
+               if (rc)
+                       goto free_lgr;
+               rc = smc_wr_alloc_link_mem(lnk);
+               if (rc)
+                       goto clear_llc_lnk;
+               rc = smc_ib_create_protection_domain(lnk);
+               if (rc)
+                       goto free_link_mem;
+               rc = smc_ib_create_queue_pair(lnk);
+               if (rc)
+                       goto dealloc_pd;
+               rc = smc_wr_create_link(lnk);
+               if (rc)
+                       goto destroy_qp;
+       }
        smc->conn.lgr = lgr;
-       rwlock_init(&lgr->conns_lock);
        spin_lock_bh(&smc_lgr_list.lock);
        list_add(&lgr->list, &smc_lgr_list.list);
        spin_unlock_bh(&smc_lgr_list.lock);
@@ -264,7 +281,12 @@ void smc_conn_free(struct smc_connection *conn)
 {
        if (!conn->lgr)
                return;
-       smc_cdc_tx_dismiss_slots(conn);
+       if (conn->lgr->is_smcd) {
+               smc_ism_unset_conn(conn);
+               tasklet_kill(&conn->rx_tsklet);
+       } else {
+               smc_cdc_tx_dismiss_slots(conn);
+       }
        smc_lgr_unregister_conn(conn);
        smc_buf_unuse(conn);
 }
@@ -280,8 +302,8 @@ static void smc_link_clear(struct smc_link *lnk)
        smc_wr_free_link_mem(lnk);
 }
 
-static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
-                        struct smc_buf_desc *buf_desc)
+static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
+                         struct smc_buf_desc *buf_desc)
 {
        struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
 
@@ -301,6 +323,28 @@ static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
        kfree(buf_desc);
 }
 
+static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
+                         struct smc_buf_desc *buf_desc)
+{
+       if (is_dmb) {
+               /* restore original buf len */
+               buf_desc->len += sizeof(struct smcd_cdc_msg);
+               smc_ism_unregister_dmb(lgr->smcd, buf_desc);
+       } else {
+               kfree(buf_desc->cpu_addr);
+       }
+       kfree(buf_desc);
+}
+
+static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
+                        struct smc_buf_desc *buf_desc)
+{
+       if (lgr->is_smcd)
+               smcd_buf_free(lgr, is_rmb, buf_desc);
+       else
+               smcr_buf_free(lgr, is_rmb, buf_desc);
+}
+
 static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
 {
        struct smc_buf_desc *buf_desc, *bf_desc;
@@ -332,7 +376,10 @@ static void smc_lgr_free_bufs(struct smc_link_group *lgr)
 void smc_lgr_free(struct smc_link_group *lgr)
 {
        smc_lgr_free_bufs(lgr);
-       smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
+       if (lgr->is_smcd)
+               smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
+       else
+               smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
        kfree(lgr);
 }
 
@@ -357,7 +404,8 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr)
        lgr->terminating = 1;
        if (!list_empty(&lgr->list)) /* forget lgr */
                list_del_init(&lgr->list);
-       smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
+       if (!lgr->is_smcd)
+               smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
 
        write_lock_bh(&lgr->conns_lock);
        node = rb_first(&lgr->conns_all);
@@ -374,7 +422,8 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr)
                node = rb_first(&lgr->conns_all);
        }
        write_unlock_bh(&lgr->conns_lock);
-       wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
+       if (!lgr->is_smcd)
+               wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
        smc_lgr_schedule_free_work(lgr);
 }
 
@@ -392,17 +441,44 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
 
        spin_lock_bh(&smc_lgr_list.lock);
        list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
-               if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
+               if (!lgr->is_smcd &&
+                   lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
                    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
                        __smc_lgr_terminate(lgr);
        }
        spin_unlock_bh(&smc_lgr_list.lock);
 }
 
+/* Called when SMC-D device is terminated or peer is lost */
+void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
+{
+       struct smc_link_group *lgr, *l;
+       LIST_HEAD(lgr_free_list);
+
+       /* run common cleanup function and build free list */
+       spin_lock_bh(&smc_lgr_list.lock);
+       list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
+               if (lgr->is_smcd && lgr->smcd == dev &&
+                   (!peer_gid || lgr->peer_gid == peer_gid) &&
+                   !list_empty(&lgr->list)) {
+                       __smc_lgr_terminate(lgr);
+                       list_move(&lgr->list, &lgr_free_list);
+               }
+       }
+       spin_unlock_bh(&smc_lgr_list.lock);
+
+       /* cancel the regular free workers and actually free lgrs */
+       list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
+               list_del_init(&lgr->list);
+               cancel_delayed_work_sync(&lgr->free_work);
+               smc_lgr_free(lgr);
+       }
+}
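
smc_smcd_terminate() uses a common kernel idiom: detach the matching link groups onto a private list while holding the spinlock, then run the sleeping cleanup (cancel_delayed_work_sync()) only after dropping it. A minimal self-contained sketch of that idiom (demo_* names are made up; needs linux/list.h, linux/slab.h, linux/spinlock.h):

    struct demo_item {
            struct list_head list;
    };

    static void demo_drain(spinlock_t *lock, struct list_head *global)
    {
            struct demo_item *it, *tmp;
            LIST_HEAD(free_list);

            spin_lock_bh(lock);                     /* atomic section */
            list_splice_init(global, &free_list);   /* detach entries */
            spin_unlock_bh(lock);

            list_for_each_entry_safe(it, tmp, &free_list, list) {
                    list_del_init(&it->list);
                    kfree(it);      /* sleeping cleanup is safe here */
            }
    }
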
+
 /* Determine vlan of internal TCP socket.
  * @vlan_id: address to store the determined vlan id into
  */
-static int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
+int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
 {
        struct dst_entry *dst = sk_dst_get(clcsock->sk);
        struct net_device *ndev;
@@ -477,10 +553,30 @@ static int smc_link_determine_gid(struct smc_link_group *lgr)
        return -ENODEV;
 }
 
+static bool smcr_lgr_match(struct smc_link_group *lgr,
+                          struct smc_clc_msg_local *lcl,
+                          enum smc_lgr_role role)
+{
+       return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
+                      SMC_SYSTEMID_LEN) &&
+               !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
+                       SMC_GID_SIZE) &&
+               !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
+                       sizeof(lcl->mac)) &&
+               lgr->role == role;
+}
+
+static bool smcd_lgr_match(struct smc_link_group *lgr,
+                          struct smcd_dev *smcismdev, u64 peer_gid)
+{
+       return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
+}
+
 /* create a new SMC connection (and a new link group if necessary) */
-int smc_conn_create(struct smc_sock *smc,
+int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
                    struct smc_ib_device *smcibdev, u8 ibport,
-                   struct smc_clc_msg_local *lcl, int srv_first_contact)
+                   struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
+                   u64 peer_gid)
 {
        struct smc_connection *conn = &smc->conn;
        int local_contact = SMC_FIRST_CONTACT;
@@ -502,17 +598,12 @@ int smc_conn_create(struct smc_sock *smc,
        spin_lock_bh(&smc_lgr_list.lock);
        list_for_each_entry(lgr, &smc_lgr_list.list, list) {
                write_lock_bh(&lgr->conns_lock);
-               if (!memcmp(lgr->peer_systemid, lcl->id_for_peer,
-                           SMC_SYSTEMID_LEN) &&
-                   !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
-                           SMC_GID_SIZE) &&
-                   !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
-                           sizeof(lcl->mac)) &&
+               if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) :
+                    smcr_lgr_match(lgr, lcl, role)) &&
                    !lgr->sync_err &&
-                   (lgr->role == role) &&
-                   (lgr->vlan_id == vlan_id) &&
-                   ((role == SMC_CLNT) ||
-                    (lgr->conns_num < SMC_RMBS_PER_LGR_MAX))) {
+                   lgr->vlan_id == vlan_id &&
+                   (role == SMC_CLNT ||
+                    lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
                        /* link group found */
                        local_contact = SMC_REUSE_CONTACT;
                        conn->lgr = lgr;
@@ -535,16 +626,21 @@ int smc_conn_create(struct smc_sock *smc,
 
 create:
        if (local_contact == SMC_FIRST_CONTACT) {
-               rc = smc_lgr_create(smc, smcibdev, ibport,
-                                   lcl->id_for_peer, vlan_id);
+               rc = smc_lgr_create(smc, is_smcd, smcibdev, ibport,
+                                   lcl->id_for_peer, vlan_id, smcd, peer_gid);
                if (rc)
                        goto out;
                smc_lgr_register_conn(conn); /* add smc conn to lgr */
-               rc = smc_link_determine_gid(conn->lgr);
+               if (!is_smcd)
+                       rc = smc_link_determine_gid(conn->lgr);
        }
        conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
        conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
        conn->urg_state = SMC_URG_READ;
+       if (is_smcd) {
+               conn->rx_off = sizeof(struct smcd_cdc_msg);
+               smcd_cdc_rx_init(conn); /* init tasklet for this conn */
+       }
 #ifndef KERNEL_HAS_ATOMIC64
        spin_lock_init(&conn->acurs_lock);
 #endif
@@ -609,8 +705,8 @@ static inline int smc_rmb_wnd_update_limit(int rmbe_size)
        return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
 }
 
-static struct smc_buf_desc *smc_new_buf_create(struct smc_link_group *lgr,
-                                              bool is_rmb, int bufsize)
+static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
+                                               bool is_rmb, int bufsize)
 {
        struct smc_buf_desc *buf_desc;
        struct smc_link *lnk;
@@ -668,7 +764,44 @@ static struct smc_buf_desc *smc_new_buf_create(struct smc_link_group *lgr,
        return buf_desc;
 }
 
-static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
+#define SMCD_DMBE_SIZES                6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
+
+static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
+                                               bool is_dmb, int bufsize)
+{
+       struct smc_buf_desc *buf_desc;
+       int rc;
+
+       if (smc_compress_bufsize(bufsize) > SMCD_DMBE_SIZES)
+               return ERR_PTR(-EAGAIN);
+
+       /* try to alloc a new DMB */
+       buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
+       if (!buf_desc)
+               return ERR_PTR(-ENOMEM);
+       if (is_dmb) {
+               rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
+               if (rc) {
+                       kfree(buf_desc);
+                       return ERR_PTR(-EAGAIN);
+               }
+               buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
+               /* CDC header stored in buf. So, pretend it was smaller */
+               buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
+       } else {
+               buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
+                                            __GFP_NOWARN | __GFP_NORETRY |
+                                            __GFP_NOMEMALLOC);
+               if (!buf_desc->cpu_addr) {
+                       kfree(buf_desc);
+                       return ERR_PTR(-EAGAIN);
+               }
+               buf_desc->len = bufsize;
+       }
+       return buf_desc;
+}
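
SMCD_DMBE_SIZES caps the compressed size notation at 1MB for DMBs. The encoding, as the comment states, is 16KB shifted left by the compressed value; the kernel's own smc_uncompress_bufsize() is the real helper, but spelled out as a sketch:

    /* sketch of the compressed buffer-size notation, assuming the
     * 16KB << n encoding stated in the SMCD_DMBE_SIZES comment
     */
    static int demo_uncompress_bufsize(u8 compressed)
    {
            return 16384 << compressed;     /* 0 -> 16KB ... 6 -> 1MB */
    }
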
+
+static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
 {
        struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
        struct smc_connection *conn = &smc->conn;
@@ -706,7 +839,11 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
                        break; /* found reusable slot */
                }
 
-               buf_desc = smc_new_buf_create(lgr, is_rmb, bufsize);
+               if (is_smcd)
+                       buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
+               else
+                       buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);
+
                if (PTR_ERR(buf_desc) == -ENOMEM)
                        break;
                if (IS_ERR(buf_desc))
@@ -727,7 +864,10 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
                conn->rmbe_size_short = bufsize_short;
                smc->sk.sk_rcvbuf = bufsize * 2;
                atomic_set(&conn->bytes_to_rcv, 0);
-               conn->rmbe_update_limit = smc_rmb_wnd_update_limit(bufsize);
+               conn->rmbe_update_limit =
+                       smc_rmb_wnd_update_limit(buf_desc->len);
+               if (is_smcd)
+                       smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
        } else {
                conn->sndbuf_desc = buf_desc;
                smc->sk.sk_sndbuf = bufsize * 2;
@@ -740,6 +880,8 @@ void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
 {
        struct smc_link_group *lgr = conn->lgr;
 
+       if (!conn->lgr || conn->lgr->is_smcd)
+               return;
        smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
                               conn->sndbuf_desc, DMA_TO_DEVICE);
 }
@@ -748,6 +890,8 @@ void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
 {
        struct smc_link_group *lgr = conn->lgr;
 
+       if (!conn->lgr || conn->lgr->is_smcd)
+               return;
        smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
                                  conn->sndbuf_desc, DMA_TO_DEVICE);
 }
@@ -756,6 +900,8 @@ void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
 {
        struct smc_link_group *lgr = conn->lgr;
 
+       if (!conn->lgr || conn->lgr->is_smcd)
+               return;
        smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
                               conn->rmb_desc, DMA_FROM_DEVICE);
 }
@@ -764,6 +910,8 @@ void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
 {
        struct smc_link_group *lgr = conn->lgr;
 
+       if (!conn->lgr || conn->lgr->is_smcd)
+               return;
        smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
                                  conn->rmb_desc, DMA_FROM_DEVICE);
 }
@@ -774,16 +922,16 @@ void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
  * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
  * extra RMB for every connection in a link group
  */
-int smc_buf_create(struct smc_sock *smc)
+int smc_buf_create(struct smc_sock *smc, bool is_smcd)
 {
        int rc;
 
        /* create send buffer */
-       rc = __smc_buf_create(smc, false);
+       rc = __smc_buf_create(smc, is_smcd, false);
        if (rc)
                return rc;
        /* create rmb */
-       rc = __smc_buf_create(smc, true);
+       rc = __smc_buf_create(smc, is_smcd, true);
        if (rc)
                smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
        return rc;
@@ -865,7 +1013,8 @@ void smc_core_exit(void)
        spin_unlock_bh(&smc_lgr_list.lock);
        list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
                list_del_init(&lgr->list);
-               smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
+               if (!lgr->is_smcd)
+                       smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
                cancel_delayed_work_sync(&lgr->free_work);
                smc_lgr_free(lgr); /* free link group */
        }
index 93cb3523bf5093dbe4e30d34bd02e05eab98c5c3..8b47e0168fc3d56518707bb8d38debe470665902 100644 (file)
@@ -124,15 +124,28 @@ struct smc_buf_desc {
        void                    *cpu_addr;      /* virtual address of buffer */
        struct page             *pages;
        int                     len;            /* length of buffer */
-       struct sg_table         sgt[SMC_LINKS_PER_LGR_MAX];/* virtual buffer */
-       struct ib_mr            *mr_rx[SMC_LINKS_PER_LGR_MAX];
-                                               /* for rmb only: memory region
-                                                * incl. rkey provided to peer
-                                                */
-       u32                     order;          /* allocation order */
        u32                     used;           /* currently used / unused */
        u8                      reused  : 1;    /* new created / reused */
        u8                      regerr  : 1;    /* err during registration */
+       union {
+               struct { /* SMC-R */
+                       struct sg_table         sgt[SMC_LINKS_PER_LGR_MAX];
+                                               /* virtual buffer */
+                       struct ib_mr            *mr_rx[SMC_LINKS_PER_LGR_MAX];
+                                               /* for rmb only: memory region
+                                                * incl. rkey provided to peer
+                                                */
+                       u32                     order;  /* allocation order */
+               };
+               struct { /* SMC-D */
+                       unsigned short          sba_idx;
+                                               /* SBA index number */
+                       u64                     token;
+                                               /* DMB token number */
+                       dma_addr_t              dma_addr;
+                                               /* DMA address */
+               };
+       };
 };
 
 struct smc_rtoken {                            /* address/key of remote RMB */
@@ -148,12 +161,10 @@ struct smc_rtoken {                               /* address/key of remote RMB */
  * struct smc_clc_msg_accept_confirm.rmbe_size being a 4 bit value (0..15)
  */
 
+struct smcd_dev;
+
 struct smc_link_group {
        struct list_head        list;
-       enum smc_lgr_role       role;           /* client or server */
-       struct smc_link         lnk[SMC_LINKS_PER_LGR_MAX];     /* smc link */
-       char                    peer_systemid[SMC_SYSTEMID_LEN];
-                                               /* unique system_id of peer */
        struct rb_root          conns_all;      /* connection tree */
        rwlock_t                conns_lock;     /* protects conns_all */
        unsigned int            conns_num;      /* current # of connections */
@@ -163,17 +174,35 @@ struct smc_link_group {
        rwlock_t                sndbufs_lock;   /* protects tx buffers */
        struct list_head        rmbs[SMC_RMBE_SIZES];   /* rx buffers */
        rwlock_t                rmbs_lock;      /* protects rx buffers */
-       struct smc_rtoken       rtokens[SMC_RMBS_PER_LGR_MAX]
-                                      [SMC_LINKS_PER_LGR_MAX];
-                                               /* remote addr/key pairs */
-       unsigned long           rtokens_used_mask[BITS_TO_LONGS(
-                                                       SMC_RMBS_PER_LGR_MAX)];
-                                               /* used rtoken elements */
 
        u8                      id[SMC_LGR_ID_SIZE];    /* unique lgr id */
        struct delayed_work     free_work;      /* delayed freeing of an lgr */
        u8                      sync_err : 1;   /* lgr no longer fits to peer */
        u8                      terminating : 1;/* lgr is terminating */
+
+       bool                    is_smcd;        /* SMC-R or SMC-D */
+       union {
+               struct { /* SMC-R */
+                       enum smc_lgr_role       role;
+                                               /* client or server */
+                       struct smc_link         lnk[SMC_LINKS_PER_LGR_MAX];
+                                               /* smc link */
+                       char                    peer_systemid[SMC_SYSTEMID_LEN];
+                                               /* unique system_id of peer */
+                       struct smc_rtoken       rtokens[SMC_RMBS_PER_LGR_MAX]
+                                               [SMC_LINKS_PER_LGR_MAX];
+                                               /* remote addr/key pairs */
+                       unsigned long           rtokens_used_mask[BITS_TO_LONGS
+                                                       (SMC_RMBS_PER_LGR_MAX)];
+                                               /* used rtoken elements */
+               };
+               struct { /* SMC-D */
+                       u64                     peer_gid;
+                                               /* Peer GID (remote) */
+                       struct smcd_dev         *smcd;
+                                               /* ISM device for VLAN reg. */
+               };
+       };
 };
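
With role, lnk[] and the rtoken state now living in a union beside the SMC-D members, a field is only valid for the matching flavor; every access elsewhere in this patch is therefore guarded by is_smcd. Illustrative guard (made-up helper):

    /* illustrative: union members are only valid for the matching
     * flavor, keyed off lgr->is_smcd
     */
    static u64 lgr_peer_gid_or_zero(const struct smc_link_group *lgr)
    {
            return lgr->is_smcd ? lgr->peer_gid : 0;
    }
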
 
 /* Find the connection associated with the given alert token in the link group.
@@ -217,7 +246,8 @@ void smc_lgr_free(struct smc_link_group *lgr);
 void smc_lgr_forget(struct smc_link_group *lgr);
 void smc_lgr_terminate(struct smc_link_group *lgr);
 void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
-int smc_buf_create(struct smc_sock *smc);
+void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid);
+int smc_buf_create(struct smc_sock *smc, bool is_smcd);
 int smc_uncompress_bufsize(u8 compressed);
 int smc_rmb_rtoken_handling(struct smc_connection *conn,
                            struct smc_clc_msg_accept_confirm *clc);
@@ -227,9 +257,13 @@ void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn);
 void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn);
 void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn);
 void smc_rmb_sync_sg_for_device(struct smc_connection *conn);
+int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id);
+
 void smc_conn_free(struct smc_connection *conn);
-int smc_conn_create(struct smc_sock *smc,
+int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
                    struct smc_ib_device *smcibdev, u8 ibport,
-                   struct smc_clc_msg_local *lcl, int srv_first_contact);
+                   struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
+                   u64 peer_gid);
+void smcd_conn_free(struct smc_connection *conn);
 void smc_core_exit(void);
 #endif
index 839354402215a836556fd881c350ca0ddb6b1c1b..6d83eef1b743da03cd06302076236d5a668b33e6 100644 (file)
@@ -136,7 +136,8 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
                        goto errout;
        }
 
-       if ((req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && smc->conn.lgr &&
+       if (smc->conn.lgr && !smc->conn.lgr->is_smcd &&
+           (req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) &&
            !list_empty(&smc->conn.lgr->list)) {
                struct smc_diag_lgrinfo linfo = {
                        .role = smc->conn.lgr->role,
@@ -155,6 +156,21 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
                if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0)
                        goto errout;
        }
+       if (smc->conn.lgr && smc->conn.lgr->is_smcd &&
+           (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
+           !list_empty(&smc->conn.lgr->list)) {
+               struct smc_connection *conn = &smc->conn;
+               struct smcd_diag_dmbinfo dinfo = {
+                       .linkid = *((u32 *)conn->lgr->id),
+                       .peer_gid = conn->lgr->peer_gid,
+                       .my_gid = conn->lgr->smcd->local_gid,
+                       .token = conn->rmb_desc->token,
+                       .peer_token = conn->peer_token
+               };
+
+               if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
+                       goto errout;
+       }
 
        nlmsg_end(skb, nlh);
        return 0;
index 0eed7ab9f28b54c77010d85558d1b26b8e65b208..36de2fd76170be0774d547268c8350be8804b8bb 100644 (file)
@@ -143,6 +143,62 @@ int smc_ib_ready_link(struct smc_link *lnk)
        return rc;
 }
 
+static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport)
+{
+       struct ib_gid_attr gattr;
+       int rc;
+
+       rc = ib_query_gid(smcibdev->ibdev, ibport, 0,
+                         &smcibdev->gid[ibport - 1], &gattr);
+       if (rc || !gattr.ndev)
+               return -ENODEV;
+
+       memcpy(smcibdev->mac[ibport - 1], gattr.ndev->dev_addr, ETH_ALEN);
+       dev_put(gattr.ndev);
+       return 0;
+}
+
+/* Create an identifier unique for this instance of SMC-R.
+ * The MAC-address of the first active registered IB device
+ * plus a random 2-byte number is used to create this identifier.
+ * This name is delivered to the peer during connection initialization.
+ */
+static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
+                                               u8 ibport)
+{
+       memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
+              sizeof(smcibdev->mac[ibport - 1]));
+       get_random_bytes(&local_systemid[0], 2);
+}
+
+bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
+{
+       return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
+}
+
+static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
+{
+       int rc;
+
+       memset(&smcibdev->pattr[ibport - 1], 0,
+              sizeof(smcibdev->pattr[ibport - 1]));
+       rc = ib_query_port(smcibdev->ibdev, ibport,
+                          &smcibdev->pattr[ibport - 1]);
+       if (rc)
+               goto out;
+       /* the SMC protocol requires specification of the RoCE MAC address */
+       rc = smc_ib_fill_gid_and_mac(smcibdev, ibport);
+       if (rc)
+               goto out;
+       if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET,
+                    sizeof(local_systemid)) &&
+           smc_ib_port_active(smcibdev, ibport))
+               /* create unique system identifier */
+               smc_ib_define_local_systemid(smcibdev, ibport);
+out:
+       return rc;
+}
+
 /* process-context wrapper for smc_ib_remember_port_attr(), which might sleep */
 static void smc_ib_port_event_work(struct work_struct *work)
 {
@@ -370,62 +426,6 @@ void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev,
        buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address = 0;
 }
 
-static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport)
-{
-       struct ib_gid_attr gattr;
-       int rc;
-
-       rc = ib_query_gid(smcibdev->ibdev, ibport, 0,
-                         &smcibdev->gid[ibport - 1], &gattr);
-       if (rc || !gattr.ndev)
-               return -ENODEV;
-
-       memcpy(smcibdev->mac[ibport - 1], gattr.ndev->dev_addr, ETH_ALEN);
-       dev_put(gattr.ndev);
-       return 0;
-}
-
-/* Create an identifier unique for this instance of SMC-R.
- * The MAC-address of the first active registered IB device
- * plus a random 2-byte number is used to create this identifier.
- * This name is delivered to the peer during connection initialization.
- */
-static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
-                                               u8 ibport)
-{
-       memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
-              sizeof(smcibdev->mac[ibport - 1]));
-       get_random_bytes(&local_systemid[0], 2);
-}
-
-bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
-{
-       return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
-}
-
-int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
-{
-       int rc;
-
-       memset(&smcibdev->pattr[ibport - 1], 0,
-              sizeof(smcibdev->pattr[ibport - 1]));
-       rc = ib_query_port(smcibdev->ibdev, ibport,
-                          &smcibdev->pattr[ibport - 1]);
-       if (rc)
-               goto out;
-       /* the SMC protocol requires specification of the RoCE MAC address */
-       rc = smc_ib_fill_gid_and_mac(smcibdev, ibport);
-       if (rc)
-               goto out;
-       if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET,
-                    sizeof(local_systemid)) &&
-           smc_ib_port_active(smcibdev, ibport))
-               /* create unique system identifier */
-               smc_ib_define_local_systemid(smcibdev, ibport);
-out:
-       return rc;
-}
-
 long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
 {
        struct ib_cq_init_attr cqattr = {
@@ -454,9 +454,6 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
                smcibdev->roce_cq_recv = NULL;
                goto err;
        }
-       INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
-                             smc_ib_global_event_handler);
-       ib_register_event_handler(&smcibdev->event_handler);
        smc_wr_add_dev(smcibdev);
        smcibdev->initialized = 1;
        return rc;
@@ -472,7 +469,6 @@ static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
                return;
        smcibdev->initialized = 0;
        smc_wr_remove_dev(smcibdev);
-       ib_unregister_event_handler(&smcibdev->event_handler);
        ib_destroy_cq(smcibdev->roce_cq_recv);
        ib_destroy_cq(smcibdev->roce_cq_send);
 }
@@ -483,6 +479,8 @@ static struct ib_client smc_ib_client;
 static void smc_ib_add_dev(struct ib_device *ibdev)
 {
        struct smc_ib_device *smcibdev;
+       u8 port_cnt;
+       int i;
 
        if (ibdev->node_type != RDMA_NODE_IB_CA)
                return;
@@ -498,6 +496,21 @@ static void smc_ib_add_dev(struct ib_device *ibdev)
        list_add_tail(&smcibdev->list, &smc_ib_devices.list);
        spin_unlock(&smc_ib_devices.lock);
        ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
+       INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
+                             smc_ib_global_event_handler);
+       ib_register_event_handler(&smcibdev->event_handler);
+
+       /* trigger reading of the port attributes */
+       port_cnt = smcibdev->ibdev->phys_port_cnt;
+       for (i = 0; i < min_t(size_t, port_cnt, SMC_MAX_PORTS); i++) {
+               set_bit(i, &smcibdev->port_event_mask);
+               /* determine pnetids of the port */
+               smc_pnetid_by_dev_port(ibdev->dev.parent, i,
+                                      smcibdev->pnetid[i]);
+       }
+       schedule_work(&smcibdev->port_event_work);
 }
 
 /* callback function for ib_register_client() */
@@ -512,6 +525,7 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
        spin_unlock(&smc_ib_devices.lock);
        smc_pnet_remove_by_ibdev(smcibdev);
        smc_ib_cleanup_per_ibdev(smcibdev);
+       ib_unregister_event_handler(&smcibdev->event_handler);
        kfree(smcibdev);
 }
 
index e90630dadf8e9565e129e07c0ff8e782acab7fb8..7c1223c9122923efa81684c5f20aa27e8af5df8a 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/interrupt.h>
 #include <linux/if_ether.h>
 #include <rdma/ib_verbs.h>
+#include <net/smc.h>
 
 #define SMC_MAX_PORTS                  2       /* Max # of ports */
 #define SMC_GID_SIZE                   sizeof(union ib_gid)
@@ -40,6 +41,8 @@ struct smc_ib_device {                                /* ib-device infos for smc */
        char                    mac[SMC_MAX_PORTS][ETH_ALEN];
                                                /* mac address per port*/
        union ib_gid            gid[SMC_MAX_PORTS]; /* gid per port */
+       u8                      pnetid[SMC_MAX_PORTS][SMC_MAX_PNETID_LEN];
+                                               /* pnetid per port */
        u8                      initialized : 1; /* ib dev CQ, evthdl done */
        struct work_struct      port_event_work;
        unsigned long           port_event_mask;
@@ -51,7 +54,6 @@ struct smc_link;
 int smc_ib_register_client(void) __init;
 void smc_ib_unregister_client(void);
 bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport);
-int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport);
 int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
                      struct smc_buf_desc *buf_slot,
                      enum dma_data_direction data_direction);
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
new file mode 100644 (file)
index 0000000..cfade7f
--- /dev/null
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Shared Memory Communications Direct over ISM devices (SMC-D)
+ *
+ * Functions for ISM device.
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+
+#include "smc.h"
+#include "smc_core.h"
+#include "smc_ism.h"
+#include "smc_pnet.h"
+
+struct smcd_dev_list smcd_dev_list = {
+       .list = LIST_HEAD_INIT(smcd_dev_list.list),
+       .lock = __SPIN_LOCK_UNLOCKED(smcd_dev_list.lock)
+};
+
+/* Test whether ISM communication with the peer is possible. */
+int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *smcd)
+{
+       return smcd->ops->query_remote_gid(smcd, peer_gid, vlan_id ? 1 : 0,
+                                          vlan_id);
+}
+
+int smc_ism_write(struct smcd_dev *smcd, const struct smc_ism_position *pos,
+                 void *data, size_t len)
+{
+       int rc;
+
+       rc = smcd->ops->move_data(smcd, pos->token, pos->index, pos->signal,
+                                 pos->offset, data, len);
+
+       return rc < 0 ? rc : 0;
+}
+
+/* Set a connection using this DMBE. */
+void smc_ism_set_conn(struct smc_connection *conn)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
+       conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = conn;
+       spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
+}
+
+/* Unset a connection using this DMBE. */
+void smc_ism_unset_conn(struct smc_connection *conn)
+{
+       unsigned long flags;
+
+       if (!conn->rmb_desc)
+               return;
+
+       spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
+       conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = NULL;
+       spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
+}
+
+/* Register a VLAN identifier with the ISM device. Use a reference count
+ * and add a VLAN identifier only when the first DMB using this VLAN is
+ * registered.
+ */
+int smc_ism_get_vlan(struct smcd_dev *smcd, unsigned short vlanid)
+{
+       struct smc_ism_vlanid *new_vlan, *vlan;
+       unsigned long flags;
+       int rc = 0;
+
+       if (!vlanid)                    /* No valid vlan id */
+               return -EINVAL;
+
+       /* create new vlan entry, in case we need it */
+       new_vlan = kzalloc(sizeof(*new_vlan), GFP_KERNEL);
+       if (!new_vlan)
+               return -ENOMEM;
+       new_vlan->vlanid = vlanid;
+       refcount_set(&new_vlan->refcnt, 1);
+
+       /* if there is an existing entry, increase count and return */
+       spin_lock_irqsave(&smcd->lock, flags);
+       list_for_each_entry(vlan, &smcd->vlan, list) {
+               if (vlan->vlanid == vlanid) {
+                       refcount_inc(&vlan->refcnt);
+                       kfree(new_vlan);
+                       goto out;
+               }
+       }
+
+       /* no existing entry found.
+        * add new entry to device; might fail, e.g., if HW limit reached
+        */
+       if (smcd->ops->add_vlan_id(smcd, vlanid)) {
+               kfree(new_vlan);
+               rc = -EIO;
+               goto out;
+       }
+       list_add_tail(&new_vlan->list, &smcd->vlan);
+out:
+       spin_unlock_irqrestore(&smcd->lock, flags);
+       return rc;
+}
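
Note the ordering in smc_ism_get_vlan(): the entry is allocated with GFP_KERNEL before the lock is taken, since sleeping allocations are forbidden under spin_lock_irqsave(); if a matching entry already exists, the preallocated one is simply freed again. The same idiom in miniature (hypothetical helper, duplicate check elided):

    static int demo_add_vlan_entry(struct smcd_dev *smcd,
                                   unsigned short vlanid)
    {
            struct smc_ism_vlanid *entry;
            unsigned long flags;

            entry = kzalloc(sizeof(*entry), GFP_KERNEL); /* may sleep */
            if (!entry)
                    return -ENOMEM;
            entry->vlanid = vlanid;
            refcount_set(&entry->refcnt, 1);
            spin_lock_irqsave(&smcd->lock, flags);  /* atomic from here */
            list_add_tail(&entry->list, &smcd->vlan);
            spin_unlock_irqrestore(&smcd->lock, flags);
            return 0;
    }
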
+
+/* Unregister a VLAN identifier with the ISM device. Use a reference count
+ * and remove a VLAN identifier only when the last DMB using this VLAN is
+ * unregistered.
+ */
+int smc_ism_put_vlan(struct smcd_dev *smcd, unsigned short vlanid)
+{
+       struct smc_ism_vlanid *vlan;
+       unsigned long flags;
+       bool found = false;
+       int rc = 0;
+
+       if (!vlanid)                    /* No valid vlan id */
+               return -EINVAL;
+
+       spin_lock_irqsave(&smcd->lock, flags);
+       list_for_each_entry(vlan, &smcd->vlan, list) {
+               if (vlan->vlanid == vlanid) {
+                       if (!refcount_dec_and_test(&vlan->refcnt))
+                               goto out;
+                       found = true;
+                       break;
+               }
+       }
+       if (!found) {
+               rc = -ENOENT;
+               goto out;               /* VLAN id not in table */
+       }
+
+       /* Found and the last reference just gone */
+       if (smcd->ops->del_vlan_id(smcd, vlanid))
+               rc = -EIO;
+       list_del(&vlan->list);
+       kfree(vlan);
+out:
+       spin_unlock_irqrestore(&smcd->lock, flags);
+       return rc;
+}
+
+int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
+{
+       struct smcd_dmb dmb;
+
+       memset(&dmb, 0, sizeof(dmb));
+       dmb.dmb_tok = dmb_desc->token;
+       dmb.sba_idx = dmb_desc->sba_idx;
+       dmb.cpu_addr = dmb_desc->cpu_addr;
+       dmb.dma_addr = dmb_desc->dma_addr;
+       dmb.dmb_len = dmb_desc->len;
+       return smcd->ops->unregister_dmb(smcd, &dmb);
+}
+
+int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
+                        struct smc_buf_desc *dmb_desc)
+{
+       struct smcd_dmb dmb;
+       int rc;
+
+       memset(&dmb, 0, sizeof(dmb));
+       dmb.dmb_len = dmb_len;
+       dmb.sba_idx = dmb_desc->sba_idx;
+       dmb.vlan_id = lgr->vlan_id;
+       dmb.rgid = lgr->peer_gid;
+       rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb);
+       if (!rc) {
+               dmb_desc->sba_idx = dmb.sba_idx;
+               dmb_desc->token = dmb.dmb_tok;
+               dmb_desc->cpu_addr = dmb.cpu_addr;
+               dmb_desc->dma_addr = dmb.dma_addr;
+               dmb_desc->len = dmb.dmb_len;
+       }
+       return rc;
+}
+
+struct smc_ism_event_work {
+       struct work_struct work;
+       struct smcd_dev *smcd;
+       struct smcd_event event;
+};
+
+/* worker for SMC-D events */
+static void smc_ism_event_work(struct work_struct *work)
+{
+       struct smc_ism_event_work *wrk =
+               container_of(work, struct smc_ism_event_work, work);
+
+       switch (wrk->event.type) {
+       case ISM_EVENT_GID:     /* GID event, token is peer GID */
+               smc_smcd_terminate(wrk->smcd, wrk->event.tok);
+               break;
+       case ISM_EVENT_DMB:
+               break;
+       }
+       kfree(wrk);
+}
+
+static void smcd_release(struct device *dev)
+{
+       struct smcd_dev *smcd = container_of(dev, struct smcd_dev, dev);
+
+       kfree(smcd->conn);
+       kfree(smcd);
+}
+
+struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
+                               const struct smcd_ops *ops, int max_dmbs)
+{
+       struct smcd_dev *smcd;
+
+       smcd = kzalloc(sizeof(*smcd), GFP_KERNEL);
+       if (!smcd)
+               return NULL;
+       smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *),
+                            GFP_KERNEL);
+       if (!smcd->conn) {
+               kfree(smcd);
+               return NULL;
+       }
+
+       smcd->dev.parent = parent;
+       smcd->dev.release = smcd_release;
+       device_initialize(&smcd->dev);
+       dev_set_name(&smcd->dev, "%s", name);
+       smcd->ops = ops;
+       smc_pnetid_by_dev_port(parent, 0, smcd->pnetid);
+
+       spin_lock_init(&smcd->lock);
+       INIT_LIST_HEAD(&smcd->vlan);
+       smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s",
+                                                WQ_MEM_RECLAIM, name);
+       return smcd;
+}
+EXPORT_SYMBOL_GPL(smcd_alloc_dev);
+
+int smcd_register_dev(struct smcd_dev *smcd)
+{
+       spin_lock(&smcd_dev_list.lock);
+       list_add_tail(&smcd->list, &smcd_dev_list.list);
+       spin_unlock(&smcd_dev_list.lock);
+
+       return device_add(&smcd->dev);
+}
+EXPORT_SYMBOL_GPL(smcd_register_dev);
+
+void smcd_unregister_dev(struct smcd_dev *smcd)
+{
+       spin_lock(&smcd_dev_list.lock);
+       list_del(&smcd->list);
+       spin_unlock(&smcd_dev_list.lock);
+       flush_workqueue(smcd->event_wq);
+       destroy_workqueue(smcd->event_wq);
+       smc_smcd_terminate(smcd, 0);
+
+       device_del(&smcd->dev);
+}
+EXPORT_SYMBOL_GPL(smcd_unregister_dev);
+
+void smcd_free_dev(struct smcd_dev *smcd)
+{
+       put_device(&smcd->dev);
+}
+EXPORT_SYMBOL_GPL(smcd_free_dev);
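
Taken together, smcd_alloc_dev()/smcd_register_dev()/smcd_free_dev() follow the usual device_initialize()/device_add()/put_device() lifecycle. A hypothetical ISM driver probe using the exported API (the demo_* names and the max_dmbs value are made up; demo_ops must implement struct smcd_ops):

    static const struct smcd_ops demo_ops;  /* callbacks supplied by
                                             * the real driver
                                             */

    static int demo_probe(struct device *parent)
    {
            struct smcd_dev *smcd;

            smcd = smcd_alloc_dev(parent, "demo_ism", &demo_ops, 1024);
            if (!smcd)
                    return -ENOMEM;
            return smcd_register_dev(smcd); /* error unwinding elided */
    }
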
+
+/* SMCD Device event handler. Called from ISM device interrupt handler.
+ * Parameters are smcd device pointer,
+ * - event->type (0 --> DMB, 1 --> GID),
+ * - event->code (event code),
+ * - event->tok (either DMB token when event type 0, or GID when event type 1)
+ * - event->time (time of day)
+ * - event->info (debug info).
+ *
+ * Context:
+ * - Function called in IRQ context from ISM device driver event handler.
+ */
+void smcd_handle_event(struct smcd_dev *smcd, struct smcd_event *event)
+{
+       struct smc_ism_event_work *wrk;
+
+       /* copy event to event work queue, and let it be handled there */
+       wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);
+       if (!wrk)
+               return;
+       INIT_WORK(&wrk->work, smc_ism_event_work);
+       wrk->smcd = smcd;
+       wrk->event = *event;
+       queue_work(smcd->event_wq, &wrk->work);
+}
+EXPORT_SYMBOL_GPL(smcd_handle_event);
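
A hypothetical driver-side caller, matching the parameter description above; the GFP_ATOMIC copy inside smcd_handle_event() is what makes this safe from IRQ context:

    /* hypothetical driver event forwarder: for a GID event the token
     * field carries the peer GID
     */
    static void demo_forward_gid_event(struct smcd_dev *smcd, u64 peer_gid)
    {
            struct smcd_event ev = {
                    .type = ISM_EVENT_GID,
                    .tok = peer_gid,
            };

            smcd_handle_event(smcd, &ev);
    }
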
+
+/* SMCD Device interrupt handler. Called from ISM device interrupt handler.
+ * Parameters are smcd device pointer and DMB number. Find the connection and
+ * schedule the tasklet for this connection.
+ *
+ * Context:
+ * - Function called in IRQ context from ISM device driver IRQ handler.
+ */
+void smcd_handle_irq(struct smcd_dev *smcd, unsigned int dmbno)
+{
+       struct smc_connection *conn = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&smcd->lock, flags);
+       conn = smcd->conn[dmbno];
+       if (conn)
+               tasklet_schedule(&conn->rx_tsklet);
+       spin_unlock_irqrestore(&smcd->lock, flags);
+}
+EXPORT_SYMBOL_GPL(smcd_handle_irq);
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h
new file mode 100644 (file)
index 0000000..aee45b8
--- /dev/null
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Shared Memory Communications Direct over ISM devices (SMC-D)
+ *
+ * SMC-D ISM device structure definitions.
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#ifndef SMCD_ISM_H
+#define SMCD_ISM_H
+
+#include <linux/uio.h>
+
+#include "smc.h"
+
+struct smcd_dev_list { /* List of SMCD devices */
+       struct list_head list;
+       spinlock_t lock;        /* Protects list of devices */
+};
+
+extern struct smcd_dev_list    smcd_dev_list; /* list of smcd devices */
+
+struct smc_ism_vlanid {                        /* VLAN id set on ISM device */
+       struct list_head list;
+       unsigned short vlanid;          /* Vlan id */
+       refcount_t refcnt;              /* Reference count */
+};
+
+struct smc_ism_position {      /* ISM device position to write to */
+       u64 token;              /* Token of DMB */
+       u32 offset;             /* Offset into DMBE */
+       u8 index;               /* Index of DMBE */
+       u8 signal;              /* Generate interrupt on owner side */
+};
+
+struct smcd_dev;
+
+int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *dev);
+void smc_ism_set_conn(struct smc_connection *conn);
+void smc_ism_unset_conn(struct smc_connection *conn);
+int smc_ism_get_vlan(struct smcd_dev *dev, unsigned short vlan_id);
+int smc_ism_put_vlan(struct smcd_dev *dev, unsigned short vlan_id);
+int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size,
+                        struct smc_buf_desc *dmb_desc);
+int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc);
+int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos,
+                 void *data, size_t len);
+#endif
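
struct smc_ism_position bundles everything smc_ism_write() needs to address a spot in the peer's DMB. A hypothetical sender (demo_send is made up; the index and signal values are placeholders):

    static int demo_send(struct smcd_dev *dev, u64 dmb_token,
                         void *buf, size_t len)
    {
            struct smc_ism_position pos = {
                    .token = dmb_token,
                    .index = 0,     /* first DMBE */
                    .offset = 0,
                    .signal = 1,    /* interrupt the owner side */
            };

            return smc_ism_write(dev, &pos, buf, len);
    }
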
index d7b88b2d1b224195b2d82523c047052c67f2e1eb..1b6c066d34950e5e213df3826ce073f0e78b1396 100644 (file)
 
 #include "smc_pnet.h"
 #include "smc_ib.h"
-
-#define SMC_MAX_PNET_ID_LEN    16      /* Max. length of PNET id */
+#include "smc_ism.h"
 
 static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = {
        [SMC_PNETID_NAME] = {
                .type = NLA_NUL_STRING,
-               .len = SMC_MAX_PNET_ID_LEN - 1
+               .len = SMC_MAX_PNETID_LEN - 1
        },
        [SMC_PNETID_ETHNAME] = {
                .type = NLA_NUL_STRING,
@@ -65,7 +64,7 @@ static struct smc_pnettable {
  */
 struct smc_pnetentry {
        struct list_head list;
-       char pnet_name[SMC_MAX_PNET_ID_LEN + 1];
+       char pnet_name[SMC_MAX_PNETID_LEN + 1];
        struct net_device *ndev;
        struct smc_ib_device *smcibdev;
        u8 ib_port;
@@ -209,7 +208,7 @@ static bool smc_pnetid_valid(const char *pnet_name, char *pnetid)
                return false;
        while (--end >= bf && isspace(*end))
                ;
-       if (end - bf >= SMC_MAX_PNET_ID_LEN)
+       if (end - bf >= SMC_MAX_PNETID_LEN)
                return false;
        while (bf <= end) {
                if (!isalnum(*bf))
@@ -358,9 +357,6 @@ static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info)
                kfree(pnetelem);
                return rc;
        }
-       rc = smc_ib_remember_port_attr(pnetelem->smcibdev, pnetelem->ib_port);
-       if (rc)
-               smc_pnet_remove_by_pnetid(pnetelem->pnet_name);
        return rc;
 }
 
@@ -485,10 +481,10 @@ static int smc_pnet_netdev_event(struct notifier_block *this,
        case NETDEV_REBOOT:
        case NETDEV_UNREGISTER:
                smc_pnet_remove_by_ndev(event_dev);
+               return NOTIFY_OK;
        default:
-               break;
+               return NOTIFY_DONE;
        }
-       return NOTIFY_DONE;
 }
 
 static struct notifier_block smc_netdev_notifier = {
@@ -515,26 +511,91 @@ void smc_pnet_exit(void)
        genl_unregister_family(&smc_pnet_nl_family);
 }
 
-/* PNET table analysis for a given sock:
- * determine ib_device and port belonging to used internal TCP socket
- * ethernet interface.
+/* Determine one base device for stacked net devices.
+ * If the lower device level contains more than one device
+ * (for instance with bonding slaves), just the first device
+ * is used to reach a base device.
  */
-void smc_pnet_find_roce_resource(struct sock *sk,
-                                struct smc_ib_device **smcibdev, u8 *ibport)
+static struct net_device *pnet_find_base_ndev(struct net_device *ndev)
 {
-       struct dst_entry *dst = sk_dst_get(sk);
-       struct smc_pnetentry *pnetelem;
+       int i, nest_lvl;
 
-       *smcibdev = NULL;
-       *ibport = 0;
+       rtnl_lock();
+       nest_lvl = dev_get_nest_level(ndev);
+       for (i = 0; i < nest_lvl; i++) {
+               struct list_head *lower = &ndev->adj_list.lower;
+
+               if (list_empty(lower))
+                       break;
+               lower = lower->next;
+               ndev = netdev_lower_get_next(ndev, &lower);
+       }
+       rtnl_unlock();
+       return ndev;
+}
+
+/* Determine the corresponding IB device port based on the hardware PNETID.
+ * Searching stops at the first matching active IB device port.
+ */
+static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev,
+                                        struct smc_ib_device **smcibdev,
+                                        u8 *ibport)
+{
+       u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
+       struct smc_ib_device *ibdev;
+       int i;
+
+       ndev = pnet_find_base_ndev(ndev);
+       if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port,
+                                  ndev_pnetid))
+               return; /* pnetid could not be determined */
+
+       spin_lock(&smc_ib_devices.lock);
+       list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
+               for (i = 1; i <= SMC_MAX_PORTS; i++) {
+                       if (!memcmp(ibdev->pnetid[i - 1], ndev_pnetid,
+                                   SMC_MAX_PNETID_LEN) &&
+                           smc_ib_port_active(ibdev, i)) {
+                               *smcibdev = ibdev;
+                               *ibport = i;
+                               break;
+                       }
+               }
+       }
+       spin_unlock(&smc_ib_devices.lock);
+}
+
+static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
+                                       struct smcd_dev **smcismdev)
+{
+       u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
+       struct smcd_dev *ismdev;
+
+       ndev = pnet_find_base_ndev(ndev);
+       if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port,
+                                  ndev_pnetid))
+               return; /* pnetid could not be determined */
+
+       spin_lock(&smcd_dev_list.lock);
+       list_for_each_entry(ismdev, &smcd_dev_list.list, list) {
+               if (!memcmp(ismdev->pnetid, ndev_pnetid, SMC_MAX_PNETID_LEN)) {
+                       *smcismdev = ismdev;
+                       break;
+               }
+       }
+       spin_unlock(&smcd_dev_list.lock);
+}
+
+/* Lookup of coupled ib_device via SMC pnet table */
+static void smc_pnet_find_roce_by_table(struct net_device *netdev,
+                                       struct smc_ib_device **smcibdev,
+                                       u8 *ibport)
+{
+       struct smc_pnetentry *pnetelem;
 
-       if (!dst)
-               return;
-       if (!dst->dev)
-               goto out_rel;
        read_lock(&smc_pnettable.lock);
        list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) {
-               if (dst->dev == pnetelem->ndev) {
+               if (netdev == pnetelem->ndev) {
                        if (smc_ib_port_active(pnetelem->smcibdev,
                                               pnetelem->ib_port)) {
                                *smcibdev = pnetelem->smcibdev;
@@ -544,6 +605,54 @@ void smc_pnet_find_roce_resource(struct sock *sk,
                }
        }
        read_unlock(&smc_pnettable.lock);
+}
+
+/* PNET table analysis for a given sock:
+ * determine ib_device and port belonging to used internal TCP socket
+ * ethernet interface.
+ */
+void smc_pnet_find_roce_resource(struct sock *sk,
+                                struct smc_ib_device **smcibdev, u8 *ibport)
+{
+       struct dst_entry *dst = sk_dst_get(sk);
+
+       *smcibdev = NULL;
+       *ibport = 0;
+
+       if (!dst)
+               goto out;
+       if (!dst->dev)
+               goto out_rel;
+
+       /* if possible, lookup via hardware-defined pnetid */
+       smc_pnet_find_roce_by_pnetid(dst->dev, smcibdev, ibport);
+       if (*smcibdev)
+               goto out_rel;
+
+       /* lookup via SMC PNET table */
+       smc_pnet_find_roce_by_table(dst->dev, smcibdev, ibport);
+
+out_rel:
+       dst_release(dst);
+out:
+       return;
+}
+
+void smc_pnet_find_ism_resource(struct sock *sk, struct smcd_dev **smcismdev)
+{
+       struct dst_entry *dst = sk_dst_get(sk);
+
+       *smcismdev = NULL;
+       if (!dst)
+               goto out;
+       if (!dst->dev)
+               goto out_rel;
+
+       /* if possible, lookup via hardware-defined pnetid */
+       smc_pnet_find_ism_by_pnetid(dst->dev, smcismdev);
+
 out_rel:
        dst_release(dst);
+out:
+       return;
 }
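
The restructured smc_pnet_find_roce_resource() above establishes a fixed precedence: the hardware-defined PNETID is tried first, and the user-configured PNET table is only consulted when that yields nothing. A toy model of the control flow (the lookup stubs are assumptions, not kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    static bool lookup_by_hw_pnetid(const char *ifname)  { return false; }
    static bool lookup_by_pnet_table(const char *ifname) { return true; }

    /* Same precedence as smc_pnet_find_roce_resource(): the hardware
     * PNETID wins; the table is a fallback only. */
    static bool find_roce_device(const char *ifname)
    {
            if (lookup_by_hw_pnetid(ifname))
                    return true;                  /* hardware-defined match */
            return lookup_by_pnet_table(ifname);  /* user-configured fallback */
    }

    int main(void)
    {
            printf("found: %d\n", find_roce_device("eth0"));
            return 0;
    }
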
index 5a29519db976f2b1b542e7aacacaed4f3f8ea812..1e94fd4df7bc254bdc6ba0e341e45b43e8f1489c 100644 (file)
 #ifndef _SMC_PNET_H
 #define _SMC_PNET_H
 
+#if IS_ENABLED(CONFIG_HAVE_PNETID)
+#include <asm/pnet.h>
+#endif
+
 struct smc_ib_device;
+struct smcd_dev;
+
+static inline int smc_pnetid_by_dev_port(struct device *dev,
+                                        unsigned short port, u8 *pnetid)
+{
+#if IS_ENABLED(CONFIG_HAVE_PNETID)
+       return pnet_id_by_dev_port(dev, port, pnetid);
+#else
+       return -ENOENT;
+#endif
+}
 
 int smc_pnet_init(void) __init;
 void smc_pnet_exit(void);
 int smc_pnet_remove_by_ibdev(struct smc_ib_device *ibdev);
 void smc_pnet_find_roce_resource(struct sock *sk,
                                 struct smc_ib_device **smcibdev, u8 *ibport);
+void smc_pnet_find_ism_resource(struct sock *sk, struct smcd_dev **smcismdev);
 
 #endif
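
The smc_pnetid_by_dev_port() wrapper above shows a common kernel pattern: the inline always exists, and when CONFIG_HAVE_PNETID is off it collapses to -ENOENT, so callers never need #ifdefs. A hedged sketch of the same pattern with hypothetical names (CONFIG_HAVE_FROBNICATE, arch_frobnicate; kernel-style, not standalone):

    #if IS_ENABLED(CONFIG_HAVE_FROBNICATE)
    #include <asm/frobnicate.h>
    #endif

    static inline int my_frobnicate(struct device *dev)
    {
    #if IS_ENABLED(CONFIG_HAVE_FROBNICATE)
            return arch_frobnicate(dev);    /* arch provides the real one */
    #else
            return -ENOENT;                 /* feature compiled out */
    #endif
    }
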
index 3d77b383cccd97f7770580f3512e642aeae24d6b..b329803c8339dab65ab2112cb7a1971931286075 100644 (file)
@@ -305,7 +305,7 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
 
        /* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */
-       rcvbuf_base = conn->rmb_desc->cpu_addr;
+       rcvbuf_base = conn->rx_off + conn->rmb_desc->cpu_addr;
 
        do { /* while (read_remaining) */
                if (read_done >= target || (pipe && read_done))
index cee66640075242fc7fe863734ebf301d261e02d6..142bcb134dd64879fcade28dc35993886519a04c 100644 (file)
@@ -24,6 +24,7 @@
 #include "smc.h"
 #include "smc_wr.h"
 #include "smc_cdc.h"
+#include "smc_ism.h"
 #include "smc_tx.h"
 
 #define SMC_TX_WORK_DELAY      HZ
@@ -250,6 +251,24 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
 
 /***************************** sndbuf consumer *******************************/
 
+/* sndbuf consumer: actual data transfer of one target chunk with ISM write */
+int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
+                     u32 offset, int signal)
+{
+       struct smc_ism_position pos;
+       int rc;
+
+       memset(&pos, 0, sizeof(pos));
+       pos.token = conn->peer_token;
+       pos.index = conn->peer_rmbe_idx;
+       pos.offset = conn->tx_off + offset;
+       pos.signal = signal;
+       rc = smc_ism_write(conn->lgr->smcd, &pos, data, len);
+       if (rc)
+               conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
+       return rc;
+}
+
 /* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
 static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
                             int num_sges, struct ib_sge sges[])
@@ -297,21 +316,104 @@ static inline void smc_tx_advance_cursors(struct smc_connection *conn,
        smc_curs_add(conn->sndbuf_desc->len, sent, len);
 }
 
+/* SMC-R helper for smc_tx_rdma_writes() */
+static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
+                              size_t src_off, size_t src_len,
+                              size_t dst_off, size_t dst_len)
+{
+       dma_addr_t dma_addr =
+               sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
+       struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+       int src_len_sum = src_len, dst_len_sum = dst_len;
+       struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
+       int sent_count = src_off;
+       int srcchunk, dstchunk;
+       int num_sges;
+       int rc;
+
+       for (dstchunk = 0; dstchunk < 2; dstchunk++) {
+               num_sges = 0;
+               for (srcchunk = 0; srcchunk < 2; srcchunk++) {
+                       sges[srcchunk].addr = dma_addr + src_off;
+                       sges[srcchunk].length = src_len;
+                       sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
+                       num_sges++;
+
+                       src_off += src_len;
+                       if (src_off >= conn->sndbuf_desc->len)
+                               src_off -= conn->sndbuf_desc->len;
+                                               /* modulo in send ring */
+                       if (src_len_sum == dst_len)
+                               break; /* either on 1st or 2nd iteration */
+                       /* prepare next (== 2nd) iteration */
+                       src_len = dst_len - src_len; /* remainder */
+                       src_len_sum += src_len;
+               }
+               rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
+               if (rc)
+                       return rc;
+               if (dst_len_sum == len)
+                       break; /* either on 1st or 2nd iteration */
+               /* prepare next (== 2nd) iteration */
+               dst_off = 0; /* modulo offset in RMBE ring buffer */
+               dst_len = len - dst_len; /* remainder */
+               dst_len_sum += dst_len;
+               src_len = min_t(int, dst_len, conn->sndbuf_desc->len -
+                               sent_count);
+               src_len_sum = src_len;
+       }
+       return 0;
+}
+
+/* SMC-D helper for smc_tx_rdma_writes() */
+static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
+                              size_t src_off, size_t src_len,
+                              size_t dst_off, size_t dst_len)
+{
+       int src_len_sum = src_len, dst_len_sum = dst_len;
+       int srcchunk, dstchunk;
+       int rc;
+
+       for (dstchunk = 0; dstchunk < 2; dstchunk++) {
+               for (srcchunk = 0; srcchunk < 2; srcchunk++) {
+                       void *data = conn->sndbuf_desc->cpu_addr + src_off;
+
+                       rc = smcd_tx_ism_write(conn, data, src_len, dst_off +
+                                              sizeof(struct smcd_cdc_msg), 0);
+                       if (rc)
+                               return rc;
+                       dst_off += src_len;
+                       src_off += src_len;
+                       if (src_off >= conn->sndbuf_desc->len)
+                               src_off -= conn->sndbuf_desc->len;
+                                               /* modulo in send ring */
+                       if (src_len_sum == dst_len)
+                               break; /* either on 1st or 2nd iteration */
+                       /* prepare next (== 2nd) iteration */
+                       src_len = dst_len - src_len; /* remainder */
+                       src_len_sum += src_len;
+               }
+               if (dst_len_sum == len)
+                       break; /* either on 1st or 2nd iteration */
+               /* prepare next (== 2nd) iteration */
+               dst_off = 0; /* modulo offset in RMBE ring buffer */
+               dst_len = len - dst_len; /* remainder */
+               dst_len_sum += dst_len;
+               src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off);
+               src_len_sum = src_len;
+       }
+       return 0;
+}
+
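
Both helpers above walk the source and destination rings with at most two chunks per side, since each ring buffer wraps at most once per send. A self-contained toy of the same 2x2 chunk walk, with memcpy standing in for the RDMA/ISM write (toy ring size is an assumption):

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    #define RING 16 /* toy ring size for both buffers (assumption) */

    static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

    /* Copy len bytes from ring src@src_off to ring dst@dst_off; each
     * ring wraps at most once, so two chunks per side suffice -- the
     * same walk as smcr_tx_rdma_writes()/smcd_tx_rdma_writes(). */
    static void ring_copy(const char *src, size_t src_off,
                          char *dst, size_t dst_off, size_t len)
    {
            size_t dst_len = min_sz(len, RING - dst_off);
            size_t src_len = min_sz(dst_len, RING - src_off);
            size_t src_sum = src_len, dst_sum = dst_len;
            int srcchunk, dstchunk;

            for (dstchunk = 0; dstchunk < 2; dstchunk++) {
                    for (srcchunk = 0; srcchunk < 2; srcchunk++) {
                            memcpy(dst + dst_off, src + src_off, src_len);
                            dst_off += src_len;
                            src_off += src_len;
                            if (src_off >= RING)
                                    src_off -= RING; /* modulo in send ring */
                            if (src_sum == dst_len)
                                    break;
                            src_len = dst_len - src_len; /* remainder */
                            src_sum += src_len;
                    }
                    if (dst_sum == len)
                            break;
                    dst_off = 0;                 /* dest ring wrapped */
                    dst_len = len - dst_len;     /* remainder */
                    dst_sum += dst_len;
                    src_len = min_sz(dst_len, RING - src_off);
                    src_sum = src_len;
            }
    }

    int main(void)
    {
            char src[RING] = {0}, dst[RING] = {0};
            const char *msg = "abcdef";
            size_t i;

            for (i = 0; i < 6; i++)          /* place "abcdef" wrapped */
                    src[(14 + i) % RING] = msg[i];
            ring_copy(src, 14, dst, 12, 6);  /* both sides wrap once */
            for (i = 0; i < 6; i++)
                    assert(dst[(12 + i) % RING] == msg[i]);
            return 0;
    }
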
 /* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
  * usable snd_wnd as max transmit
  */
 static int smc_tx_rdma_writes(struct smc_connection *conn)
 {
-       size_t src_off, src_len, dst_off, dst_len; /* current chunk values */
-       size_t len, dst_len_sum, src_len_sum, dstchunk, srcchunk;
+       size_t len, src_len, dst_off, dst_len; /* current chunk values */
        union smc_host_cursor sent, prep, prod, cons;
-       struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
-       struct smc_link_group *lgr = conn->lgr;
        struct smc_cdc_producer_flags *pflags;
        int to_send, rmbespace;
-       struct smc_link *link;
-       dma_addr_t dma_addr;
-       int num_sges;
        int rc;
 
        /* source: sndbuf */
@@ -341,7 +443,6 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
        len = min(to_send, rmbespace);
 
        /* initialize variables for first iteration of subsequent nested loop */
-       link = &lgr->lnk[SMC_SINGLE_LINK];
        dst_off = prod.count;
        if (prod.wrap == cons.wrap) {
                /* the filled destination area is unwrapped,
@@ -358,8 +459,6 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
                 */
                dst_len = len;
        }
-       dst_len_sum = dst_len;
-       src_off = sent.count;
        /* dst_len determines the maximum src_len */
        if (sent.count + dst_len <= conn->sndbuf_desc->len) {
                /* unwrapped src case: single chunk of entire dst_len */
@@ -368,38 +467,15 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
                /* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
                src_len = conn->sndbuf_desc->len - sent.count;
        }
-       src_len_sum = src_len;
-       dma_addr = sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
-       for (dstchunk = 0; dstchunk < 2; dstchunk++) {
-               num_sges = 0;
-               for (srcchunk = 0; srcchunk < 2; srcchunk++) {
-                       sges[srcchunk].addr = dma_addr + src_off;
-                       sges[srcchunk].length = src_len;
-                       sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
-                       num_sges++;
-                       src_off += src_len;
-                       if (src_off >= conn->sndbuf_desc->len)
-                               src_off -= conn->sndbuf_desc->len;
-                                               /* modulo in send ring */
-                       if (src_len_sum == dst_len)
-                               break; /* either on 1st or 2nd iteration */
-                       /* prepare next (== 2nd) iteration */
-                       src_len = dst_len - src_len; /* remainder */
-                       src_len_sum += src_len;
-               }
-               rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
-               if (rc)
-                       return rc;
-               if (dst_len_sum == len)
-                       break; /* either on 1st or 2nd iteration */
-               /* prepare next (== 2nd) iteration */
-               dst_off = 0; /* modulo offset in RMBE ring buffer */
-               dst_len = len - dst_len; /* remainder */
-               dst_len_sum += dst_len;
-               src_len = min_t(int,
-                               dst_len, conn->sndbuf_desc->len - sent.count);
-               src_len_sum = src_len;
-       }
+
+       if (conn->lgr->is_smcd)
+               rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len,
+                                        dst_off, dst_len);
+       else
+               rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
+                                        dst_off, dst_len);
+       if (rc)
+               return rc;
 
        if (conn->urg_tx_pend && len == to_send)
                pflags->urg_data_present = 1;
@@ -420,7 +496,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
 /* Wakeup sndbuf consumers from any context (IRQ or process)
  * since there is more data to transmit; usable snd_wnd as max transmit
  */
-int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
+static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
 {
        struct smc_cdc_producer_flags *pflags;
        struct smc_cdc_tx_pend *pend;
@@ -467,6 +543,37 @@ int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
        return rc;
 }
 
+static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
+{
+       struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
+       int rc = 0;
+
+       spin_lock_bh(&conn->send_lock);
+       if (!pflags->urg_data_present)
+               rc = smc_tx_rdma_writes(conn);
+       if (!rc)
+               rc = smcd_cdc_msg_send(conn);
+
+       if (!rc && pflags->urg_data_present) {
+               pflags->urg_data_pending = 0;
+               pflags->urg_data_present = 0;
+       }
+       spin_unlock_bh(&conn->send_lock);
+       return rc;
+}
+
+int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
+{
+       int rc;
+
+       if (conn->lgr->is_smcd)
+               rc = smcd_tx_sndbuf_nonempty(conn);
+       else
+               rc = smcr_tx_sndbuf_nonempty(conn);
+
+       return rc;
+}
+
 /* Wakeup sndbuf consumers from process context
  * since there is more data to transmit
  */
@@ -495,7 +602,8 @@ void smc_tx_work(struct work_struct *work)
 
 void smc_tx_consumer_update(struct smc_connection *conn, bool force)
 {
-       union smc_host_cursor cfed, cons;
+       union smc_host_cursor cfed, cons, prod;
+       int sender_free = conn->rmb_desc->len;
        int to_confirm;
 
        smc_curs_write(&cons,
@@ -505,11 +613,18 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
                       smc_curs_read(&conn->rx_curs_confirmed, conn),
                       conn);
        to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
+       if (to_confirm > conn->rmbe_update_limit) {
+               smc_curs_write(&prod,
+                              smc_curs_read(&conn->local_rx_ctrl.prod, conn),
+                              conn);
+               sender_free = conn->rmb_desc->len -
+                             smc_curs_diff(conn->rmb_desc->len, &prod, &cfed);
+       }
 
        if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
            force ||
            ((to_confirm > conn->rmbe_update_limit) &&
-            ((to_confirm > (conn->rmb_desc->len / 2)) ||
+            ((sender_free <= (conn->rmb_desc->len / 2)) ||
              conn->local_rx_ctrl.prod_flags.write_blocked))) {
                if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
                    conn->alert_token_local) { /* connection healthy */
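
The smc_tx_consumer_update() change switches the update criterion from "we have confirmed more than half the buffer" to "the sender's view of free space has dropped to half the buffer". A toy model of the intent, with a simplified wrap-aware cursor difference (the numbers are illustrative only; the kernel's smc_curs_diff() has additional clamping):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cursor { uint16_t wrap; uint32_t count; };

    /* Ring distance from old to curr, assuming curr trails old by at
     * most one wrap. */
    static int curs_diff(unsigned int size, struct cursor old,
                         struct cursor curr)
    {
            if (old.wrap != curr.wrap)
                    return (int)(size - old.count + curr.count);
            return (int)(curr.count - old.count);
    }

    int main(void)
    {
            unsigned int rmb_len = 65536, update_limit = 1024;
            struct cursor cfed = { 0, 60000 }; /* last confirmed to peer */
            struct cursor cons = { 1, 2000 };  /* locally consumed */
            struct cursor prod = { 1, 4000 };  /* peer's producer */

            int to_confirm  = curs_diff(rmb_len, cfed, cons);
            int sender_free = rmb_len - curs_diff(rmb_len, cfed, prod);

            /* New criterion: update the peer once its view of free
             * space (not our unconfirmed byte count) drops to half. */
            bool update = to_confirm > update_limit &&
                          sender_free <= rmb_len / 2;
            printf("to_confirm=%d sender_free=%d update=%d\n",
                   to_confirm, sender_free, update);
            return 0;
    }
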
index 9d2238909fa08d72e63537607132dd3ac6a4e93f..b22bdc5694c4da4ca41c0c166b2862153efa8257 100644 (file)
@@ -33,5 +33,7 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len);
 int smc_tx_sndbuf_nonempty(struct smc_connection *conn);
 void smc_tx_sndbuf_nonfull(struct smc_sock *smc);
 void smc_tx_consumer_update(struct smc_connection *conn, bool force);
+int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
+                     u32 offset, int signal);
 
 #endif /* SMC_TX_H */
index 8a109012608a6132a65293c86cd175426b851cbe..85633622c94d011796517feb4d935b7ccba68445 100644 (file)
@@ -117,10 +117,8 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
 static int sock_mmap(struct file *file, struct vm_area_struct *vma);
 
 static int sock_close(struct inode *inode, struct file *file);
-static struct wait_queue_head *sock_get_poll_head(struct file *file,
-               __poll_t events);
-static __poll_t sock_poll_mask(struct file *file, __poll_t);
-static __poll_t sock_poll(struct file *file, struct poll_table_struct *wait);
+static __poll_t sock_poll(struct file *file,
+                             struct poll_table_struct *wait);
 static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 #ifdef CONFIG_COMPAT
 static long compat_sock_ioctl(struct file *file,
@@ -143,8 +141,6 @@ static const struct file_operations socket_file_ops = {
        .llseek =       no_llseek,
        .read_iter =    sock_read_iter,
        .write_iter =   sock_write_iter,
-       .get_poll_head = sock_get_poll_head,
-       .poll_mask =    sock_poll_mask,
        .poll =         sock_poll,
        .unlocked_ioctl = sock_ioctl,
 #ifdef CONFIG_COMPAT
@@ -1130,48 +1126,16 @@ int sock_create_lite(int family, int type, int protocol, struct socket **res)
 }
 EXPORT_SYMBOL(sock_create_lite);
 
-static struct wait_queue_head *sock_get_poll_head(struct file *file,
-               __poll_t events)
-{
-       struct socket *sock = file->private_data;
-
-       if (!sock->ops->poll_mask)
-               return NULL;
-       sock_poll_busy_loop(sock, events);
-       return sk_sleep(sock->sk);
-}
-
-static __poll_t sock_poll_mask(struct file *file, __poll_t events)
-{
-       struct socket *sock = file->private_data;
-
-       /*
-        * We need to be sure we are in sync with the socket flags modification.
-        *
-        * This memory barrier is paired in the wq_has_sleeper.
-        */
-       smp_mb();
-
-       /* this socket can poll_ll so tell the system call */
-       return sock->ops->poll_mask(sock, events) |
-               (sk_can_busy_loop(sock->sk) ? POLL_BUSY_LOOP : 0);
-}
-
 /* No kernel lock held - perfect */
 static __poll_t sock_poll(struct file *file, poll_table *wait)
 {
        struct socket *sock = file->private_data;
-       __poll_t events = poll_requested_events(wait), mask = 0;
-
-       if (sock->ops->poll) {
-               sock_poll_busy_loop(sock, events);
-               mask = sock->ops->poll(file, sock, wait);
-       } else if (sock->ops->poll_mask) {
-               sock_poll_wait(file, sock_get_poll_head(file, events), wait);
-               mask = sock->ops->poll_mask(sock, events);
-       }
+       __poll_t events = poll_requested_events(wait);
 
-       return mask | sock_poll_busy_flag(sock);
+       sock_poll_busy_loop(sock, events);
+       if (!sock->ops->poll)
+               return 0;
+       return sock->ops->poll(file, sock, wait) | sock_poll_busy_flag(sock);
 }
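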
 
 static int sock_mmap(struct file *file, struct vm_area_struct *vma)
index 1a96951835999091c81ba451700f0a74565d9c59..3a512936eea984c96f8ef8f69b4a535ae21fc967 100644 (file)
@@ -35,7 +35,6 @@ struct _strp_msg {
         */
        struct strp_msg strp;
        int accum_len;
-       int early_eaten;
 };
 
 static inline struct _strp_msg *_strp_msg(struct sk_buff *skb)
@@ -115,20 +114,6 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
        head = strp->skb_head;
        if (head) {
                /* Message already in progress */
-
-               stm = _strp_msg(head);
-               if (unlikely(stm->early_eaten)) {
-                       /* Already some number of bytes on the receive sock
-                        * data saved in skb_head, just indicate they
-                        * are consumed.
-                        */
-                       eaten = orig_len <= stm->early_eaten ?
-                               orig_len : stm->early_eaten;
-                       stm->early_eaten -= eaten;
-
-                       return eaten;
-               }
-
                if (unlikely(orig_offset)) {
                        /* Getting data with a non-zero offset when a message is
                         * in progress is not expected. If it does happen, we
@@ -155,11 +140,13 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
                        /* We are going to append to the frags_list of head.
                         * Need to unshare the frag_list.
                         */
-                       err = skb_unclone(head, GFP_ATOMIC);
-                       if (err) {
-                               STRP_STATS_INCR(strp->stats.mem_fail);
-                               desc->error = err;
-                               return 0;
+                       if (skb_has_frag_list(head)) {
+                               err = skb_unclone(head, GFP_ATOMIC);
+                               if (err) {
+                                       STRP_STATS_INCR(strp->stats.mem_fail);
+                                       desc->error = err;
+                                       return 0;
+                               }
                        }
 
                        if (unlikely(skb_shinfo(head)->frag_list)) {
@@ -216,14 +203,16 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
                        memset(stm, 0, sizeof(*stm));
                        stm->strp.offset = orig_offset + eaten;
                } else {
-                       /* Unclone since we may be appending to an skb that we
+                       /* Unclone if we are appending to an skb that we
                         * already share a frag_list with.
                         */
-                       err = skb_unclone(skb, GFP_ATOMIC);
-                       if (err) {
-                               STRP_STATS_INCR(strp->stats.mem_fail);
-                               desc->error = err;
-                               break;
+                       if (skb_has_frag_list(skb)) {
+                               err = skb_unclone(skb, GFP_ATOMIC);
+                               if (err) {
+                                       STRP_STATS_INCR(strp->stats.mem_fail);
+                                       desc->error = err;
+                                       break;
+                               }
                        }
 
                        stm = _strp_msg(head);
@@ -297,9 +286,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
                                }
 
                                stm->accum_len += cand_len;
+                               eaten += cand_len;
                                strp->need_bytes = stm->strp.full_len -
                                                       stm->accum_len;
-                               stm->early_eaten = cand_len;
                                STRP_STATS_ADD(strp->stats.bytes, cand_len);
                                desc->count = 0; /* Stop reading socket */
                                break;
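
The removal of early_eaten changes the accounting for partial messages: bytes of a still-incomplete message are now reported as consumed (added to eaten) immediately, instead of being parked and re-consumed on the next ->read_sock() pass. A toy trace of the new bookkeeping:

    #include <assert.h>

    /* Toy model of the __strp_recv() accounting change. */
    int main(void)
    {
            int full_len  = 1000;  /* framed message length */
            int accum_len = 400;   /* already gathered */
            int cand_len  = 300;   /* new bytes in this skb */
            int eaten     = 0;

            accum_len += cand_len;
            eaten += cand_len;                      /* new: consume now */
            int need_bytes = full_len - accum_len;  /* wait for the rest */

            assert(eaten == 300 && need_bytes == 300);
            return 0;
    }
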
@@ -392,7 +381,7 @@ static int strp_read_sock(struct strparser *strp)
 /* Lower sock lock held */
 void strp_data_ready(struct strparser *strp)
 {
-       if (unlikely(strp->stopped))
+       if (unlikely(strp->stopped) || strp->paused)
                return;
 
        /* This check is needed to synchronize with do_strp_work.
@@ -407,9 +396,6 @@ void strp_data_ready(struct strparser *strp)
                return;
        }
 
-       if (strp->paused)
-               return;
-
        if (strp->need_bytes) {
                if (strp_peek_len(strp) < strp->need_bytes)
                        return;
index 3c85af058227d14bda8d9f598ec45e7b8db1785e..3fabf9f6a0f9d92eaccbc33a9600ca2d1370aa18 100644 (file)
@@ -987,8 +987,6 @@ bool xprt_prepare_transmit(struct rpc_task *task)
                task->tk_status = -EAGAIN;
                goto out_unlock;
        }
-       if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent)
-               req->rq_xid = xprt_alloc_xid(xprt);
        ret = true;
 out_unlock:
        spin_unlock_bh(&xprt->transport_lock);
@@ -1298,7 +1296,12 @@ void xprt_retry_reserve(struct rpc_task *task)
 
 static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
 {
-       return (__force __be32)xprt->xid++;
+       __be32 xid;
+
+       spin_lock(&xprt->reserve_lock);
+       xid = (__force __be32)xprt->xid++;
+       spin_unlock(&xprt->reserve_lock);
+       return xid;
 }
 
 static inline void xprt_init_xid(struct rpc_xprt *xprt)
@@ -1316,6 +1319,7 @@ void xprt_request_init(struct rpc_task *task)
        req->rq_task    = task;
        req->rq_xprt    = xprt;
        req->rq_buffer  = NULL;
+       req->rq_xid     = xprt_alloc_xid(xprt);
        req->rq_connect_cookie = xprt->connect_cookie - 1;
        req->rq_bytes_sent = 0;
        req->rq_snd_buf.len = 0;
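
xprt_alloc_xid() now increments xprt->xid under reserve_lock because an unsynchronized xid++ from concurrent tasks is a classic lost-update race and can hand two RPCs the same XID. A minimal userspace analogy of the fix (assumption: POSIX pthread spinlocks; compile with -pthread):

    #include <assert.h>
    #include <pthread.h>
    #include <stdint.h>

    static pthread_spinlock_t lock;
    static uint32_t xid;

    static uint32_t alloc_xid(void)
    {
            uint32_t ret;

            pthread_spin_lock(&lock);
            ret = xid++;            /* read-modify-write, now atomic */
            pthread_spin_unlock(&lock);
            return ret;
    }

    static void *worker(void *arg)
    {
            for (int i = 0; i < 100000; i++)
                    alloc_xid();
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;

            pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
            pthread_create(&a, NULL, worker, NULL);
            pthread_create(&b, NULL, worker, NULL);
            pthread_join(a, NULL);
            pthread_join(b, NULL);
            assert(xid == 200000);  /* no duplicates, no lost updates */
            return 0;
    }
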
index 2dfb492a7c943b61f1b0c551faa6ba1230f3159f..fd6d8f18955ca21941360790f9ef42fef42d70a1 100644 (file)
@@ -610,6 +610,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
        case NETDEV_CHANGE:
                if (netif_carrier_ok(dev))
                        break;
+               /* else: fall through */
        case NETDEV_UP:
                test_and_set_bit_lock(0, &b->up);
                break;
index d7a7befeddd42c907bdf88a37e0b77515b3e8705..cbe39e8db39c49180dcbcbf689f5d2e29b03e406 100644 (file)
@@ -918,3 +918,35 @@ void tipc_group_member_evt(struct tipc_group *grp,
        }
        *sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
 }
+
+int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb)
+{
+       struct nlattr *group = nla_nest_start(skb, TIPC_NLA_SOCK_GROUP);
+
+       if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID,
+                       grp->type) ||
+           nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE,
+                       grp->instance) ||
+           nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT,
+                       grp->bc_snd_nxt))
+               goto group_msg_cancel;
+
+       if (grp->scope == TIPC_NODE_SCOPE)
+               if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_NODE_SCOPE))
+                       goto group_msg_cancel;
+
+       if (grp->scope == TIPC_CLUSTER_SCOPE)
+               if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE))
+                       goto group_msg_cancel;
+
+       if (*grp->open)
+               if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_OPEN))
+                       goto group_msg_cancel;
+
+       nla_nest_end(skb, group);
+       return 0;
+
+group_msg_cancel:
+       nla_nest_cancel(skb, group);
+       return -1;
+}
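
tipc_group_fill_sock_diag() above uses the standard netlink nest/cancel pattern: open a nest, emit attributes, and unwind the whole nest on any failure so the message stays well-formed. A hedged kernel-style sketch of that pattern (not standalone; checking the nla_nest_start() return value is part of the usual idiom):

    static int fill_example_nest(struct sk_buff *skb, u32 id, u32 instance)
    {
            struct nlattr *nest = nla_nest_start(skb, TIPC_NLA_SOCK_GROUP);

            if (!nest)
                    return -EMSGSIZE;
            if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID, id) ||
                nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE, instance))
                    goto cancel;
            nla_nest_end(skb, nest);        /* commit: fixes up nest length */
            return 0;

    cancel:
            nla_nest_cancel(skb, nest);     /* trim everything since start */
            return -EMSGSIZE;
    }
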
index 5996af6e9f1ddb72b565ae9288e685c83d6a1c0d..76b4e5a7b39deb83520932a7af678e7b25558be4 100644 (file)
@@ -72,4 +72,5 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
                               u32 port, struct sk_buff_head *xmitq);
 u16 tipc_group_bc_snd_nxt(struct tipc_group *grp);
 void tipc_group_update_member(struct tipc_member *m, int len);
+int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb);
 #endif
index 695acb783969730e448579772dfa3a17b3e827d6..df763be38541040406d2bd8a6712856947232e81 100644 (file)
@@ -106,7 +106,8 @@ struct tipc_stats {
  * @backlogq: queue for messages waiting to be sent
  * @snt_nxt: next sequence number to use for outbound messages
  * @last_retransmitted: sequence number of most recently retransmitted message
- * @stale_count: # of identical retransmit requests made by peer
+ * @stale_cnt: counter for number of identical retransmit attempts
+ * @stale_limit: time when repeated identical retransmits must force link reset
  * @ackers: # of peers that needs to ack each packet before it can be released
  * @acked: # last packet acked by a certain peer. Used for broadcast.
  * @rcv_nxt: next sequence number to expect for inbound messages
@@ -127,14 +128,17 @@ struct tipc_link {
        struct net *net;
 
        /* Management and link supervision data */
-       u32 peer_session;
-       u32 session;
+       u16 peer_session;
+       u16 session;
+       u16 snd_nxt_state;
+       u16 rcv_nxt_state;
        u32 peer_bearer_id;
        u32 bearer_id;
        u32 tolerance;
        u32 abort_limit;
        u32 state;
        u16 peer_caps;
+       bool in_session;
        bool active;
        u32 silent_intv_cnt;
        char if_name[TIPC_MAX_IF_NAME];
@@ -161,7 +165,8 @@ struct tipc_link {
        u16 snd_nxt;
        u16 last_retransm;
        u16 window;
-       u32 stale_count;
+       u16 stale_cnt;
+       unsigned long stale_limit;
 
        /* Reception */
        u16 rcv_nxt;
@@ -212,11 +217,6 @@ enum {
  */
 #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
 
-/* Wildcard value for link session numbers. When it is known that
- * peer endpoint is down, any session number must be accepted.
- */
-#define ANY_SESSION 0x10000
-
 /* Link FSM states:
  */
 enum {
@@ -337,6 +337,11 @@ char tipc_link_plane(struct tipc_link *l)
        return l->net_plane;
 }
 
+void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
+{
+       l->peer_caps = capabilities;
+}
+
 void tipc_link_add_bc_peer(struct tipc_link *snd_l,
                           struct tipc_link *uc_l,
                           struct sk_buff_head *xmitq)
@@ -469,7 +474,7 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
        l->addr = peer;
        l->peer_caps = peer_caps;
        l->net = net;
-       l->peer_session = ANY_SESSION;
+       l->in_session = false;
        l->bearer_id = bearer_id;
        l->tolerance = tolerance;
        l->net_plane = net_plane;
@@ -838,7 +843,7 @@ void link_prepare_wakeup(struct tipc_link *l)
 
 void tipc_link_reset(struct tipc_link *l)
 {
-       l->peer_session = ANY_SESSION;
+       l->in_session = false;
        l->session++;
        l->mtu = l->advertised_mtu;
        __skb_queue_purge(&l->transmq);
@@ -857,10 +862,12 @@ void tipc_link_reset(struct tipc_link *l)
        l->rcv_unacked = 0;
        l->snd_nxt = 1;
        l->rcv_nxt = 1;
+       l->snd_nxt_state = 1;
+       l->rcv_nxt_state = 1;
        l->acked = 0;
        l->silent_intv_cnt = 0;
        l->rst_cnt = 0;
-       l->stale_count = 0;
+       l->stale_cnt = 0;
        l->bc_peer_is_up = false;
        memset(&l->mon_state, 0, sizeof(l->mon_state));
        tipc_link_reset_stats(l);
@@ -997,39 +1004,41 @@ static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
                msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
 }
 
-int tipc_link_retrans(struct tipc_link *l, struct tipc_link *nacker,
+/* tipc_link_retrans() - retransmit one or more packets
+ * @l: the link to transmit on
+ * @r: the receiving link ordering the retransmit. Same as l if unicast
+ * @from: retransmit from (inclusive) this sequence number
+ * @to: retransmit to (inclusive) this sequence number
+ * @xmitq: queue for accumulating the retransmitted packets
+ */
+int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
                      u16 from, u16 to, struct sk_buff_head *xmitq)
 {
        struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
-       struct tipc_msg *hdr;
-       u16 ack = l->rcv_nxt - 1;
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+       u16 ack = l->rcv_nxt - 1;
+       struct tipc_msg *hdr;
 
        if (!skb)
                return 0;
 
        /* Detect repeated retransmit failures on same packet */
-       if (nacker->last_retransm != buf_seqno(skb)) {
-               nacker->last_retransm = buf_seqno(skb);
-               nacker->stale_count = 1;
-       } else if (++nacker->stale_count > 100) {
+       if (r->last_retransm != buf_seqno(skb)) {
+               r->last_retransm = buf_seqno(skb);
+               r->stale_limit = jiffies + msecs_to_jiffies(l->tolerance);
+       } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
                link_retransmit_failure(l, skb);
-               nacker->stale_count = 0;
                if (link_is_bc_sndlink(l))
                        return TIPC_LINK_DOWN_EVT;
                return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
        }
 
-       /* Move forward to where retransmission should start */
        skb_queue_walk(&l->transmq, skb) {
-               if (!less(buf_seqno(skb), from))
-                       break;
-       }
-
-       skb_queue_walk_from(&l->transmq, skb) {
-               if (more(buf_seqno(skb), to))
-                       break;
                hdr = buf_msg(skb);
+               if (less(msg_seqno(hdr), from))
+                       continue;
+               if (more(msg_seqno(hdr), to))
+                       break;
                _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
                if (!_skb)
                        return 0;
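
The retransmit hunk above replaces a bare repeat counter with a combined rule: the link is only declared failed after both more than 99 identical retransmit requests and the expiry of the peer's link tolerance since the first one. A self-contained toy model (simplified: the kernel resets stale_cnt on acknowledged progress elsewhere, not in this function):

    #include <assert.h>
    #include <stdbool.h>

    /* Wrap-safe time comparison, as the kernel's time_after(). */
    #define time_after(a, b) ((long)((b) - (a)) < 0)

    struct retrans_state {
            unsigned int last_seqno;
            unsigned int stale_cnt;
            unsigned long stale_limit;  /* jiffies-style deadline */
    };

    static bool retrans_failed(struct retrans_state *r, unsigned int seqno,
                               unsigned long now, unsigned long tolerance)
    {
            if (r->last_seqno != seqno) {
                    r->last_seqno = seqno;
                    r->stale_cnt = 0;
                    r->stale_limit = now + tolerance;
                    return false;
            }
            return ++r->stale_cnt > 99 && time_after(now, r->stale_limit);
    }

    int main(void)
    {
            struct retrans_state r = { .last_seqno = -1u };
            unsigned long now = 1000, tol = 500;
            bool failed = false;

            for (int i = 0; i < 150 && !failed; i++)
                    failed = retrans_failed(&r, 42, now + i, tol);
            assert(!failed);    /* many repeats, tolerance not yet over */
            failed = retrans_failed(&r, 42, now + 501, tol);
            assert(failed);     /* count and time limit both exceeded */
            return 0;
    }
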
@@ -1063,6 +1072,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
                        skb_queue_tail(mc_inputq, skb);
                        return true;
                }
+               /* else: fall through */
        case CONN_MANAGER:
                skb_queue_tail(inputq, skb);
                return true;
@@ -1271,6 +1281,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
 
                /* Forward queues and wake up waiting users */
                if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
+                       l->stale_cnt = 0;
                        tipc_link_advance_backlog(l, xmitq);
                        if (unlikely(!skb_queue_empty(&l->wakeupq)))
                                link_prepare_wakeup(l);
@@ -1347,6 +1358,8 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
        msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
 
        if (mtyp == STATE_MSG) {
+               if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
+                       msg_set_seqno(hdr, l->snd_nxt_state++);
                msg_set_seq_gap(hdr, rcvgap);
                msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
                msg_set_probe(hdr, probe);
@@ -1438,6 +1451,44 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
        }
 }
 
+/* tipc_link_validate_msg(): validate message against current link state
+ * Returns true if message should be accepted, otherwise false
+ */
+bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
+{
+       u16 curr_session = l->peer_session;
+       u16 session = msg_session(hdr);
+       int mtyp = msg_type(hdr);
+
+       if (msg_user(hdr) != LINK_PROTOCOL)
+               return true;
+
+       switch (mtyp) {
+       case RESET_MSG:
+               if (!l->in_session)
+                       return true;
+               /* Accept only RESET with new session number */
+               return more(session, curr_session);
+       case ACTIVATE_MSG:
+               if (!l->in_session)
+                       return true;
+               /* Accept only ACTIVATE with new or current session number */
+               return !less(session, curr_session);
+       case STATE_MSG:
+               /* Accept only STATE with current session number */
+               if (!l->in_session)
+                       return false;
+               if (session != curr_session)
+                       return false;
+               if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
+                       return true;
+               /* Accept only STATE with new sequence number */
+               return !less(msg_seqno(hdr), l->rcv_nxt_state);
+       default:
+               return false;
+       }
+}
+
 /* tipc_link_proto_rcv(): receive link level protocol message :
  * Note that network plane id propagates through the network, and may
  * change at any time. The node with lowest numerical id determines
@@ -1471,17 +1522,12 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
        hdr = buf_msg(skb);
        data = msg_data(hdr);
 
+       if (!tipc_link_validate_msg(l, hdr))
+               goto exit;
+
        switch (mtyp) {
        case RESET_MSG:
-
-               /* Ignore duplicate RESET with old session number */
-               if ((less_eq(msg_session(hdr), l->peer_session)) &&
-                   (l->peer_session != ANY_SESSION))
-                       break;
-               /* fall thru' */
-
        case ACTIVATE_MSG:
-
                /* Complete own link name with peer's interface name */
                if_name =  strrchr(l->name, ':') + 1;
                if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
@@ -1509,12 +1555,14 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                        rc = TIPC_LINK_UP_EVT;
 
                l->peer_session = msg_session(hdr);
+               l->in_session = true;
                l->peer_bearer_id = msg_bearer_id(hdr);
                if (l->mtu > msg_max_pkt(hdr))
                        l->mtu = msg_max_pkt(hdr);
                break;
 
        case STATE_MSG:
+               l->rcv_nxt_state = msg_seqno(hdr) + 1;
 
                /* Update own tolerance if peer indicates a non-zero value */
                if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
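
The new tipc_link_validate_msg() centralizes the session checks that used to be open-coded in the RESET path. A self-contained toy of its acceptance rules, using the same wrap-safe 16-bit ordering as TIPC's more()/less() (simplified: it assumes the peer advertises TIPC_LINK_PROTO_SEQNO, so STATE sequence numbers are always checked):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool more16(uint16_t a, uint16_t b) { return (int16_t)(a - b) > 0; }
    static bool less16(uint16_t a, uint16_t b) { return (int16_t)(a - b) < 0; }

    enum mtyp { RESET_MSG, ACTIVATE_MSG, STATE_MSG };

    struct link_state {
            bool in_session;
            uint16_t peer_session;
            uint16_t rcv_nxt_state;
    };

    static bool validate(struct link_state *l, enum mtyp mtyp,
                         uint16_t session, uint16_t seqno)
    {
            switch (mtyp) {
            case RESET_MSG:     /* only a *new* session may reset */
                    return !l->in_session || more16(session, l->peer_session);
            case ACTIVATE_MSG:  /* new or current session */
                    return !l->in_session || !less16(session, l->peer_session);
            case STATE_MSG:     /* current session, fresh sequence number */
                    return l->in_session && session == l->peer_session &&
                           !less16(seqno, l->rcv_nxt_state);
            }
            return false;
    }

    int main(void)
    {
            struct link_state l = { true, 7, 100 };

            assert(!validate(&l, RESET_MSG, 7, 0));   /* replayed RESET */
            assert(validate(&l, RESET_MSG, 8, 0));    /* newer session */
            assert(!validate(&l, STATE_MSG, 7, 99));  /* stale STATE */
            assert(validate(&l, STATE_MSG, 7, 100));  /* in-order STATE */
            return 0;
    }
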
index ec59348a81e8b7a5311bb9ff25ab778ea91b0354..7bc494a33fdf1c3cdf8feb04b44db7e6e04a349c 100644 (file)
@@ -110,6 +110,8 @@ char *tipc_link_name(struct tipc_link *l);
 char tipc_link_plane(struct tipc_link *l);
 int tipc_link_prio(struct tipc_link *l);
 int tipc_link_window(struct tipc_link *l);
+void tipc_link_update_caps(struct tipc_link *l, u16 capabilities);
+bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr);
 unsigned long tipc_link_tolerance(struct tipc_link *l);
 void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
                             struct sk_buff_head *xmitq);
index b6c45dccba3d2e7a3301167a90eecbf36d8ac07b..b61891054709597279d6204885a069b848dc869a 100644 (file)
@@ -416,26 +416,31 @@ bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
  */
 bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
 {
-       struct tipc_msg *msg;
-       int imsz, offset;
+       struct tipc_msg *hdr, *ihdr;
+       int imsz;
 
        *iskb = NULL;
        if (unlikely(skb_linearize(skb)))
                goto none;
 
-       msg = buf_msg(skb);
-       offset = msg_hdr_sz(msg) + *pos;
-       if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE)))
+       hdr = buf_msg(skb);
+       if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
                goto none;
 
-       *iskb = skb_clone(skb, GFP_ATOMIC);
-       if (unlikely(!*iskb))
+       ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
+       imsz = msg_size(ihdr);
+
+       if ((*pos + imsz) > msg_data_sz(hdr))
                goto none;
-       skb_pull(*iskb, offset);
-       imsz = msg_size(buf_msg(*iskb));
-       skb_trim(*iskb, imsz);
+
+       *iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
+       if (!*iskb)
+               goto none;
+
+       skb_copy_to_linear_data(*iskb, ihdr, imsz);
        if (unlikely(!tipc_msg_validate(iskb)))
                goto none;
+
        *pos += align(imsz);
        return true;
 none:
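
The reworked tipc_msg_extract() copies each bundled inner message into its own freshly acquired buffer instead of aliasing the bundle via clone+pull+trim, and advances the read position by the 4-byte-aligned inner size. A toy walker over the same layout (the length-prefix framing here is a simplification of the real TIPC header, for illustration only):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define ALIGN4(x) (((x) + 3) & ~3u)

    /* Copy the inner message at *pos out of the bundle into `out`,
     * then step *pos to the next aligned inner message. */
    static int extract(const uint8_t *data, int data_sz, int *pos,
                       uint8_t *out, int out_sz)
    {
            uint32_t imsz;

            if (*pos + 4 > data_sz)
                    return -1;
            memcpy(&imsz, data + *pos, 4);          /* inner msg size */
            if (imsz < 4 || *pos + (int)imsz > data_sz || (int)imsz > out_sz)
                    return -1;
            memcpy(out, data + *pos, imsz);         /* independent copy */
            *pos += ALIGN4(imsz);                   /* like align(imsz) */
            return (int)imsz;
    }

    int main(void)
    {
            uint8_t bundle[32] = {0}, out[16];
            uint32_t sz = 9;                        /* 4B len + 5B payload */
            int pos = 0;

            memcpy(bundle, &sz, 4);
            memcpy(bundle + 4, "hello", 5);
            assert(extract(bundle, sizeof(bundle), &pos, out,
                           sizeof(out)) == 9);
            assert(pos == 12 && !memcmp(out + 4, "hello", 5));
            return 0;
    }
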
@@ -531,12 +536,6 @@ bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
                msg_set_hdr_sz(hdr, BASIC_H_SIZE);
        }
 
-       if (skb_cloned(_skb) &&
-           pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
-               goto exit;
-
-       /* reassign after skb header modifications */
-       hdr = buf_msg(_skb);
        /* Now reverse the concerned fields */
        msg_set_errcode(hdr, err);
        msg_set_non_seq(hdr, 0);
@@ -595,10 +594,6 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
        if (!skb_cloned(skb))
                return true;
 
-       /* Unclone buffer in case it was bundled */
-       if (pskb_expand_head(skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
-               return false;
-
        return true;
 }
 
index 6a44eb812baf4a2fe31eeb55b04023f9f402666b..52fd80b0e7287568778deea89626c8bc3850cfc9 100644 (file)
@@ -45,6 +45,7 @@
 #include "netlink.h"
 
 #define INVALID_NODE_SIG       0x10000
+#define NODE_CLEANUP_AFTER     300000
 
 /* Flags used to take different actions according to flag type
  * TIPC_NOTIFY_NODE_DOWN: notify node is down
@@ -96,6 +97,7 @@ struct tipc_bclink_entry {
  * @link_id: local and remote bearer ids of changing link, if any
  * @publ_list: list of publications
  * @rcu: rcu struct for tipc_node
+ * @delete_at: time after which a down node may be deleted
  */
 struct tipc_node {
        u32 addr;
@@ -121,6 +123,7 @@ struct tipc_node {
        unsigned long keepalive_intv;
        struct timer_list timer;
        struct rcu_head rcu;
+       unsigned long delete_at;
 };
 
 /* Node FSM states and events:
@@ -160,6 +163,7 @@ static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
 static void tipc_node_put(struct tipc_node *node);
 static bool node_is_up(struct tipc_node *n);
+static void tipc_node_delete_from_list(struct tipc_node *node);
 
 struct tipc_sock_conn {
        u32 port;
@@ -359,6 +363,8 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *n, *temp_node;
+       struct tipc_link *l;
+       int bearer_id;
        int i;
 
        spin_lock_bh(&tn->node_list_lock);
@@ -366,6 +372,11 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
        if (n) {
                /* Same node may come back with new capabilities */
                n->capabilities = capabilities;
+               for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+                       l = n->links[bearer_id].link;
+                       if (l)
+                               tipc_link_update_caps(l, capabilities);
+               }
                goto exit;
        }
        n = kzalloc(sizeof(*n), GFP_ATOMIC);
@@ -390,6 +401,7 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
        for (i = 0; i < MAX_BEARERS; i++)
                spin_lock_init(&n->links[i].lock);
        n->state = SELF_DOWN_PEER_LEAVING;
+       n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
        n->signature = INVALID_NODE_SIG;
        n->active_links[0] = INVALID_BEARER_ID;
        n->active_links[1] = INVALID_BEARER_ID;
@@ -433,11 +445,16 @@ static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
        tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
 }
 
-static void tipc_node_delete(struct tipc_node *node)
+static void tipc_node_delete_from_list(struct tipc_node *node)
 {
        list_del_rcu(&node->list);
        hlist_del_rcu(&node->hash);
        tipc_node_put(node);
+}
+
+static void tipc_node_delete(struct tipc_node *node)
+{
+       tipc_node_delete_from_list(node);
 
        del_timer_sync(&node->timer);
        tipc_node_put(node);
@@ -544,6 +561,42 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
        tipc_node_put(node);
 }
 
+static void  tipc_node_clear_links(struct tipc_node *node)
+{
+       int i;
+
+       for (i = 0; i < MAX_BEARERS; i++) {
+               struct tipc_link_entry *le = &node->links[i];
+
+               if (le->link) {
+                       kfree(le->link);
+                       le->link = NULL;
+                       node->link_cnt--;
+               }
+       }
+}
+
+/* tipc_node_cleanup - delete nodes that do not
+ * have active links for NODE_CLEANUP_AFTER time
+ */
+static int tipc_node_cleanup(struct tipc_node *peer)
+{
+       struct tipc_net *tn = tipc_net(peer->net);
+       bool deleted = false;
+
+       spin_lock_bh(&tn->node_list_lock);
+       tipc_node_write_lock(peer);
+
+       if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
+               tipc_node_clear_links(peer);
+               tipc_node_delete_from_list(peer);
+               deleted = true;
+       }
+       tipc_node_write_unlock(peer);
+       spin_unlock_bh(&tn->node_list_lock);
+       return deleted;
+}
+
 /* tipc_node_timeout - handle expiration of node timer
  */
 static void tipc_node_timeout(struct timer_list *t)
@@ -551,21 +604,29 @@ static void tipc_node_timeout(struct timer_list *t)
        struct tipc_node *n = from_timer(n, t, timer);
        struct tipc_link_entry *le;
        struct sk_buff_head xmitq;
+       int remains = n->link_cnt;
        int bearer_id;
        int rc = 0;
 
+       if (!node_is_up(n) && tipc_node_cleanup(n)) {
+               /* Drop the node reference held by the timer */
+               tipc_node_put(n);
+               return;
+       }
+
        __skb_queue_head_init(&xmitq);
 
-       for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+       for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
                tipc_node_read_lock(n);
                le = &n->links[bearer_id];
-               spin_lock_bh(&le->lock);
                if (le->link) {
+                       spin_lock_bh(&le->lock);
                        /* Link tolerance may change asynchronously: */
                        tipc_node_calculate_timer(n, le->link);
                        rc = tipc_link_timeout(le->link, &xmitq);
+                       spin_unlock_bh(&le->lock);
+                       remains--;
                }
-               spin_unlock_bh(&le->lock);
                tipc_node_read_unlock(n);
                tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
                if (rc & TIPC_LINK_DOWN_EVT)
@@ -1171,6 +1232,7 @@ static void node_lost_contact(struct tipc_node *n,
        uint i;
 
        pr_debug("Lost contact with %x\n", n->addr);
+       n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
 
        /* Clean up broadcast state */
        tipc_bcast_remove_peer(n->net, n->bc_entry.link);
@@ -1478,7 +1540,7 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
  * tipc_node_check_state - check and if necessary update node state
  * @skb: TIPC packet
  * @bearer_id: identity of bearer delivering the packet
- * Returns true if state is ok, otherwise consumes buffer and returns false
+ * Returns true if state and msg are ok, otherwise false
  */
 static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
                                  int bearer_id, struct sk_buff_head *xmitq)
@@ -1512,6 +1574,9 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
                }
        }
 
+       if (!tipc_link_validate_msg(l, hdr))
+               return false;
+
        /* Check and update node accessibility if applicable */
        if (state == SELF_UP_PEER_COMING) {
                if (!tipc_link_is_up(l))
@@ -1740,7 +1805,6 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
        struct tipc_node *peer;
        u32 addr;
        int err;
-       int i;
 
        /* We identify the peer by its net */
        if (!info->attrs[TIPC_NLA_NET])
@@ -1775,15 +1839,7 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
                goto err_out;
        }
 
-       for (i = 0; i < MAX_BEARERS; i++) {
-               struct tipc_link_entry *le = &peer->links[i];
-
-               if (le->link) {
-                       kfree(le->link);
-                       le->link = NULL;
-                       peer->link_cnt--;
-               }
-       }
+       tipc_node_clear_links(peer);
        tipc_node_write_unlock(peer);
        tipc_node_delete(peer);
 
index 846c8f240872f25c93af27edcb32692916223b4a..48b3298a248d493e083cb4e39adeebe3ab3f38a5 100644 (file)
@@ -49,14 +49,16 @@ enum {
        TIPC_BCAST_STATE_NACK = (1 << 2),
        TIPC_BLOCK_FLOWCTL    = (1 << 3),
        TIPC_BCAST_RCAST      = (1 << 4),
-       TIPC_NODE_ID128       = (1 << 5)
+       TIPC_NODE_ID128       = (1 << 5),
+       TIPC_LINK_PROTO_SEQNO = (1 << 6)
 };
 
-#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | \
-                               TIPC_BCAST_STATE_NACK | \
-                               TIPC_BCAST_RCAST | \
-                               TIPC_BLOCK_FLOWCTL | \
-                               TIPC_NODE_ID128)
+#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH       |  \
+                               TIPC_BCAST_STATE_NACK  |  \
+                               TIPC_BCAST_RCAST       |  \
+                               TIPC_BLOCK_FLOWCTL     |  \
+                               TIPC_NODE_ID128        |  \
+                               TIPC_LINK_PROTO_SEQNO)
 #define INVALID_BEARER_ID -1
 
 void tipc_node_stop(struct net *net);
index 14a5d055717d2a7b95ea353b15f53dfb81a39515..3d21414ba357d6a35c28f420d9fd5dc7cde84bfd 100644 (file)
@@ -692,9 +692,10 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
 }
 
 /**
- * tipc_poll - read pollmask
+ * tipc_poll - read and possibly block on pollmask
  * @file: file structure associated with the socket
  * @sock: socket for which to calculate the poll bits
+ * @wait: ???
  *
  * Returns pollmask value
  *
@@ -708,12 +709,15 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
  * imply that the operation will succeed, merely that it should be performed
  * and will not block.
  */
-static __poll_t tipc_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t tipc_poll(struct file *file, struct socket *sock,
+                             poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        __poll_t revents = 0;
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
@@ -3033,7 +3037,7 @@ static const struct proto_ops msg_ops = {
        .socketpair     = tipc_socketpair,
        .accept         = sock_no_accept,
        .getname        = tipc_getname,
-       .poll_mask      = tipc_poll_mask,
+       .poll           = tipc_poll,
        .ioctl          = tipc_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = tipc_shutdown,
@@ -3054,7 +3058,7 @@ static const struct proto_ops packet_ops = {
        .socketpair     = tipc_socketpair,
        .accept         = tipc_accept,
        .getname        = tipc_getname,
-       .poll_mask      = tipc_poll_mask,
+       .poll           = tipc_poll,
        .ioctl          = tipc_ioctl,
        .listen         = tipc_listen,
        .shutdown       = tipc_shutdown,
@@ -3075,7 +3079,7 @@ static const struct proto_ops stream_ops = {
        .socketpair     = tipc_socketpair,
        .accept         = tipc_accept,
        .getname        = tipc_getname,
-       .poll_mask      = tipc_poll_mask,
+       .poll           = tipc_poll,
        .ioctl          = tipc_ioctl,
        .listen         = tipc_listen,
        .shutdown       = tipc_shutdown,
@@ -3316,6 +3320,11 @@ int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
                goto stat_msg_cancel;
 
        nla_nest_end(skb, stat);
+
+       if (tsk->group)
+               if (tipc_group_fill_sock_diag(tsk->group, skb))
+                       goto stat_msg_cancel;
+
        nla_nest_end(skb, attrs);
 
        return 0;
index a7a8f8e20ff3051b92b74e12fb3d6024676475fd..4995d84d228d4b8c5c076968fc46329febf34640 100644 (file)
@@ -52,9 +52,12 @@ static DEFINE_SPINLOCK(tls_device_lock);
 
 static void tls_device_free_ctx(struct tls_context *ctx)
 {
-       struct tls_offload_context *offload_ctx = tls_offload_ctx(ctx);
+       if (ctx->tx_conf == TLS_HW)
+               kfree(tls_offload_ctx_tx(ctx));
+
+       if (ctx->rx_conf == TLS_HW)
+               kfree(tls_offload_ctx_rx(ctx));
 
-       kfree(offload_ctx);
        kfree(ctx);
 }
 
@@ -71,10 +74,11 @@ static void tls_device_gc_task(struct work_struct *work)
        list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
                struct net_device *netdev = ctx->netdev;
 
-               if (netdev) {
+               if (netdev && ctx->tx_conf == TLS_HW) {
                        netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
                                                        TLS_OFFLOAD_CTX_DIR_TX);
                        dev_put(netdev);
+                       ctx->netdev = NULL;
                }
 
                list_del(&ctx->list);
@@ -82,6 +86,22 @@ static void tls_device_gc_task(struct work_struct *work)
        }
 }
 
+static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
+                             struct net_device *netdev)
+{
+       if (sk->sk_destruct != tls_device_sk_destruct) {
+               refcount_set(&ctx->refcount, 1);
+               dev_hold(netdev);
+               ctx->netdev = netdev;
+               spin_lock_irq(&tls_device_lock);
+               list_add_tail(&ctx->list, &tls_device_list);
+               spin_unlock_irq(&tls_device_lock);
+
+               ctx->sk_destruct = sk->sk_destruct;
+               sk->sk_destruct = tls_device_sk_destruct;
+       }
+}
+
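
tls_device_attach() above implements an attach-once destructor chain: it saves the socket's current sk_destruct, installs its own, and guards against double installation by checking what is currently installed; tls_device_sk_destruct() then chains back to the saved callback. A self-contained model of the pattern (toy struct sock; the real code stores the saved pointer in the TLS context, not a global):

    #include <assert.h>
    #include <stdio.h>

    struct sock;
    typedef void (*destruct_fn)(struct sock *sk);

    struct sock {
            destruct_fn sk_destruct;
    };

    static destruct_fn saved_destruct;

    static void orig_destruct(struct sock *sk) { puts("orig cleanup"); }

    static void tls_destruct(struct sock *sk)
    {
            puts("tls cleanup");
            if (saved_destruct)
                    saved_destruct(sk);     /* chain to the original */
    }

    static void attach(struct sock *sk)
    {
            if (sk->sk_destruct != tls_destruct) {  /* attach only once */
                    saved_destruct = sk->sk_destruct;
                    sk->sk_destruct = tls_destruct;
            }
    }

    int main(void)
    {
            struct sock sk = { .sk_destruct = orig_destruct };

            attach(&sk);
            attach(&sk);                    /* no-op: already attached */
            assert(saved_destruct == orig_destruct);
            sk.sk_destruct(&sk);            /* prints both cleanups */
            return 0;
    }
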
 static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
 {
        unsigned long flags;
@@ -125,7 +145,7 @@ static void destroy_record(struct tls_record_info *record)
        kfree(record);
 }
 
-static void delete_all_records(struct tls_offload_context *offload_ctx)
+static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
 {
        struct tls_record_info *info, *temp;
 
@@ -141,14 +161,14 @@ static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_record_info *info, *temp;
-       struct tls_offload_context *ctx;
+       struct tls_offload_context_tx *ctx;
        u64 deleted_records = 0;
        unsigned long flags;
 
        if (!tls_ctx)
                return;
 
-       ctx = tls_offload_ctx(tls_ctx);
+       ctx = tls_offload_ctx_tx(tls_ctx);
 
        spin_lock_irqsave(&ctx->lock, flags);
        info = ctx->retransmit_hint;
@@ -179,15 +199,17 @@ static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
 void tls_device_sk_destruct(struct sock *sk)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
-       struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx);
+       struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
 
-       if (ctx->open_record)
-               destroy_record(ctx->open_record);
+       tls_ctx->sk_destruct(sk);
 
-       delete_all_records(ctx);
-       crypto_free_aead(ctx->aead_send);
-       ctx->sk_destruct(sk);
-       clean_acked_data_disable(inet_csk(sk));
+       if (tls_ctx->tx_conf == TLS_HW) {
+               if (ctx->open_record)
+                       destroy_record(ctx->open_record);
+               delete_all_records(ctx);
+               crypto_free_aead(ctx->aead_send);
+               clean_acked_data_disable(inet_csk(sk));
+       }
 
        if (refcount_dec_and_test(&tls_ctx->refcount))
                tls_device_queue_ctx_destruction(tls_ctx);
@@ -219,7 +241,7 @@ static void tls_append_frag(struct tls_record_info *record,
 
 static int tls_push_record(struct sock *sk,
                           struct tls_context *ctx,
-                          struct tls_offload_context *offload_ctx,
+                          struct tls_offload_context_tx *offload_ctx,
                           struct tls_record_info *record,
                           struct page_frag *pfrag,
                           int flags,
@@ -264,7 +286,7 @@ static int tls_push_record(struct sock *sk,
        return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
 }
 
-static int tls_create_new_record(struct tls_offload_context *offload_ctx,
+static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
                                 struct page_frag *pfrag,
                                 size_t prepend_size)
 {
@@ -290,7 +312,7 @@ static int tls_create_new_record(struct tls_offload_context *offload_ctx,
 }
 
 static int tls_do_allocation(struct sock *sk,
-                            struct tls_offload_context *offload_ctx,
+                            struct tls_offload_context_tx *offload_ctx,
                             struct page_frag *pfrag,
                             size_t prepend_size)
 {
@@ -324,7 +346,7 @@ static int tls_push_data(struct sock *sk,
                         unsigned char record_type)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
-       struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx);
+       struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
        int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
        int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
        struct tls_record_info *record = ctx->open_record;
@@ -477,7 +499,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
        return rc;
 }
 
-struct tls_record_info *tls_get_record(struct tls_offload_context *context,
+struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
                                       u32 seq, u64 *p_record_sn)
 {
        u64 record_sn = context->hint_record_sn;
@@ -520,11 +542,123 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
        return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
 }
 
+void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
+{
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+       struct net_device *netdev = tls_ctx->netdev;
+       struct tls_offload_context_rx *rx_ctx;
+       u32 is_req_pending;
+       s64 resync_req;
+       u32 req_seq;
+
+       if (tls_ctx->rx_conf != TLS_HW)
+               return;
+
+       rx_ctx = tls_offload_ctx_rx(tls_ctx);
+       resync_req = atomic64_read(&rx_ctx->resync_req);
+       req_seq = ntohl(resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
+       is_req_pending = resync_req;
+
+       if (unlikely(is_req_pending) && req_seq == seq &&
+           atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
+               netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk,
+                                                     seq + TLS_HEADER_SIZE - 1,
+                                                     rcd_sn);
+}
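
Editor's note: in handle_device_resync() above, the driver's resync request arrives packed into a single atomic 64-bit word: the upper 32 bits carry the record's TCP sequence in network byte order and the low bits double as a request-pending flag, which is why one atomic64_try_cmpxchg() can consume the whole request. A standalone sketch of that encoding (the exact layout is the driver's choice; this only mirrors the decode above):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Upper 32 bits: TCP sequence in network byte order.
 * Low bit: "resync request pending" flag. */
static uint64_t make_resync_req(uint32_t seq)
{
        return ((uint64_t)htonl(seq) << 32) | 1;
}

int main(void)
{
        uint64_t req = make_resync_req(0x12345678);
        uint32_t req_seq = ntohl((uint32_t)(req >> 32));
        int pending = (uint32_t)req & 1;

        printf("pending=%d seq=0x%08x\n", pending, req_seq);
        return 0;
}
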
+
+static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
+{
+       struct strp_msg *rxm = strp_msg(skb);
+       int err = 0, offset = rxm->offset, copy, nsg;
+       struct sk_buff *skb_iter, *unused;
+       struct scatterlist sg[1];
+       char *orig_buf, *buf;
+
+       orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
+                          TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
+       if (!orig_buf)
+               return -ENOMEM;
+       buf = orig_buf;
+
+       nsg = skb_cow_data(skb, 0, &unused);
+       if (unlikely(nsg < 0)) {
+               err = nsg;
+               goto free_buf;
+       }
+
+       sg_init_table(sg, 1);
+       sg_set_buf(&sg[0], buf,
+                  rxm->full_len + TLS_HEADER_SIZE +
+                  TLS_CIPHER_AES_GCM_128_IV_SIZE);
+       skb_copy_bits(skb, offset, buf,
+                     TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
+
+       /* We are interested only in the decrypted data, not the auth tag */
+       err = decrypt_skb(sk, skb, sg);
+       if (err != -EBADMSG)
+               goto free_buf;
+       else
+               err = 0;
+
+       copy = min_t(int, skb_pagelen(skb) - offset,
+                    rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+
+       if (skb->decrypted)
+               skb_store_bits(skb, offset, buf, copy);
+
+       offset += copy;
+       buf += copy;
+
+       skb_walk_frags(skb, skb_iter) {
+               copy = min_t(int, skb_iter->len,
+                            rxm->full_len - offset + rxm->offset -
+                            TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+
+               if (skb_iter->decrypted)
+                       skb_store_bits(skb, offset, buf, copy);
+
+               offset += copy;
+               buf += copy;
+       }
+
+free_buf:
+       kfree(orig_buf);
+       return err;
+}
+
+int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
+{
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+       struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
+       int is_decrypted = skb->decrypted;
+       int is_encrypted = !is_decrypted;
+       struct sk_buff *skb_iter;
+
+       /* Skip if it is already decrypted */
+       if (ctx->sw.decrypted)
+               return 0;
+
+       /* Check if all the data is decrypted already */
+       skb_walk_frags(skb, skb_iter) {
+               is_decrypted &= skb_iter->decrypted;
+               is_encrypted &= !skb_iter->decrypted;
+       }
+
+       ctx->sw.decrypted |= is_decrypted;
+
+       /* Return immediately if the record is either entirely plaintext or
+        * entirely ciphertext. Otherwise reencrypt the partially decrypted
+        * record.
+        */
+       return (is_encrypted || is_decrypted) ? 0 :
+               tls_device_reencrypt(sk, skb);
+}
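
Editor's note: tls_device_decrypted() classifies a record by folding the per-fragment ->decrypted marks together; only a record whose fragments all agree can skip the reencrypt fallback. The same accumulation in a self-contained form (the classification codes are invented for the demo):

#include <stdbool.h>
#include <stdio.h>

/* Fold the per-fragment flags the way tls_device_decrypted() does:
 * a record may be left alone only when every fragment agrees. */
static int classify(const bool *frag_decrypted, int nfrags)
{
        bool is_decrypted = true, is_encrypted = true;
        int i;

        for (i = 0; i < nfrags; i++) {
                is_decrypted &= frag_decrypted[i];
                is_encrypted &= !frag_decrypted[i];
        }

        if (is_decrypted)
                return 0;       /* all plaintext: nothing left to do */
        if (is_encrypted)
                return 1;       /* all ciphertext: decrypt in software */
        return 2;               /* mixed: reencrypt, then decrypt */
}

int main(void)
{
        bool mixed[] = { true, false, true };

        printf("%d\n", classify(mixed, 3));     /* prints 2 */
        return 0;
}
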
+
 int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 {
        u16 nonce_size, tag_size, iv_size, rec_seq_size;
        struct tls_record_info *start_marker_record;
-       struct tls_offload_context *offload_ctx;
+       struct tls_offload_context_tx *offload_ctx;
        struct tls_crypto_info *crypto_info;
        struct net_device *netdev;
        char *iv, *rec_seq;
@@ -546,7 +680,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
                goto out;
        }
 
-       offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE, GFP_KERNEL);
+       offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
        if (!offload_ctx) {
                rc = -ENOMEM;
                goto free_marker_record;
@@ -609,7 +743,6 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 
        clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
        ctx->push_pending_record = tls_device_push_pending_record;
-       offload_ctx->sk_destruct = sk->sk_destruct;
 
        /* TLS offload is greatly simplified if we don't send
         * SKBs where only part of the payload needs to be encrypted.
@@ -619,8 +752,6 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
        if (skb)
                TCP_SKB_CB(skb)->eor = 1;
 
-       refcount_set(&ctx->refcount, 1);
-
        /* We support starting offload on multiple sockets
         * concurrently, so we only need a read lock here.
         * This lock must precede get_netdev_for_sock to prevent races between
@@ -655,19 +786,14 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
        if (rc)
                goto release_netdev;
 
-       ctx->netdev = netdev;
-
-       spin_lock_irq(&tls_device_lock);
-       list_add_tail(&ctx->list, &tls_device_list);
-       spin_unlock_irq(&tls_device_lock);
+       tls_device_attach(ctx, sk, netdev);
 
-       sk->sk_validate_xmit_skb = tls_validate_xmit_skb;
        /* following this assignment tls_is_sk_tx_device_offloaded
         * will return true and the context might be accessed
         * by the netdev's xmit function.
         */
-       smp_store_release(&sk->sk_destruct,
-                         &tls_device_sk_destruct);
+       smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
+       dev_put(netdev);
        up_read(&device_offload_lock);
        goto out;
 
@@ -690,6 +816,105 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
        return rc;
 }
 
+int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
+{
+       struct tls_offload_context_rx *context;
+       struct net_device *netdev;
+       int rc = 0;
+
+       /* We support starting offload on multiple sockets
+        * concurrently, so we only need a read lock here.
+        * This lock must precede get_netdev_for_sock to prevent races between
+        * NETDEV_DOWN and setsockopt.
+        */
+       down_read(&device_offload_lock);
+       netdev = get_netdev_for_sock(sk);
+       if (!netdev) {
+               pr_err_ratelimited("%s: netdev not found\n", __func__);
+               rc = -EINVAL;
+               goto release_lock;
+       }
+
+       if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
+               pr_err_ratelimited("%s: netdev %s with no TLS offload\n",
+                                  __func__, netdev->name);
+               rc = -ENOTSUPP;
+               goto release_netdev;
+       }
+
+       /* Avoid offloading if the device is down.
+        * We don't want to offload new flows after
+        * the NETDEV_DOWN event.
+        */
+       if (!(netdev->flags & IFF_UP)) {
+               rc = -EINVAL;
+               goto release_netdev;
+       }
+
+       context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
+       if (!context) {
+               rc = -ENOMEM;
+               goto release_netdev;
+       }
+
+       ctx->priv_ctx_rx = context;
+       rc = tls_set_sw_offload(sk, ctx, 0);
+       if (rc)
+               goto release_ctx;
+
+       rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
+                                            &ctx->crypto_recv,
+                                            tcp_sk(sk)->copied_seq);
+       if (rc) {
+               pr_err_ratelimited("%s: The netdev has refused to offload this socket\n",
+                                  __func__);
+               goto free_sw_resources;
+       }
+
+       tls_device_attach(ctx, sk, netdev);
+       goto release_netdev;
+
+free_sw_resources:
+       tls_sw_free_resources_rx(sk);
+release_ctx:
+       ctx->priv_ctx_rx = NULL;
+release_netdev:
+       dev_put(netdev);
+release_lock:
+       up_read(&device_offload_lock);
+       return rc;
+}
+
+void tls_device_offload_cleanup_rx(struct sock *sk)
+{
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+       struct net_device *netdev;
+
+       down_read(&device_offload_lock);
+       netdev = tls_ctx->netdev;
+       if (!netdev)
+               goto out;
+
+       if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
+               pr_err_ratelimited("%s: device is missing NETIF_F_HW_TLS_RX cap\n",
+                                  __func__);
+               goto out;
+       }
+
+       netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
+                                       TLS_OFFLOAD_CTX_DIR_RX);
+
+       if (tls_ctx->tx_conf != TLS_HW) {
+               dev_put(netdev);
+               tls_ctx->netdev = NULL;
+       }
+out:
+       up_read(&device_offload_lock);
+       kfree(tls_ctx->rx.rec_seq);
+       kfree(tls_ctx->rx.iv);
+       tls_sw_release_resources_rx(sk);
+}
+
 static int tls_device_down(struct net_device *netdev)
 {
        struct tls_context *ctx, *tmp;
@@ -710,8 +935,12 @@ static int tls_device_down(struct net_device *netdev)
        spin_unlock_irqrestore(&tls_device_lock, flags);
 
        list_for_each_entry_safe(ctx, tmp, &list, list) {
-               netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
-                                               TLS_OFFLOAD_CTX_DIR_TX);
+               if (ctx->tx_conf == TLS_HW)
+                       netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
+                                                       TLS_OFFLOAD_CTX_DIR_TX);
+               if (ctx->rx_conf == TLS_HW)
+                       netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
+                                                       TLS_OFFLOAD_CTX_DIR_RX);
                ctx->netdev = NULL;
                dev_put(netdev);
                list_del_init(&ctx->list);
@@ -732,12 +961,16 @@ static int tls_dev_event(struct notifier_block *this, unsigned long event,
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 
-       if (!(dev->features & NETIF_F_HW_TLS_TX))
+       if (!(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
                return NOTIFY_DONE;
 
        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_FEAT_CHANGE:
+               if ((dev->features & NETIF_F_HW_TLS_RX) &&
+                   !dev->tlsdev_ops->tls_dev_resync_rx)
+                       return NOTIFY_BAD;
+
                if  (dev->tlsdev_ops &&
                     dev->tlsdev_ops->tls_dev_add &&
                     dev->tlsdev_ops->tls_dev_del)
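
Editor's note: the notifier change above makes NETIF_F_HW_TLS_RX acceptance conditional on the driver also providing tls_dev_resync_rx. A compilable toy version of that gate (the feature bits, ops struct and return codes are stand-ins, not the kernel's):

#include <stdio.h>

#define F_HW_TLS_TX (1u << 0)
#define F_HW_TLS_RX (1u << 1)

enum { NOTIFY_DONE, NOTIFY_BAD, NOTIFY_OK };

struct tlsdev_ops_stub {
        int (*dev_add)(void);
        int (*dev_del)(void);
        int (*dev_resync_rx)(void);
};

/* RX offload is only valid if the driver also wired up a resync
 * callback; TX-only drivers are unaffected by the new check. */
static int feat_change(unsigned int features,
                       const struct tlsdev_ops_stub *ops)
{
        if (!(features & (F_HW_TLS_RX | F_HW_TLS_TX)))
                return NOTIFY_DONE;

        if ((features & F_HW_TLS_RX) && !ops->dev_resync_rx)
                return NOTIFY_BAD;

        return (ops->dev_add && ops->dev_del) ? NOTIFY_OK : NOTIFY_BAD;
}

int main(void)
{
        struct tlsdev_ops_stub no_resync = { 0 };

        /* RX advertised without a resync handler is rejected */
        printf("%d\n", feat_change(F_HW_TLS_RX, &no_resync));
        return 0;
}
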
index 748914abdb604e6434db7d8326e8a8480567e1da..e3313c45663f6debd5bd8bfa78b290f39acd8a7d 100644 (file)
@@ -214,7 +214,7 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
 
 static int fill_sg_in(struct scatterlist *sg_in,
                      struct sk_buff *skb,
-                     struct tls_offload_context *ctx,
+                     struct tls_offload_context_tx *ctx,
                      u64 *rcd_sn,
                      s32 *sync_size,
                      int *resync_sgs)
@@ -299,7 +299,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
                                   s32 sync_size, u64 rcd_sn)
 {
        int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
-       struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx);
+       struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
        int payload_len = skb->len - tcp_payload_offset;
        void *buf, *iv, *aad, *dummy_buf;
        struct aead_request *aead_req;
@@ -361,7 +361,7 @@ static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
 {
        int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
        struct tls_context *tls_ctx = tls_get_ctx(sk);
-       struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx);
+       struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
        int payload_len = skb->len - tcp_payload_offset;
        struct scatterlist *sg_in, sg_out[3];
        struct sk_buff *nskb = NULL;
@@ -413,9 +413,10 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
 
        return tls_sw_fallback(sk, skb);
 }
+EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
 
 int tls_sw_fallback_init(struct sock *sk,
-                        struct tls_offload_context *offload_ctx,
+                        struct tls_offload_context_tx *offload_ctx,
                         struct tls_crypto_info *crypto_info)
 {
        const u8 *key;
index a127d61e8af984d3aaefde49c94f48a9a9187d53..b09867c8b8179f06634d3e614c90d2f4b56cf75e 100644 (file)
@@ -51,15 +51,6 @@ enum {
        TLSV6,
        TLS_NUM_PROTS,
 };
-enum {
-       TLS_BASE,
-       TLS_SW,
-#ifdef CONFIG_TLS_DEVICE
-       TLS_HW,
-#endif
-       TLS_HW_RECORD,
-       TLS_NUM_CONFIG,
-};
 
 static struct proto *saved_tcpv6_prot;
 static DEFINE_MUTEX(tcpv6_prot_mutex);
@@ -290,7 +281,10 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
        }
 
 #ifdef CONFIG_TLS_DEVICE
-       if (ctx->tx_conf != TLS_HW) {
+       if (ctx->rx_conf == TLS_HW)
+               tls_device_offload_cleanup_rx(sk);
+
+       if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
 #else
        {
 #endif
@@ -470,8 +464,16 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
                        conf = TLS_SW;
                }
        } else {
-               rc = tls_set_sw_offload(sk, ctx, 0);
-               conf = TLS_SW;
+#ifdef CONFIG_TLS_DEVICE
+               rc = tls_set_device_offload_rx(sk, ctx);
+               conf = TLS_HW;
+               if (rc) {
+#else
+               {
+#endif
+                       rc = tls_set_sw_offload(sk, ctx, 0);
+                       conf = TLS_SW;
+               }
        }
 
        if (rc)
@@ -629,6 +631,12 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
        prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
        prot[TLS_HW][TLS_SW].sendmsg            = tls_device_sendmsg;
        prot[TLS_HW][TLS_SW].sendpage           = tls_device_sendpage;
+
+       prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];
+
+       prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];
+
+       prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
 #endif
 
        prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
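
Editor's note: build_protos() fills a TLS_NUM_CONFIG x TLS_NUM_CONFIG matrix indexed by [tx_conf][rx_conf]; the TLS_HW rows added above are plain copies of the corresponding software entries, so device RX offload inherits every software path it does not override. A reduced sketch of that table construction (struct and callback names are illustrative):

#include <stdio.h>

enum { TLS_BASE, TLS_SW, TLS_HW, TLS_HW_RECORD, TLS_NUM_CONFIG };

struct proto_stub {
        const char *sendmsg;
        const char *recvmsg;
};

/* Each entry starts as a copy of another, so a HW row inherits every
 * software path it does not explicitly override. */
static void build(struct proto_stub p[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
                  const struct proto_stub *base)
{
        p[TLS_BASE][TLS_BASE] = *base;

        p[TLS_SW][TLS_BASE] = p[TLS_BASE][TLS_BASE];
        p[TLS_SW][TLS_BASE].sendmsg = "tls_sw_sendmsg";

        p[TLS_BASE][TLS_SW] = p[TLS_BASE][TLS_BASE];
        p[TLS_BASE][TLS_SW].recvmsg = "tls_sw_recvmsg";

        p[TLS_SW][TLS_SW] = p[TLS_SW][TLS_BASE];
        p[TLS_SW][TLS_SW].recvmsg = "tls_sw_recvmsg";

        /* the new rows: device RX reuses the software RX entries */
        p[TLS_BASE][TLS_HW] = p[TLS_BASE][TLS_SW];
        p[TLS_SW][TLS_HW] = p[TLS_SW][TLS_SW];
}

int main(void)
{
        struct proto_stub p[TLS_NUM_CONFIG][TLS_NUM_CONFIG];
        struct proto_stub base = { "tcp_sendmsg", "tcp_recvmsg" };

        build(p, &base);
        printf("[SW][HW].recvmsg = %s\n", p[TLS_SW][TLS_HW].recvmsg);
        return 0;
}
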
@@ -712,7 +720,7 @@ static int __init tls_register(void)
        build_protos(tls_prots[TLSV4], &tcp_prot);
 
        tls_sw_proto_ops = inet_stream_ops;
-       tls_sw_proto_ops.poll_mask = tls_sw_poll_mask;
+       tls_sw_proto_ops.poll = tls_sw_poll;
        tls_sw_proto_ops.splice_read = tls_sw_splice_read;
 
 #ifdef CONFIG_TLS_DEVICE
index f127fac88acfe0046b0a7dd55bab4d6d486de105..7d194c0cd6cf11d50783d8bd268e543002ed02d7 100644 (file)
@@ -53,18 +53,14 @@ static int tls_do_decryption(struct sock *sk,
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-       struct strp_msg *rxm = strp_msg(skb);
        struct aead_request *aead_req;
 
        int ret;
-       unsigned int req_size = sizeof(struct aead_request) +
-               crypto_aead_reqsize(ctx->aead_recv);
 
-       aead_req = kzalloc(req_size, flags);
+       aead_req = aead_request_alloc(ctx->aead_recv, flags);
        if (!aead_req)
                return -ENOMEM;
 
-       aead_request_set_tfm(aead_req, ctx->aead_recv);
        aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
        aead_request_set_crypt(aead_req, sgin, sgout,
                               data_len + tls_ctx->rx.tag_size,
@@ -74,19 +70,7 @@ static int tls_do_decryption(struct sock *sk,
 
        ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
 
-       if (ret < 0)
-               goto out;
-
-       rxm->offset += tls_ctx->rx.prepend_size;
-       rxm->full_len -= tls_ctx->rx.overhead_size;
-       tls_advance_record_sn(sk, &tls_ctx->rx);
-
-       ctx->decrypted = true;
-
-       ctx->saved_data_ready(sk);
-
-out:
-       kfree(aead_req);
+       aead_request_free(aead_req);
        return ret;
 }
 
@@ -224,8 +208,7 @@ static int tls_push_record(struct sock *sk, int flags,
        struct aead_request *req;
        int rc;
 
-       req = kzalloc(sizeof(struct aead_request) +
-                     crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation);
+       req = aead_request_alloc(ctx->aead_send, sk->sk_allocation);
        if (!req)
                return -ENOMEM;
 
@@ -267,7 +250,7 @@ static int tls_push_record(struct sock *sk, int flags,
 
        tls_advance_record_sn(sk, &tls_ctx->tx);
 out_req:
-       kfree(req);
+       aead_request_free(req);
        return rc;
 }
 
@@ -280,7 +263,7 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
                              int length, int *pages_used,
                              unsigned int *size_used,
                              struct scatterlist *to, int to_max_pages,
-                             bool charge)
+                             bool charge, bool revert)
 {
        struct page *pages[MAX_SKB_FRAGS];
 
@@ -331,6 +314,8 @@ static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
 out:
        *size_used = size;
        *pages_used = num_elem;
+       if (revert)
+               iov_iter_revert(from, size);
 
        return rc;
 }
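
Editor's note: the new revert parameter exists because zerocopy_from_iter() advances the caller's iov_iter while mapping pages; on the receive path the same bytes must remain readable if decryption falls back to a plain copy, so the iterator is rewound by the amount consumed. A toy iterator showing the consume-then-revert shape (the real iov_iter is far richer than this):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Toy cursor standing in for iov_iter. */
struct iter {
        const char *buf;
        size_t off, len;
};

static size_t iter_copy(struct iter *it, char *dst, size_t n)
{
        if (n > it->len - it->off)
                n = it->len - it->off;
        memcpy(dst, it->buf + it->off, n);
        it->off += n;           /* copying advances the iterator */
        return n;
}

static void iter_revert(struct iter *it, size_t n)
{
        it->off -= n;           /* analogue of iov_iter_revert() */
}

/* Shape of zerocopy_from_iter(..., revert=true): consume the bytes,
 * then rewind so the caller can still fall back to a regular copy. */
static size_t peek_pages(struct iter *it, char *dst, size_t n)
{
        size_t used = iter_copy(it, dst, n);

        iter_revert(it, used);
        return used;
}

int main(void)
{
        struct iter it = { "abcdef", 0, 6 };
        char tmp[8] = "";

        peek_pages(&it, tmp, 4);
        printf("peeked=%s off=%zu\n", tmp, it.off);     /* off stays 0 */
        return 0;
}
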
@@ -432,7 +417,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
                                &ctx->sg_plaintext_size,
                                ctx->sg_plaintext_data,
                                ARRAY_SIZE(ctx->sg_plaintext_data),
-                               true);
+                               true, false);
                        if (ret)
                                goto fallback_to_reg_send;
 
@@ -670,8 +655,38 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
        return skb;
 }
 
-static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
-                      struct scatterlist *sgout)
+static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
+                             struct scatterlist *sgout, bool *zc)
+{
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+       struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+       struct strp_msg *rxm = strp_msg(skb);
+       int err = 0;
+
+#ifdef CONFIG_TLS_DEVICE
+       err = tls_device_decrypted(sk, skb);
+       if (err < 0)
+               return err;
+#endif
+       if (!ctx->decrypted) {
+               err = decrypt_skb(sk, skb, sgout);
+               if (err < 0)
+                       return err;
+       } else {
+               *zc = false;
+       }
+
+       rxm->offset += tls_ctx->rx.prepend_size;
+       rxm->full_len -= tls_ctx->rx.overhead_size;
+       tls_advance_record_sn(sk, &tls_ctx->rx);
+       ctx->decrypted = true;
+       ctx->saved_data_ready(sk);
+
+       return err;
+}
+
+int decrypt_skb(struct sock *sk, struct sk_buff *skb,
+               struct scatterlist *sgout)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@ -812,11 +827,11 @@ int tls_sw_recvmsg(struct sock *sk,
                                err = zerocopy_from_iter(sk, &msg->msg_iter,
                                                         to_copy, &pages,
                                                         &chunk, &sgin[1],
-                                                        MAX_SKB_FRAGS, false);
+                                                        MAX_SKB_FRAGS, false, true);
                                if (err < 0)
                                        goto fallback_to_reg_recv;
 
-                               err = decrypt_skb(sk, skb, sgin);
+                               err = decrypt_skb_update(sk, skb, sgin, &zc);
                                for (; pages > 0; pages--)
                                        put_page(sg_page(&sgin[pages]));
                                if (err < 0) {
@@ -825,7 +840,7 @@ int tls_sw_recvmsg(struct sock *sk,
                                }
                        } else {
 fallback_to_reg_recv:
-                               err = decrypt_skb(sk, skb, NULL);
+                               err = decrypt_skb_update(sk, skb, NULL, &zc);
                                if (err < 0) {
                                        tls_err_abort(sk, EBADMSG);
                                        goto recv_end;
@@ -880,6 +895,7 @@ ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
        int err = 0;
        long timeo;
        int chunk;
+       bool zc;
 
        lock_sock(sk);
 
@@ -896,7 +912,7 @@ ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
        }
 
        if (!ctx->decrypted) {
-               err = decrypt_skb(sk, skb, NULL);
+               err = decrypt_skb_update(sk, skb, NULL, &zc);
 
                if (err < 0) {
                        tls_err_abort(sk, EBADMSG);
@@ -919,29 +935,30 @@ ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
        return copied ? : err;
 }
 
-__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events)
+unsigned int tls_sw_poll(struct file *file, struct socket *sock,
+                        struct poll_table_struct *wait)
 {
+       unsigned int ret;
        struct sock *sk = sock->sk;
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-       __poll_t mask;
 
-       /* Grab EPOLLOUT and EPOLLHUP from the underlying socket */
-       mask = ctx->sk_poll_mask(sock, events);
+       /* Grab POLLOUT and POLLHUP from the underlying socket */
+       ret = ctx->sk_poll(file, sock, wait);
 
-       /* Clear EPOLLIN bits, and set based on recv_pkt */
-       mask &= ~(EPOLLIN | EPOLLRDNORM);
+       /* Clear POLLIN bits, and set based on recv_pkt */
+       ret &= ~(POLLIN | POLLRDNORM);
        if (ctx->recv_pkt)
-               mask |= EPOLLIN | EPOLLRDNORM;
+               ret |= POLLIN | POLLRDNORM;
 
-       return mask;
+       return ret;
 }
 
 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
 {
        struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-       char header[tls_ctx->rx.prepend_size];
+       char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
        struct strp_msg *rxm = strp_msg(skb);
        size_t cipher_overhead;
        size_t data_len = 0;
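
Editor's note: the hunk above swaps a variable-length array sized by rx.prepend_size for a worst-case TLS_HEADER_SIZE + MAX_IV_SIZE buffer guarded by a WARN_ON. The same defensive pattern in a standalone form (the constant values here are illustrative, not necessarily the kernel's):

#include <stdio.h>
#include <stddef.h>

#define TLS_HEADER_SIZE 5
#define MAX_IV_SIZE 16

/* Worst-case fixed buffer plus a runtime bound check, instead of a
 * VLA sized by a field an attacker might influence. */
static int read_header(size_t prepend_size)
{
        char header[TLS_HEADER_SIZE + MAX_IV_SIZE];

        if (prepend_size > sizeof(header))
                return -1;      /* the WARN_ON + -EINVAL path above */

        /* ... copy prepend_size bytes into header and parse ... */
        return 0;
}

int main(void)
{
        printf("%d\n", read_header(64));        /* out of bounds: -1 */
        printf("%d\n", read_header(13));        /* fits: 0 */
        return 0;
}
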
@@ -951,6 +968,12 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
        if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
                return 0;
 
+       /* Sanity-check size of on-stack buffer. */
+       if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
+               ret = -EINVAL;
+               goto read_failure;
+       }
+
        /* Linearize header to local buffer */
        ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);
 
@@ -978,6 +1001,10 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
                goto read_failure;
        }
 
+#ifdef CONFIG_TLS_DEVICE
+       handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
+                            *(u64*)tls_ctx->rx.rec_seq);
+#endif
        return data_len + TLS_HEADER_SIZE;
 
 read_failure:
@@ -990,9 +1017,6 @@ static void tls_queue(struct strparser *strp, struct sk_buff *skb)
 {
        struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-       struct strp_msg *rxm;
-
-       rxm = strp_msg(skb);
 
        ctx->decrypted = false;
 
@@ -1022,7 +1046,7 @@ void tls_sw_free_resources_tx(struct sock *sk)
        kfree(ctx);
 }
 
-void tls_sw_free_resources_rx(struct sock *sk)
+void tls_sw_release_resources_rx(struct sock *sk)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@ -1041,6 +1065,14 @@ void tls_sw_free_resources_rx(struct sock *sk)
                strp_done(&ctx->strp);
                lock_sock(sk);
        }
+}
+
+void tls_sw_free_resources_rx(struct sock *sk)
+{
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+       struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+
+       tls_sw_release_resources_rx(sk);
 
        kfree(ctx);
 }
@@ -1065,28 +1097,38 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
        }
 
        if (tx) {
-               sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
-               if (!sw_ctx_tx) {
-                       rc = -ENOMEM;
-                       goto out;
+               if (!ctx->priv_ctx_tx) {
+                       sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
+                       if (!sw_ctx_tx) {
+                               rc = -ENOMEM;
+                               goto out;
+                       }
+                       ctx->priv_ctx_tx = sw_ctx_tx;
+               } else {
+                       sw_ctx_tx =
+                               (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
                }
-               crypto_init_wait(&sw_ctx_tx->async_wait);
-               ctx->priv_ctx_tx = sw_ctx_tx;
        } else {
-               sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
-               if (!sw_ctx_rx) {
-                       rc = -ENOMEM;
-                       goto out;
+               if (!ctx->priv_ctx_rx) {
+                       sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
+                       if (!sw_ctx_rx) {
+                               rc = -ENOMEM;
+                               goto out;
+                       }
+                       ctx->priv_ctx_rx = sw_ctx_rx;
+               } else {
+                       sw_ctx_rx =
+                               (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
                }
-               crypto_init_wait(&sw_ctx_rx->async_wait);
-               ctx->priv_ctx_rx = sw_ctx_rx;
        }
 
        if (tx) {
+               crypto_init_wait(&sw_ctx_tx->async_wait);
                crypto_info = &ctx->crypto_send;
                cctx = &ctx->tx;
                aead = &sw_ctx_tx->aead_send;
        } else {
+               crypto_init_wait(&sw_ctx_rx->async_wait);
                crypto_info = &ctx->crypto_recv;
                cctx = &ctx->rx;
                aead = &sw_ctx_rx->aead_recv;
@@ -1111,7 +1153,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
        }
 
        /* Sanity-check the IV size for stack allocations. */
-       if (iv_size > MAX_IV_SIZE) {
+       if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
                rc = -EINVAL;
                goto free_priv;
        }
@@ -1191,7 +1233,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
                sk->sk_data_ready = tls_data_ready;
                write_unlock_bh(&sk->sk_callback_lock);
 
-               sw_ctx_rx->sk_poll_mask = sk->sk_socket->ops->poll_mask;
+               sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;
 
                strp_check_rcv(&sw_ctx_rx->strp);
        }
index 95b02a71fd47161735c51988463e5f5e4a7d44b3..e5473c03d667ad51308c3e8b705f3b1187f619e8 100644 (file)
@@ -638,8 +638,9 @@ static int unix_stream_connect(struct socket *, struct sockaddr *,
 static int unix_socketpair(struct socket *, struct socket *);
 static int unix_accept(struct socket *, struct socket *, int, bool);
 static int unix_getname(struct socket *, struct sockaddr *, int);
-static __poll_t unix_poll_mask(struct socket *, __poll_t);
-static __poll_t unix_dgram_poll_mask(struct socket *, __poll_t);
+static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
+static __poll_t unix_dgram_poll(struct file *, struct socket *,
+                                   poll_table *);
 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
 static int unix_shutdown(struct socket *, int);
 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
@@ -680,7 +681,7 @@ static const struct proto_ops unix_stream_ops = {
        .socketpair =   unix_socketpair,
        .accept =       unix_accept,
        .getname =      unix_getname,
-       .poll_mask =    unix_poll_mask,
+       .poll =         unix_poll,
        .ioctl =        unix_ioctl,
        .listen =       unix_listen,
        .shutdown =     unix_shutdown,
@@ -703,7 +704,7 @@ static const struct proto_ops unix_dgram_ops = {
        .socketpair =   unix_socketpair,
        .accept =       sock_no_accept,
        .getname =      unix_getname,
-       .poll_mask =    unix_dgram_poll_mask,
+       .poll =         unix_dgram_poll,
        .ioctl =        unix_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     unix_shutdown,
@@ -725,7 +726,7 @@ static const struct proto_ops unix_seqpacket_ops = {
        .socketpair =   unix_socketpair,
        .accept =       unix_accept,
        .getname =      unix_getname,
-       .poll_mask =    unix_dgram_poll_mask,
+       .poll =         unix_dgram_poll,
        .ioctl =        unix_ioctl,
        .listen =       unix_listen,
        .shutdown =     unix_shutdown,
@@ -2629,10 +2630,13 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        return err;
 }
 
-static __poll_t unix_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
-       __poll_t mask = 0;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* exceptional events? */
        if (sk->sk_err)
@@ -2661,11 +2665,15 @@ static __poll_t unix_poll_mask(struct socket *sock, __poll_t events)
        return mask;
 }
 
-static __poll_t unix_dgram_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+                                   poll_table *wait)
 {
        struct sock *sk = sock->sk, *other;
-       int writable;
-       __poll_t mask = 0;
+       unsigned int writable;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
@@ -2691,7 +2699,7 @@ static __poll_t unix_dgram_poll_mask(struct socket *sock, __poll_t events)
        }
 
        /* No write status requested, avoid expensive OUT tests. */
-       if (!(events & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
+       if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
                return mask;
 
        writable = unix_writable(sk);
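
Editor's note: the restored unix_dgram_poll() first registers on the wait queue via sock_poll_wait(), then consults poll_requested_events() so the expensive writability check runs only when the caller asked for output events. A self-contained caricature of that control flow (the constants and wait-table struct are simplified stand-ins):

#include <stdio.h>

#define POLLIN     0x001
#define POLLOUT    0x004
#define POLLERR    0x008
#define POLLWRNORM 0x100

struct wait_table { unsigned int requested; };

static unsigned int poll_requested_events(const struct wait_table *wait)
{
        return wait ? wait->requested : ~0u;
}

static unsigned int my_poll(struct wait_table *wait, int readable, int error)
{
        unsigned int mask = 0;

        /* sock_poll_wait(file, sk_sleep(sk), wait) would go here */
        if (error)
                mask |= POLLERR;
        if (readable)
                mask |= POLLIN;

        /* no write status requested: skip the expensive OUT test */
        if (!(poll_requested_events(wait) & (POLLOUT | POLLWRNORM)))
                return mask;

        /* ... expensive writable check ... */
        mask |= POLLOUT;
        return mask;
}

int main(void)
{
        struct wait_table t = { POLLIN };

        printf("0x%x\n", my_poll(&t, 1, 0));    /* POLLIN only */
        return 0;
}
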
index bb5d5fa68c357af4962602b2bced2164c6e5ab44..c1076c19b8580688ff041f71aee0d05ce0906030 100644 (file)
@@ -850,11 +850,18 @@ static int vsock_shutdown(struct socket *sock, int mode)
        return err;
 }
 
-static __poll_t vsock_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t vsock_poll(struct file *file, struct socket *sock,
+                              poll_table *wait)
 {
-       struct sock *sk = sock->sk;
-       struct vsock_sock *vsk = vsock_sk(sk);
-       __poll_t mask = 0;
+       struct sock *sk;
+       __poll_t mask;
+       struct vsock_sock *vsk;
+
+       sk = sock->sk;
+       vsk = vsock_sk(sk);
+
+       poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        if (sk->sk_err)
                /* Signify that there has been an error on this socket. */
@@ -1084,7 +1091,7 @@ static const struct proto_ops vsock_dgram_ops = {
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = vsock_getname,
-       .poll_mask = vsock_poll_mask,
+       .poll = vsock_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = vsock_shutdown,
@@ -1842,7 +1849,7 @@ static const struct proto_ops vsock_stream_ops = {
        .socketpair = sock_no_socketpair,
        .accept = vsock_accept,
        .getname = vsock_getname,
-       .poll_mask = vsock_poll_mask,
+       .poll = vsock_poll,
        .ioctl = sock_no_ioctl,
        .listen = vsock_listen,
        .shutdown = vsock_shutdown,
index 8e03bd3f3668b573c4d61a786e90a238abe9fe66..5d3cce9e8744d5207753107aeb55518f2848f50a 100644 (file)
@@ -201,7 +201,7 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
                return -ENODEV;
        }
 
-       if (le32_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
+       if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
                return virtio_transport_send_pkt_loopback(vsock, pkt);
 
        if (pkt->reply)
index 48e8097339ab44cca29bc9bbc938b58ea3a43333..a88551f3bc43f201cfd09ec7dc93ed2a42e93cf8 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright 2006-2010         Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright 2015      Intel Deutschland GmbH
+ * Copyright 2015-2017 Intel Deutschland GmbH
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -744,6 +744,8 @@ int wiphy_register(struct wiphy *wiphy)
 
        /* sanity check supported bands/channels */
        for (band = 0; band < NUM_NL80211_BANDS; band++) {
+               u16 types = 0;
+
                sband = wiphy->bands[band];
                if (!sband)
                        continue;
@@ -788,6 +790,23 @@ int wiphy_register(struct wiphy *wiphy)
                        sband->channels[i].band = band;
                }
 
+               for (i = 0; i < sband->n_iftype_data; i++) {
+                       const struct ieee80211_sband_iftype_data *iftd;
+
+                       iftd = &sband->iftype_data[i];
+
+                       if (WARN_ON(!iftd->types_mask))
+                               return -EINVAL;
+                       if (WARN_ON(types & iftd->types_mask))
+                               return -EINVAL;
+
+                       /* at least one piece of information must be present */
+                       if (WARN_ON(!iftd->he_cap.has_he))
+                               return -EINVAL;
+
+                       types |= iftd->types_mask;
+               }
+
                have_band = true;
        }
 
index 63eb1b5fdd04561169618ca30110aea5225d92cc..7f52ef56932035fe0d0a503128a9e24f88c0dd22 100644 (file)
@@ -76,7 +76,7 @@ struct cfg80211_registered_device {
        struct cfg80211_scan_request *scan_req; /* protected by RTNL */
        struct sk_buff *scan_msg;
        struct list_head sched_scan_req_list;
-       unsigned long suspend_at;
+       time64_t suspend_at;
        struct work_struct scan_done_wk;
 
        struct genl_info *cur_cmd_info;
index c7bbe5f0aae8839bdfe5ac7b7bd02c6aad8ac8dc..e4e5f025d16b469d175b04aa85bd20ac4a492448 100644 (file)
@@ -428,6 +428,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 },
        [NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 },
        [NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 },
+       [NL80211_ATTR_HE_CAPABILITY] = { .type = NLA_BINARY,
+                                        .len = NL80211_HE_MAX_CAPABILITY_LEN },
 };
 
 /* policy for the key attributes */
@@ -1324,6 +1326,34 @@ static int nl80211_send_coalesce(struct sk_buff *msg,
        return 0;
 }
 
+static int
+nl80211_send_iftype_data(struct sk_buff *msg,
+                        const struct ieee80211_sband_iftype_data *iftdata)
+{
+       const struct ieee80211_sta_he_cap *he_cap = &iftdata->he_cap;
+
+       if (nl80211_put_iftypes(msg, NL80211_BAND_IFTYPE_ATTR_IFTYPES,
+                               iftdata->types_mask))
+               return -ENOBUFS;
+
+       if (he_cap->has_he) {
+               if (nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC,
+                           sizeof(he_cap->he_cap_elem.mac_cap_info),
+                           he_cap->he_cap_elem.mac_cap_info) ||
+                   nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY,
+                           sizeof(he_cap->he_cap_elem.phy_cap_info),
+                           he_cap->he_cap_elem.phy_cap_info) ||
+                   nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET,
+                           sizeof(he_cap->he_mcs_nss_supp),
+                           &he_cap->he_mcs_nss_supp) ||
+                   nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE,
+                           sizeof(he_cap->ppe_thres), he_cap->ppe_thres))
+                       return -ENOBUFS;
+       }
+
+       return 0;
+}
+
 static int nl80211_send_band_rateinfo(struct sk_buff *msg,
                                      struct ieee80211_supported_band *sband)
 {
@@ -1353,6 +1383,32 @@ static int nl80211_send_band_rateinfo(struct sk_buff *msg,
                         sband->vht_cap.cap)))
                return -ENOBUFS;
 
+       if (sband->n_iftype_data) {
+               struct nlattr *nl_iftype_data =
+                       nla_nest_start(msg, NL80211_BAND_ATTR_IFTYPE_DATA);
+               int err;
+
+               if (!nl_iftype_data)
+                       return -ENOBUFS;
+
+               for (i = 0; i < sband->n_iftype_data; i++) {
+                       struct nlattr *iftdata;
+
+                       iftdata = nla_nest_start(msg, i + 1);
+                       if (!iftdata)
+                               return -ENOBUFS;
+
+                       err = nl80211_send_iftype_data(msg,
+                                                      &sband->iftype_data[i]);
+                       if (err)
+                               return err;
+
+                       nla_nest_end(msg, iftdata);
+               }
+
+               nla_nest_end(msg, nl_iftype_data);
+       }
+
        /* add bitrates */
        nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES);
        if (!nl_rates)
@@ -2757,7 +2813,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) ||
            nla_put_u32(msg, NL80211_ATTR_GENERATION,
                        rdev->devlist_generation ^
-                       (cfg80211_rdev_list_generation << 2)))
+                       (cfg80211_rdev_list_generation << 2)) ||
+           nla_put_u8(msg, NL80211_ATTR_4ADDR, wdev->use_4addr))
                goto nla_put_failure;
 
        if (rdev->ops->get_channel) {
@@ -4471,6 +4528,9 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
        case RATE_INFO_BW_160:
                rate_flg = NL80211_RATE_INFO_160_MHZ_WIDTH;
                break;
+       case RATE_INFO_BW_HE_RU:
+               rate_flg = 0;
+               WARN_ON(!(info->flags & RATE_INFO_FLAGS_HE_MCS));
        }
 
        if (rate_flg && nla_put_flag(msg, rate_flg))
@@ -4490,6 +4550,19 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
                if (info->flags & RATE_INFO_FLAGS_SHORT_GI &&
                    nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI))
                        return false;
+       } else if (info->flags & RATE_INFO_FLAGS_HE_MCS) {
+               if (nla_put_u8(msg, NL80211_RATE_INFO_HE_MCS, info->mcs))
+                       return false;
+               if (nla_put_u8(msg, NL80211_RATE_INFO_HE_NSS, info->nss))
+                       return false;
+               if (nla_put_u8(msg, NL80211_RATE_INFO_HE_GI, info->he_gi))
+                       return false;
+               if (nla_put_u8(msg, NL80211_RATE_INFO_HE_DCM, info->he_dcm))
+                       return false;
+               if (info->bw == RATE_INFO_BW_HE_RU &&
+                   nla_put_u8(msg, NL80211_RATE_INFO_HE_RU_ALLOC,
+                              info->he_ru_alloc))
+                       return false;
        }
 
        nla_nest_end(msg, rate);
@@ -4546,13 +4619,13 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
 
 #define PUT_SINFO(attr, memb, type) do {                               \
        BUILD_BUG_ON(sizeof(type) == sizeof(u64));                      \
-       if (sinfo->filled & (1ULL << NL80211_STA_INFO_ ## attr) &&      \
+       if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) &&       \
            nla_put_ ## type(msg, NL80211_STA_INFO_ ## attr,            \
                             sinfo->memb))                              \
                goto nla_put_failure;                                   \
        } while (0)
 #define PUT_SINFO_U64(attr, memb) do {                                 \
-       if (sinfo->filled & (1ULL << NL80211_STA_INFO_ ## attr) &&      \
+       if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) &&       \
            nla_put_u64_64bit(msg, NL80211_STA_INFO_ ## attr,           \
                              sinfo->memb, NL80211_STA_INFO_PAD))       \
                goto nla_put_failure;                                   \
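
Editor's note: the BIT() to BIT_ULL() conversions in this file matter because sinfo->filled is a u64: BIT() expands to 1UL << n, which is only 32 bits wide on 32-bit builds, so testing flags past bit 31 would shift out of range. A two-line demonstration:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)     (1UL << (n))   /* as wide as unsigned long: 32 bits on many ABIs */
#define BIT_ULL(n) (1ULL << (n))  /* always 64 bits wide */

int main(void)
{
        uint64_t filled = BIT_ULL(40);  /* a flag past bit 31 */

        /* With a 32-bit unsigned long, BIT(40) would shift out of
         * range (undefined behaviour); BIT_ULL(40) is always safe. */
        printf("%s\n", (filled & BIT_ULL(40)) ? "set" : "clear");
        return 0;
}
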
@@ -4561,14 +4634,14 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
        PUT_SINFO(CONNECTED_TIME, connected_time, u32);
        PUT_SINFO(INACTIVE_TIME, inactive_time, u32);
 
-       if (sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES) |
-                            BIT(NL80211_STA_INFO_RX_BYTES64)) &&
+       if (sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES) |
+                            BIT_ULL(NL80211_STA_INFO_RX_BYTES64)) &&
            nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES,
                        (u32)sinfo->rx_bytes))
                goto nla_put_failure;
 
-       if (sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES) |
-                            BIT(NL80211_STA_INFO_TX_BYTES64)) &&
+       if (sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
+                            BIT_ULL(NL80211_STA_INFO_TX_BYTES64)) &&
            nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES,
                        (u32)sinfo->tx_bytes))
                goto nla_put_failure;
@@ -4588,24 +4661,24 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
        default:
                break;
        }
-       if (sinfo->filled & BIT(NL80211_STA_INFO_CHAIN_SIGNAL)) {
+       if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) {
                if (!nl80211_put_signal(msg, sinfo->chains,
                                        sinfo->chain_signal,
                                        NL80211_STA_INFO_CHAIN_SIGNAL))
                        goto nla_put_failure;
        }
-       if (sinfo->filled & BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) {
+       if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) {
                if (!nl80211_put_signal(msg, sinfo->chains,
                                        sinfo->chain_signal_avg,
                                        NL80211_STA_INFO_CHAIN_SIGNAL_AVG))
                        goto nla_put_failure;
        }
-       if (sinfo->filled & BIT(NL80211_STA_INFO_TX_BITRATE)) {
+       if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) {
                if (!nl80211_put_sta_rate(msg, &sinfo->txrate,
                                          NL80211_STA_INFO_TX_BITRATE))
                        goto nla_put_failure;
        }
-       if (sinfo->filled & BIT(NL80211_STA_INFO_RX_BITRATE)) {
+       if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) {
                if (!nl80211_put_sta_rate(msg, &sinfo->rxrate,
                                          NL80211_STA_INFO_RX_BITRATE))
                        goto nla_put_failure;
@@ -4621,7 +4694,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
        PUT_SINFO(PEER_PM, peer_pm, u32);
        PUT_SINFO(NONPEER_PM, nonpeer_pm, u32);
 
-       if (sinfo->filled & BIT(NL80211_STA_INFO_BSS_PARAM)) {
+       if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_BSS_PARAM)) {
                bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
                if (!bss_param)
                        goto nla_put_failure;
@@ -4640,7 +4713,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
 
                nla_nest_end(msg, bss_param);
        }
-       if ((sinfo->filled & BIT(NL80211_STA_INFO_STA_FLAGS)) &&
+       if ((sinfo->filled & BIT_ULL(NL80211_STA_INFO_STA_FLAGS)) &&
            nla_put(msg, NL80211_STA_INFO_STA_FLAGS,
                    sizeof(struct nl80211_sta_flag_update),
                    &sinfo->sta_flags))
@@ -4886,7 +4959,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
                        return -EINVAL;
                if (params->supported_rates)
                        return -EINVAL;
-               if (params->ext_capab || params->ht_capa || params->vht_capa)
+               if (params->ext_capab || params->ht_capa || params->vht_capa ||
+                   params->he_capa)
                        return -EINVAL;
        }
 
@@ -5092,6 +5166,15 @@ static int nl80211_set_station_tdls(struct genl_info *info,
        if (info->attrs[NL80211_ATTR_VHT_CAPABILITY])
                params->vht_capa =
                        nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]);
+       if (info->attrs[NL80211_ATTR_HE_CAPABILITY]) {
+               params->he_capa =
+                       nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
+               params->he_capa_len =
+                       nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
+
+               if (params->he_capa_len < NL80211_HE_MIN_CAPABILITY_LEN)
+                       return -EINVAL;
+       }
 
        err = nl80211_parse_sta_channel_info(info, params);
        if (err)
@@ -5319,6 +5402,17 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
                params.vht_capa =
                        nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]);
 
+       if (info->attrs[NL80211_ATTR_HE_CAPABILITY]) {
+               params.he_capa =
+                       nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
+               params.he_capa_len =
+                       nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
+
+               /* max len is validated in nla policy */
+               if (params.he_capa_len < NL80211_HE_MIN_CAPABILITY_LEN)
+                       return -EINVAL;
+       }
+
        if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) {
                params.opmode_notif_used = true;
                params.opmode_notif =
@@ -5351,6 +5445,10 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
        if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) {
                params.ht_capa = NULL;
                params.vht_capa = NULL;
+
+               /* HE requires WME */
+               if (params.he_capa_len)
+                       return -EINVAL;
        }
 
        /* When you run into this, adjust the code below for the new flag */
@@ -6231,7 +6329,7 @@ do {                                                                          \
                                  nl80211_check_s32);
        /*
         * Check HT operation mode based on
-        * IEEE 802.11 2012 8.4.2.59 HT Operation element.
+        * IEEE 802.11-2016 9.4.2.57 HT Operation element.
         */
        if (tb[NL80211_MESHCONF_HT_OPMODE]) {
                ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]);
@@ -6241,22 +6339,9 @@ do {                                                                         \
                                  IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
                        return -EINVAL;
 
-               if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) &&
-                   (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
-                       return -EINVAL;
+               /* NON_HT_STA bit is reserved, but some programs set it */
+               ht_opmode &= ~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
 
-               switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) {
-               case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
-               case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
-                       if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)
-                               return -EINVAL;
-                       break;
-               case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
-               case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
-                       if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
-                               return -EINVAL;
-                       break;
-               }
                cfg->ht_opmode = ht_opmode;
                mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
        }
@@ -6861,6 +6946,16 @@ static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev)
        return regulatory_pre_cac_allowed(wdev->wiphy);
 }
 
+static bool nl80211_check_scan_feat(struct wiphy *wiphy, u32 flags, u32 flag,
+                                   enum nl80211_ext_feature_index feat)
+{
+       if (!(flags & flag))
+               return true;
+       if (wiphy_ext_feature_isset(wiphy, feat))
+               return true;
+       return false;
+}
+
 static int
 nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev,
                         void *request, struct nlattr **attrs,
@@ -6895,15 +6990,33 @@ nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev,
 
        if (((*flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
             !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) ||
-           ((*flags & NL80211_SCAN_FLAG_LOW_SPAN) &&
-            !wiphy_ext_feature_isset(wiphy,
-                                     NL80211_EXT_FEATURE_LOW_SPAN_SCAN)) ||
-           ((*flags & NL80211_SCAN_FLAG_LOW_POWER) &&
-            !wiphy_ext_feature_isset(wiphy,
-                                     NL80211_EXT_FEATURE_LOW_POWER_SCAN)) ||
-           ((*flags & NL80211_SCAN_FLAG_HIGH_ACCURACY) &&
-            !wiphy_ext_feature_isset(wiphy,
-                                     NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN)))
+           !nl80211_check_scan_feat(wiphy, *flags,
+                                    NL80211_SCAN_FLAG_LOW_SPAN,
+                                    NL80211_EXT_FEATURE_LOW_SPAN_SCAN) ||
+           !nl80211_check_scan_feat(wiphy, *flags,
+                                    NL80211_SCAN_FLAG_LOW_POWER,
+                                    NL80211_EXT_FEATURE_LOW_POWER_SCAN) ||
+           !nl80211_check_scan_feat(wiphy, *flags,
+                                    NL80211_SCAN_FLAG_HIGH_ACCURACY,
+                                    NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN) ||
+           !nl80211_check_scan_feat(wiphy, *flags,
+                                    NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME,
+                                    NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME) ||
+           !nl80211_check_scan_feat(wiphy, *flags,
+                                    NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP,
+                                    NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP) ||
+           !nl80211_check_scan_feat(wiphy, *flags,
+                                    NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION,
+                                    NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) ||
+           !nl80211_check_scan_feat(wiphy, *flags,
+                                    NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE,
+                                    NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE) ||
+           !nl80211_check_scan_feat(wiphy, *flags,
+                                    NL80211_SCAN_FLAG_RANDOM_SN,
+                                    NL80211_EXT_FEATURE_SCAN_RANDOM_SN) ||
+           !nl80211_check_scan_feat(wiphy, *flags,
+                                    NL80211_SCAN_FLAG_MIN_PREQ_CONTENT,
+                                    NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT))
                return -EOPNOTSUPP;
 
        if (*flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
@@ -6918,26 +7031,6 @@ nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev,
                        return err;
        }
 
-       if ((*flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME) &&
-           !wiphy_ext_feature_isset(wiphy,
-                                    NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME))
-               return -EOPNOTSUPP;
-
-       if ((*flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP) &&
-          !wiphy_ext_feature_isset(wiphy,
-                                   NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP))
-               return -EOPNOTSUPP;
-
-       if ((*flags & NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) &&
-           !wiphy_ext_feature_isset(wiphy,
-                                    NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION))
-               return -EOPNOTSUPP;
-
-       if ((*flags & NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE) &&
-           !wiphy_ext_feature_isset(wiphy,
-                                    NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE))
-               return -EOPNOTSUPP;
-
        return 0;
 }
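Note: the hunk above folds the repeated flag/feature capability checks into a single helper, nl80211_check_scan_feat(), which this patch introduces outside the lines shown. A plausible shape for it, assuming it simply pairs a scan flag with its extended-feature bit:

    static bool
    nl80211_check_scan_feat(struct wiphy *wiphy, u32 flags, u32 flag,
                            enum nl80211_ext_feature_index feat)
    {
            if (!(flags & flag))
                    return true;            /* flag not requested: nothing to check */
            return wiphy_ext_feature_isset(wiphy, feat);
    }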
 
@@ -10160,7 +10253,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
                if (err)
                        return err;
 
-               if (sinfo.filled & BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG))
+               if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG))
                        wdev->cqm_config->last_rssi_event_value =
                                (s8) sinfo.rx_beacon_signal_avg;
        }
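Note: sinfo.filled is a 64-bit bitmap, so testing it with BIT() is only safe for bit positions below the width of unsigned long (32 on 32-bit builds); BIT_ULL() always builds a 64-bit mask. Paraphrasing the definitions in include/linux/bits.h:

    #define BIT(nr)      (1UL << (nr))     /* width of unsigned long */
    #define BIT_ULL(nr)  (1ULL << (nr))    /* always 64 bits wide */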
@@ -10962,9 +11055,12 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                                    rem) {
                        u8 *mask_pat;
 
-                       nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
-                                        nl80211_packet_pattern_policy,
-                                        info->extack);
+                       err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+                                              nl80211_packet_pattern_policy,
+                                              info->extack);
+                       if (err)
+                               goto error;
+
                        err = -EINVAL;
                        if (!pat_tb[NL80211_PKTPAT_MASK] ||
                            !pat_tb[NL80211_PKTPAT_PATTERN])
@@ -11213,8 +11309,11 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
                            rem) {
                u8 *mask_pat;
 
-               nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
-                                nl80211_packet_pattern_policy, NULL);
+               err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+                                      nl80211_packet_pattern_policy, NULL);
+               if (err)
+                       return err;
+
                if (!pat_tb[NL80211_PKTPAT_MASK] ||
                    !pat_tb[NL80211_PKTPAT_PATTERN])
                        return -EINVAL;
index 570a2b67ca1036796cc5021a0f0ce546811a4e6f..6ab32f6a19616e0825691ddfa684105b00f00c2c 100644 (file)
@@ -102,7 +102,7 @@ static int wiphy_suspend(struct device *dev)
        struct cfg80211_registered_device *rdev = dev_to_rdev(dev);
        int ret = 0;
 
-       rdev->suspend_at = get_seconds();
+       rdev->suspend_at = ktime_get_boottime_seconds();
 
        rtnl_lock();
        if (rdev->wiphy.registered) {
@@ -130,7 +130,7 @@ static int wiphy_resume(struct device *dev)
        int ret = 0;
 
        /* Age scan results with time spent in suspend */
-       cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
+       cfg80211_bss_age(rdev, ktime_get_boottime_seconds() - rdev->suspend_at);
 
        rtnl_lock();
        if (rdev->wiphy.registered && rdev->ops->resume)
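Note: get_seconds() reads the wall clock, which can jump and overflows a 32-bit unsigned long in 2038; ktime_get_boottime_seconds() is monotonic and keeps counting across suspend, which is exactly the interval the resume path uses to age scan results. Paraphrased signatures (rdev->suspend_at presumably widens to time64_t in the same patch):

    unsigned long get_seconds(void);              /* wall clock, y2038-unsafe */
    time64_t ktime_get_boottime_seconds(void);    /* monotonic, includes suspend */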
index 3c654cd7ba562ad874c7176960c688b53fb80f61..e0825a019e9fb255adc2f4f749b08e241b2c2dde 100644 (file)
@@ -4,6 +4,7 @@
  *
  * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright 2017      Intel Deutschland GmbH
  */
 #include <linux/export.h>
 #include <linux/bitops.h>
@@ -1142,6 +1143,85 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
        return 0;
 }
 
+static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
+{
+#define SCALE 2048
+       u16 mcs_divisors[12] = {
+               34133, /* 16.666666... */
+               17067, /*  8.333333... */
+               11378, /*  5.555555... */
+                8533, /*  4.166666... */
+                5689, /*  2.777777... */
+                4267, /*  2.083333... */
+                3923, /*  1.851851... */
+                3413, /*  1.666666... */
+                2844, /*  1.388888... */
+                2560, /*  1.250000... */
+                2276, /*  1.111111... */
+                2048, /*  1.000000... */
+       };
+       u32 rates_160M[3] = { 960777777, 907400000, 816666666 };
+       u32 rates_969[3] =  { 480388888, 453700000, 408333333 };
+       u32 rates_484[3] =  { 229411111, 216666666, 195000000 };
+       u32 rates_242[3] =  { 114711111, 108333333,  97500000 };
+       u32 rates_106[3] =  {  40000000,  37777777,  34000000 };
+       u32 rates_52[3]  =  {  18820000,  17777777,  16000000 };
+       u32 rates_26[3]  =  {   9411111,   8888888,   8000000 };
+       u64 tmp;
+       u32 result;
+
+       if (WARN_ON_ONCE(rate->mcs > 11))
+               return 0;
+
+       if (WARN_ON_ONCE(rate->he_gi > NL80211_RATE_INFO_HE_GI_3_2))
+               return 0;
+       if (WARN_ON_ONCE(rate->he_ru_alloc >
+                        NL80211_RATE_INFO_HE_RU_ALLOC_2x996))
+               return 0;
+       if (WARN_ON_ONCE(rate->nss < 1 || rate->nss > 8))
+               return 0;
+
+       if (rate->bw == RATE_INFO_BW_160)
+               result = rates_160M[rate->he_gi];
+       else if (rate->bw == RATE_INFO_BW_80 ||
+                (rate->bw == RATE_INFO_BW_HE_RU &&
+                 rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_996))
+               result = rates_969[rate->he_gi];
+       else if (rate->bw == RATE_INFO_BW_40 ||
+                (rate->bw == RATE_INFO_BW_HE_RU &&
+                 rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_484))
+               result = rates_484[rate->he_gi];
+       else if (rate->bw == RATE_INFO_BW_20 ||
+                (rate->bw == RATE_INFO_BW_HE_RU &&
+                 rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_242))
+               result = rates_242[rate->he_gi];
+       else if (rate->bw == RATE_INFO_BW_HE_RU &&
+                rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_106)
+               result = rates_106[rate->he_gi];
+       else if (rate->bw == RATE_INFO_BW_HE_RU &&
+                rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_52)
+               result = rates_52[rate->he_gi];
+       else if (rate->bw == RATE_INFO_BW_HE_RU &&
+                rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26)
+               result = rates_26[rate->he_gi];
+       else if (WARN(1, "invalid HE MCS: bw:%d, ru:%d\n",
+                     rate->bw, rate->he_ru_alloc))
+               return 0;
+
+       /* now scale to the appropriate MCS */
+       tmp = result;
+       tmp *= SCALE;
+       do_div(tmp, mcs_divisors[rate->mcs]);
+       result = tmp;
+
+       /* and take NSS, DCM into account */
+       result = (result * rate->nss) / 8;
+       if (rate->he_dcm)
+               result /= 2;
+
+       return result;
+}
+
 u32 cfg80211_calculate_bitrate(struct rate_info *rate)
 {
        if (rate->flags & RATE_INFO_FLAGS_MCS)
@@ -1150,6 +1230,8 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate)
                return cfg80211_calculate_bitrate_60g(rate);
        if (rate->flags & RATE_INFO_FLAGS_VHT_MCS)
                return cfg80211_calculate_bitrate_vht(rate);
+       if (rate->flags & RATE_INFO_FLAGS_HE_MCS)
+               return cfg80211_calculate_bitrate_he(rate);
 
        return rate->legacy;
 }
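Note: the new HE path starts from a base rate for the given bandwidth/RU and guard interval, scales it down by a fixed-point MCS divisor (SCALE/divisor), then by the stream count. A worked example, under the reading that the tables encode the 8-stream MCS 11 rate in units of 10 bit/s: 80 MHz, 0.8 us GI, MCS 7, NSS 2, no DCM:

    result = 480388888;                  /* rates_969[0]                  */
    result = 480388888 * 2048 / 3413;    /* MCS 7 divisor -> ~288261483   */
    result = result * 2 / 8;             /* 2 spatial streams -> ~72065370 */
                                         /* i.e. ~720.6 Mbit/s            */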
@@ -1791,8 +1873,9 @@ bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
 
 int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp)
 {
-       sinfo->pertid = kcalloc(sizeof(*(sinfo->pertid)),
-                               IEEE80211_NUM_TIDS + 1, gfp);
+       sinfo->pertid = kcalloc(IEEE80211_NUM_TIDS + 1,
+                               sizeof(*(sinfo->pertid)),
+                               gfp);
        if (!sinfo->pertid)
                return -ENOMEM;
 
index 05186a47878fe93b87807130b00d978d3a6d9bc5..167f7025ac98288acbd57cd4627b1eb7fb2f6520 100644 (file)
@@ -1278,7 +1278,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
        if (err)
                return err;
 
-       if (!(sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE)))
+       if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)))
                return -EOPNOTSUPP;
 
        rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
@@ -1320,7 +1320,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
 
        switch (rdev->wiphy.signal_type) {
        case CFG80211_SIGNAL_TYPE_MBM:
-               if (sinfo.filled & BIT(NL80211_STA_INFO_SIGNAL)) {
+               if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) {
                        int sig = sinfo.signal;
                        wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
                        wstats.qual.updated |= IW_QUAL_QUAL_UPDATED;
@@ -1334,7 +1334,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
                        break;
                }
        case CFG80211_SIGNAL_TYPE_UNSPEC:
-               if (sinfo.filled & BIT(NL80211_STA_INFO_SIGNAL)) {
+               if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) {
                        wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
                        wstats.qual.updated |= IW_QUAL_QUAL_UPDATED;
                        wstats.qual.level = sinfo.signal;
@@ -1347,9 +1347,9 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
        }
 
        wstats.qual.updated |= IW_QUAL_NOISE_INVALID;
-       if (sinfo.filled & BIT(NL80211_STA_INFO_RX_DROP_MISC))
+       if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC))
                wstats.discard.misc = sinfo.rx_dropped_misc;
-       if (sinfo.filled & BIT(NL80211_STA_INFO_TX_FAILED))
+       if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))
                wstats.discard.retries = sinfo.tx_failed;
 
        return &wstats;
index f93365ae0fdd76b6aab9b6227cfcbb96f41eed82..d49aa79b79970d403b5c165d4000b2aa1d493442 100644 (file)
@@ -1750,7 +1750,7 @@ static const struct proto_ops x25_proto_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       x25_accept,
        .getname =      x25_getname,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        x25_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = compat_x25_ioctl,
index 36919a254ba370c37b4e199bfd68c285e25fdeb6..59fb7d3c36a34089095a81a979ecf64b8b152e7f 100644 (file)
@@ -118,6 +118,9 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
        u64 addr;
        int err;
 
+       if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
+               return -EINVAL;
+
        if (!xskq_peek_addr(xs->umem->fq, &addr) ||
            len > xs->umem->chunk_size_nohr) {
                xs->rx_dropped++;
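Note: the added check rejects packets arriving on any device/queue other than the one the AF_XDP socket was bound to. The binding happens on the userspace side; a minimal sketch:

    /* userspace (sketch): bind the AF_XDP socket to one dev/queue pair,
     * which the kernel check above then enforces on the receive path */
    struct sockaddr_xdp sxdp = {
            .sxdp_family = AF_XDP,
            .sxdp_ifindex = ifindex,      /* must match xs->dev      */
            .sxdp_queue_id = queue_id,    /* must match xs->queue_id */
    };
    bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));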
@@ -300,9 +303,10 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
        return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
 }
 
-static __poll_t xsk_poll_mask(struct socket *sock, __poll_t events)
+static unsigned int xsk_poll(struct file *file, struct socket *sock,
+                            struct poll_table_struct *wait)
 {
-       __poll_t mask = datagram_poll_mask(sock, events);
+       unsigned int mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
 
@@ -693,7 +697,7 @@ static const struct proto_ops xsk_proto_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
-       .poll_mask      = xsk_poll_mask,
+       .poll           = xsk_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
index 1303af10e54d5d44de7c4789a48c030ff2646e66..9ea2f7b648696e85c5ad097d5d09d6be5f2df51f 100644 (file)
@@ -52,6 +52,7 @@ hostprogs-y += xdp_adjust_tail
 hostprogs-y += xdpsock
 hostprogs-y += xdp_fwd
 hostprogs-y += task_fd_query
+hostprogs-y += xdp_sample_pkts
 
 # Libbpf dependencies
 LIBBPF = $(TOOLS_PATH)/lib/bpf/libbpf.a
@@ -107,6 +108,7 @@ xdp_adjust_tail-objs := xdp_adjust_tail_user.o
 xdpsock-objs := bpf_load.o xdpsock_user.o
 xdp_fwd-objs := bpf_load.o xdp_fwd_user.o
 task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS)
+xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
@@ -163,6 +165,7 @@ always += xdp_adjust_tail_kern.o
 always += xdpsock_kern.o
 always += xdp_fwd_kern.o
 always += task_fd_query_kern.o
+always += xdp_sample_pkts_kern.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
 HOSTCFLAGS += -I$(srctree)/tools/lib/
@@ -179,6 +182,7 @@ HOSTCFLAGS_spintest_user.o += -I$(srctree)/tools/lib/bpf/
 HOSTCFLAGS_trace_event_user.o += -I$(srctree)/tools/lib/bpf/
 HOSTCFLAGS_sampleip_user.o += -I$(srctree)/tools/lib/bpf/
 HOSTCFLAGS_task_fd_query_user.o += -I$(srctree)/tools/lib/bpf/
+HOSTCFLAGS_xdp_sample_pkts_user.o += -I$(srctree)/tools/lib/bpf/
 
 HOST_LOADLIBES         += $(LIBBPF) -lelf
 HOSTLOADLIBES_tracex4          += -lrt
index 6673cdb9f55cab3fb32faaca755f805e8c10ed8f..a7e94e7ff87df5f60f7a57522de77b5929e46029 100644 (file)
@@ -48,9 +48,9 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
        struct ethhdr *eth = data;
        struct ipv6hdr *ip6h;
        struct iphdr *iph;
-       int out_index;
        u16 h_proto;
        u64 nh_off;
+       int rc;
 
        nh_off = sizeof(*eth);
        if (data + nh_off > data_end)
@@ -101,7 +101,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
 
        fib_params.ifindex = ctx->ingress_ifindex;
 
-       out_index = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
+       rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
 
        /* verify egress index has xdp support
         * TO-DO bpf_map_lookup_elem(&tx_port, &key) fails with
@@ -109,7 +109,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
         * NOTE: without verifying that the egress index supports XDP
         *       forwarding, packets are dropped.
         */
-       if (out_index > 0) {
+       if (rc == 0) {
                if (h_proto == htons(ETH_P_IP))
                        ip_decrease_ttl(iph);
                else if (h_proto == htons(ETH_P_IPV6))
@@ -117,7 +117,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
 
                memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN);
                memcpy(eth->h_source, fib_params.smac, ETH_ALEN);
-               return bpf_redirect_map(&tx_port, out_index, 0);
+               return bpf_redirect_map(&tx_port, fib_params.ifindex, 0);
        }
 
        return XDP_PASS;
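Note: bpf_fib_lookup() no longer returns the egress ifindex; it returns a status code (0 on success) and fills fib_params.ifindex with the egress device, so the sample now redirects via fib_params.ifindex. The status values, roughly as defined by the companion uapi change:

    enum {
            BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful            */
            BPF_FIB_LKUP_RET_BLACKHOLE,    /* dest is blackholed           */
            BPF_FIB_LKUP_RET_UNREACHABLE,  /* dest is unreachable          */
            BPF_FIB_LKUP_RET_PROHIBIT,     /* dest not allowed             */
            BPF_FIB_LKUP_RET_NOT_FWDED,    /* packet is not forwarded      */
            BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding not enabled on ingress */
            BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation   */
            BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nexthop */
            BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation needed to fwd  */
    };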
index 303e9e7161f3169ebcad88974a0d549f01e7db30..8cb703671b04687d17bd1d7c90b9a3e7f2fb193c 100644 (file)
@@ -134,7 +134,16 @@ bool parse_eth(struct ethhdr *eth, void *data_end,
                        return false;
                eth_type = vlan_hdr->h_vlan_encapsulated_proto;
        }
-       /* TODO: Handle double VLAN tagged packet */
+       /* Handle double VLAN tagged packet */
+       if (eth_type == htons(ETH_P_8021Q) || eth_type == htons(ETH_P_8021AD)) {
+               struct vlan_hdr *vlan_hdr;
+
+               vlan_hdr = (void *)eth + offset;
+               offset += sizeof(*vlan_hdr);
+               if ((void *)eth + offset > data_end)
+                       return false;
+               eth_type = vlan_hdr->h_vlan_encapsulated_proto;
+       }
 
        *eth_proto = ntohs(eth_type);
        *l3_offset = offset;
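Note: with the single- and double-tag cases now written out twice, a loop is the natural generalization; the BPF verifier at this point required loops to be fully unrolled, so a bounded variant might look like this sketch:

    /* sketch: unrolled loop over up to two stacked VLAN tags */
    struct vlan_hdr *vlan_hdr;
    #pragma unroll
    for (int i = 0; i < 2; i++) {
            if (eth_type != htons(ETH_P_8021Q) &&
                eth_type != htons(ETH_P_8021AD))
                    break;
            vlan_hdr = (void *)eth + offset;
            offset += sizeof(*vlan_hdr);
            if ((void *)eth + offset > data_end)
                    return false;
            eth_type = vlan_hdr->h_vlan_encapsulated_proto;
    }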
index 3fd2092916537b7ec4e5d00f25836d9c42729a53..222a83eed1cbf0b213998a47153334b3fd4127b9 100644 (file)
@@ -4,6 +4,8 @@
  *  Example howto extract XDP RX-queue info
  */
 #include <uapi/linux/bpf.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/in.h>
 #include "bpf_helpers.h"
 
 /* Config setup from userspace */
 struct config {
        __u32 action;
        int ifindex;
+       __u32 options;
+};
+enum cfg_options_flags {
+       NO_TOUCH = 0x0U,
+       READ_MEM = 0x1U,
+       SWAP_MAC = 0x2U,
 };
 struct bpf_map_def SEC("maps") config_map = {
        .type           = BPF_MAP_TYPE_ARRAY,
@@ -45,6 +53,23 @@ struct bpf_map_def SEC("maps") rx_queue_index_map = {
        .max_entries    = MAX_RXQs + 1,
 };
 
+static __always_inline
+void swap_src_dst_mac(void *data)
+{
+       unsigned short *p = data;
+       unsigned short dst[3];
+
+       dst[0] = p[0];
+       dst[1] = p[1];
+       dst[2] = p[2];
+       p[0] = p[3];
+       p[1] = p[4];
+       p[2] = p[5];
+       p[3] = dst[0];
+       p[4] = dst[1];
+       p[5] = dst[2];
+}
+
 SEC("xdp_prog0")
 int  xdp_prognum0(struct xdp_md *ctx)
 {
@@ -90,6 +115,24 @@ int  xdp_prognum0(struct xdp_md *ctx)
        if (key == MAX_RXQs)
                rxq_rec->issue++;
 
+       /* Default: Don't touch packet data, only count packets */
+       if (unlikely(config->options & (READ_MEM|SWAP_MAC))) {
+               struct ethhdr *eth = data;
+
+               if (eth + 1 > data_end)
+                       return XDP_ABORTED;
+
+               /* Avoid the compiler removing this: drop non-802.3 Ethertypes */
+               if (ntohs(eth->h_proto) < ETH_P_802_3_MIN)
+                       return XDP_ABORTED;
+
+               /* XDP_TX requires changing MAC-addrs, else HW may drop.
+                * Can also be enabled with --swapmac (for test purposes)
+                */
+               if (unlikely(config->options & SWAP_MAC))
+                       swap_src_dst_mac(data);
+       }
+
        return config->action;
 }
 
index e4e9ba52bff02c457410543424b0f90148a01730..248a7eab9531eba322f6fb5b404c9a26a516adba 100644 (file)
@@ -50,6 +50,8 @@ static const struct option long_options[] = {
        {"sec",         required_argument,      NULL, 's' },
        {"no-separators", no_argument,          NULL, 'z' },
        {"action",      required_argument,      NULL, 'a' },
+       {"readmem",     no_argument,            NULL, 'r' },
+       {"swapmac",     no_argument,            NULL, 'm' },
        {0, 0, NULL,  0 }
 };
 
@@ -66,6 +68,12 @@ static void int_exit(int sig)
 struct config {
        __u32 action;
        int ifindex;
+       __u32 options;
+};
+enum cfg_options_flags {
+       NO_TOUCH = 0x0U,
+       READ_MEM = 0x1U,
+       SWAP_MAC = 0x2U,
 };
 #define XDP_ACTION_MAX (XDP_TX + 1)
 #define XDP_ACTION_MAX_STRLEN 11
@@ -109,6 +117,18 @@ static void list_xdp_actions(void)
        printf("\n");
 }
 
+static char *options2str(enum cfg_options_flags flag)
+{
+       if (flag == NO_TOUCH)
+               return "no_touch";
+       if (flag & SWAP_MAC)
+               return "swapmac";
+       if (flag & READ_MEM)
+               return "read";
+       fprintf(stderr, "ERR: Unknown config option flags\n");
+       exit(EXIT_FAIL);
+}
+
 static void usage(char *argv[])
 {
        int i;
@@ -305,7 +325,7 @@ static __u64 calc_errs_pps(struct datarec *r,
 
 static void stats_print(struct stats_record *stats_rec,
                        struct stats_record *stats_prev,
-                       int action)
+                       int action, __u32 cfg_opt)
 {
        unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
        unsigned int nr_cpus = bpf_num_possible_cpus();
@@ -316,8 +336,8 @@ static void stats_print(struct stats_record *stats_rec,
        int i;
 
        /* Header */
-       printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s\n",
-              ifname, ifindex, action2str(action));
+       printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s options:%s\n",
+              ifname, ifindex, action2str(action), options2str(cfg_opt));
 
        /* stats_global_map */
        {
@@ -399,7 +419,7 @@ static inline void swap(struct stats_record **a, struct stats_record **b)
        *b = tmp;
 }
 
-static void stats_poll(int interval, int action)
+static void stats_poll(int interval, int action, __u32 cfg_opt)
 {
        struct stats_record *record, *prev;
 
@@ -410,7 +430,7 @@ static void stats_poll(int interval, int action)
        while (1) {
                swap(&prev, &record);
                stats_collect(record);
-               stats_print(record, prev, action);
+               stats_print(record, prev, action, cfg_opt);
                sleep(interval);
        }
 
@@ -421,6 +441,7 @@ static void stats_poll(int interval, int action)
 
 int main(int argc, char **argv)
 {
+       __u32 cfg_options = NO_TOUCH; /* Default: Don't touch packet memory */
        struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
        struct bpf_prog_load_attr prog_load_attr = {
                .prog_type      = BPF_PROG_TYPE_XDP,
@@ -435,6 +456,7 @@ int main(int argc, char **argv)
        int interval = 2;
        __u32 key = 0;
 
+
        char action_str_buf[XDP_ACTION_MAX_STRLEN + 1 /* for \0 */] = { 0 };
        int action = XDP_PASS; /* Default action */
        char *action_str = NULL;
@@ -496,6 +518,12 @@ int main(int argc, char **argv)
                        action_str = (char *)&action_str_buf;
                        strncpy(action_str, optarg, XDP_ACTION_MAX_STRLEN);
                        break;
+               case 'r':
+                       cfg_options |= READ_MEM;
+                       break;
+               case 'm':
+                       cfg_options |= SWAP_MAC;
+                       break;
                case 'h':
                error:
                default:
@@ -523,6 +551,11 @@ int main(int argc, char **argv)
        }
        cfg.action = action;
 
+       /* XDP_TX requires changing MAC-addrs, else HW may drop */
+       if (action == XDP_TX)
+               cfg_options |= SWAP_MAC;
+       cfg.options = cfg_options;
+
        /* Trick to pretty printf with thousands separators: use %' */
        if (use_separators)
                setlocale(LC_NUMERIC, "en_US");
@@ -542,6 +575,6 @@ int main(int argc, char **argv)
                return EXIT_FAIL_XDP;
        }
 
-       stats_poll(interval, action);
+       stats_poll(interval, action, cfg_options);
        return EXIT_OK;
 }
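Note: a plausible invocation of the extended sample (the --dev option is part of the existing program, outside the lines shown; the interface name is illustrative):

    sudo ./xdp_rxq_info --dev eth0 --action XDP_DROP --readmem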
diff --git a/samples/bpf/xdp_sample_pkts_kern.c b/samples/bpf/xdp_sample_pkts_kern.c
new file mode 100644 (file)
index 0000000..f7ca8b8
--- /dev/null
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+#define SAMPLE_SIZE 64ul
+#define MAX_CPUS 128
+
+#define bpf_printk(fmt, ...)                                   \
+({                                                             \
+              char ____fmt[] = fmt;                            \
+              bpf_trace_printk(____fmt, sizeof(____fmt),       \
+                               ##__VA_ARGS__);                 \
+})
+
+struct bpf_map_def SEC("maps") my_map = {
+       .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+       .key_size = sizeof(int),
+       .value_size = sizeof(u32),
+       .max_entries = MAX_CPUS,
+};
+
+SEC("xdp_sample")
+int xdp_sample_prog(struct xdp_md *ctx)
+{
+       void *data_end = (void *)(long)ctx->data_end;
+       void *data = (void *)(long)ctx->data;
+
+       /* Metadata will be in the perf event before the packet data. */
+       struct S {
+               u16 cookie;
+               u16 pkt_len;
+       } __packed metadata;
+
+       if (data < data_end) {
+               /* The XDP perf_event_output handler will use the upper 32 bits
+                * of the flags argument as a number of bytes to include of the
+                * packet payload in the event data. If the size is too big, the
+                * call to bpf_perf_event_output will fail and return -EFAULT.
+                *
+                * See bpf_xdp_event_output in net/core/filter.c.
+                *
+                * The BPF_F_CURRENT_CPU flag means that the event output fd
+                * will be indexed by the CPU number in the event map.
+                */
+               u64 flags = BPF_F_CURRENT_CPU;
+               u16 sample_size;
+               int ret;
+
+               metadata.cookie = 0xdead;
+               metadata.pkt_len = (u16)(data_end - data);
+               sample_size = min(metadata.pkt_len, SAMPLE_SIZE);
+               flags |= (u64)sample_size << 32;
+
+               ret = bpf_perf_event_output(ctx, &my_map, flags,
+                                           &metadata, sizeof(metadata));
+               if (ret)
+                       bpf_printk("perf_event_output failed: %d\n", ret);
+       }
+
+       return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c
new file mode 100644 (file)
index 0000000..8dd87c1
--- /dev/null
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <linux/perf_event.h>
+#include <linux/bpf.h>
+#include <net/if.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/sysinfo.h>
+#include <sys/ioctl.h>
+#include <signal.h>
+#include <libbpf.h>
+#include <bpf/bpf.h>
+
+#include "perf-sys.h"
+#include "trace_helpers.h"
+
+#define MAX_CPUS 128
+static int pmu_fds[MAX_CPUS], if_idx;
+static struct perf_event_mmap_page *headers[MAX_CPUS];
+static char *if_name;
+
+static int do_attach(int idx, int fd, const char *name)
+{
+       int err;
+
+       err = bpf_set_link_xdp_fd(idx, fd, 0);
+       if (err < 0)
+               printf("ERROR: failed to attach program to %s\n", name);
+
+       return err;
+}
+
+static int do_detach(int idx, const char *name)
+{
+       int err;
+
+       err = bpf_set_link_xdp_fd(idx, -1, 0);
+       if (err < 0)
+               printf("ERROR: failed to detach program from %s\n", name);
+
+       return err;
+}
+
+#define SAMPLE_SIZE 64
+
+static int print_bpf_output(void *data, int size)
+{
+       struct {
+               __u16 cookie;
+               __u16 pkt_len;
+               __u8  pkt_data[SAMPLE_SIZE];
+       } __packed *e = data;
+       int i;
+
+       if (e->cookie != 0xdead) {
+               printf("BUG cookie %x sized %d\n",
+                      e->cookie, size);
+               return LIBBPF_PERF_EVENT_ERROR;
+       }
+
+       printf("Pkt len: %-5d bytes. Ethernet hdr: ", e->pkt_len);
+       for (i = 0; i < 14 && i < e->pkt_len; i++)
+               printf("%02x ", e->pkt_data[i]);
+       printf("\n");
+
+       return LIBBPF_PERF_EVENT_CONT;
+}
+
+static void test_bpf_perf_event(int map_fd, int num)
+{
+       struct perf_event_attr attr = {
+               .sample_type = PERF_SAMPLE_RAW,
+               .type = PERF_TYPE_SOFTWARE,
+               .config = PERF_COUNT_SW_BPF_OUTPUT,
+               .wakeup_events = 1, /* get an fd notification for every event */
+       };
+       int i;
+
+       for (i = 0; i < num; i++) {
+               int key = i;
+
+               pmu_fds[i] = sys_perf_event_open(&attr, -1/*pid*/, i/*cpu*/,
+                                                -1/*group_fd*/, 0);
+
+               assert(pmu_fds[i] >= 0);
+               assert(bpf_map_update_elem(map_fd, &key,
+                                          &pmu_fds[i], BPF_ANY) == 0);
+               ioctl(pmu_fds[i], PERF_EVENT_IOC_ENABLE, 0);
+       }
+}
+
+static void sig_handler(int signo)
+{
+       do_detach(if_idx, if_name);
+       exit(0);
+}
+
+int main(int argc, char **argv)
+{
+       struct bpf_prog_load_attr prog_load_attr = {
+               .prog_type      = BPF_PROG_TYPE_XDP,
+       };
+       struct bpf_object *obj;
+       struct bpf_map *map;
+       int prog_fd, map_fd;
+       char filename[256];
+       int ret, err, i;
+       int numcpus;
+
+       if (argc < 2) {
+               printf("Usage: %s <ifname>\n", argv[0]);
+               return 1;
+       }
+
+       numcpus = get_nprocs();
+       if (numcpus > MAX_CPUS)
+               numcpus = MAX_CPUS;
+
+       snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+       prog_load_attr.file = filename;
+
+       if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
+               return 1;
+
+       if (!prog_fd) {
+               printf("load_bpf_file: %s\n", strerror(errno));
+               return 1;
+       }
+
+       map = bpf_map__next(NULL, obj);
+       if (!map) {
+               printf("finding a map in obj file failed\n");
+               return 1;
+       }
+       map_fd = bpf_map__fd(map);
+
+       if_idx = if_nametoindex(argv[1]);
+       if (!if_idx)
+               if_idx = strtoul(argv[1], NULL, 0);
+
+       if (!if_idx) {
+               fprintf(stderr, "Invalid ifname\n");
+               return 1;
+       }
+       if_name = argv[1];
+       err = do_attach(if_idx, prog_fd, argv[1]);
+       if (err)
+               return err;
+
+       if (signal(SIGINT, sig_handler) ||
+           signal(SIGHUP, sig_handler) ||
+           signal(SIGTERM, sig_handler)) {
+               perror("signal");
+               return 1;
+       }
+
+       test_bpf_perf_event(map_fd, numcpus);
+
+       for (i = 0; i < numcpus; i++)
+               if (perf_event_mmap_header(pmu_fds[i], &headers[i]) < 0)
+                       return 1;
+
+       ret = perf_event_poller_multi(pmu_fds, headers, numcpus,
+                                     print_bpf_output);
+       kill(0, SIGINT);
+       return ret;
+}
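Note: a minimal run of the new sample, per the usage string above (interface name illustrative):

    sudo ./xdp_sample_pkts eth0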
index 34d9e9ce97c29c5e0ca78e6b06085a6b29ffd8bd..e7889f486ca1fdced1cba49f3623ce7f742d2193 100644 (file)
@@ -239,6 +239,7 @@ cmd_record_mcount =                                         \
             "$(CC_FLAGS_FTRACE)" ]; then                       \
                $(sub_cmd_record_mcount)                        \
        fi;
+endif # -record-mcount
 endif # CONFIG_FTRACE_MCOUNT_RECORD
 
 ifdef CONFIG_STACK_VALIDATION
@@ -263,7 +264,6 @@ ifneq ($(RETPOLINE_CFLAGS),)
   objtool_args += --retpoline
 endif
 endif
-endif
 
 
 ifdef CONFIG_MODVERSIONS
index 208eb2825dab017a9d3fdc0bdb8beef053b5626d..6efcead3198989d2ab2ab6772c72d8bb61c89c4e 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
-cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1 && echo "y"
+cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1
 #include <stdio.h>
 int main(void)
 {
index e3b7362b0ee457b9601a8628609d8ebf78fcb09a..a9c05506e325fc8407d915e23aece818bb6fdb2d 100755 (executable)
@@ -2606,12 +2606,6 @@ sub process {
                             "A patch subject line should describe the change not the tool that found it\n" . $herecurr);
                }
 
-# Check for old stable address
-               if ($line =~ /^\s*cc:\s*.*<?\bstable\@kernel\.org\b>?.*$/i) {
-                       ERROR("STABLE_ADDRESS",
-                             "The 'stable' address should be 'stable\@vger.kernel.org'\n" . $herecurr);
-               }
-
 # Check for unwanted Gerrit info
                if ($in_commit_log && $line =~ /^\s*change-id:/i) {
                        ERROR("GERRIT_CHANGE_ID",
index 3755af0cd9f7f24c1942fd9df216c525f79d04b3..75e4e22b986adcfd07197777c5d59d5601d3c920 100755 (executable)
@@ -1,4 +1,4 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
index 94a383b21df6405f4a9f6b6c08758d6c822381d8..f63b41b0dd498d23b65b3c12fe47e3b2c87e148d 100644 (file)
@@ -171,6 +171,9 @@ struct symbol {
  * config BAZ
  *         int "BAZ Value"
  *         range 1..255
+ *
+ * Please also check zconf.y:print_symbol() when modifying the
+ * list of property types!
  */
 enum prop_type {
        P_UNKNOWN,
index 65da87fce907ad2bc7b52adba4651dc2c32786be..5ca2df790d3cfa5f4253a33a303219aaa8fc4394 100644 (file)
@@ -156,7 +156,7 @@ static char *do_shell(int argc, char *argv[])
                nread--;
 
        /* remove trailing new lines */
-       while (buf[nread - 1] == '\n')
+       while (nread > 0 && buf[nread - 1] == '\n')
                nread--;
 
        buf[nread] = 0;
index 6f9b0aa32a82239b2bc1540d1b75949859ea48e9..4b68272ebdb96cb25e8d91de0d4cbc792a3079b6 100644 (file)
@@ -31,7 +31,7 @@ struct symbol *symbol_hash[SYMBOL_HASHSIZE];
 static struct menu *current_menu, *current_entry;
 
 %}
-%expect 32
+%expect 31
 
 %union
 {
@@ -337,7 +337,7 @@ choice_block:
 
 /* if entry */
 
-if_entry: T_IF expr nl
+if_entry: T_IF expr T_EOL
 {
        printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno());
        menu_add_entry(NULL);
@@ -717,6 +717,10 @@ static void print_symbol(FILE *out, struct menu *menu)
                        print_quoted_string(out, prop->text);
                        fputc('\n', out);
                        break;
+               case P_SYMBOL:
+                       fputs("  symbol ", out);
+                       fprintf(out, "%s\n", prop->sym->name);
+                       break;
                default:
                        fprintf(out, "  unknown prop %d!\n", prop->type);
                        break;
index f7403821db7f0aafdec4a2e9a6804b1b8c2a599b..b203f7758f9765f056c3e0d07e0286c49b181253 100644 (file)
@@ -142,6 +142,8 @@ static void kdf_dealloc(struct kdf_sdesc *sdesc)
  * The src pointer is defined as Z || other info where Z is the shared secret
  * from DH and other info is an arbitrary string (see SP800-56A section
  * 5.8.1.2).
+ *
+ * 'dlen' must be a multiple of the digest size.
  */
 static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
                   u8 *dst, unsigned int dlen, unsigned int zlen)
@@ -205,8 +207,8 @@ static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc,
 {
        uint8_t *outbuf = NULL;
        int ret;
-       size_t outbuf_len = round_up(buflen,
-                                    crypto_shash_digestsize(sdesc->shash.tfm));
+       size_t outbuf_len = roundup(buflen,
+                                   crypto_shash_digestsize(sdesc->shash.tfm));
 
        outbuf = kmalloc(outbuf_len, GFP_KERNEL);
        if (!outbuf) {
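Note: round_up() is the power-of-two variant (it works by OR-ing in a mask), so it misbehaves for digest sizes that are not powers of two, e.g. SHA-384's 48 bytes; roundup() divides and multiplies and is correct for any step. Paraphrasing include/linux/kernel.h:

    #define roundup(x, y)  ((((x) + ((y) - 1)) / (y)) * (y))  /* any y        */
    #define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)      /* y must be 2^n */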
index f3d374d2ca045ce7325b20ad3cecb6304418d1b3..79d3709b06717a1f6452fe85b9922244b9f70381 100644 (file)
@@ -441,22 +441,16 @@ static int sel_release_policy(struct inode *inode, struct file *filp)
 static ssize_t sel_read_policy(struct file *filp, char __user *buf,
                               size_t count, loff_t *ppos)
 {
-       struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
        struct policy_load_memory *plm = filp->private_data;
        int ret;
 
-       mutex_lock(&fsi->mutex);
-
        ret = avc_has_perm(&selinux_state,
                           current_sid(), SECINITSID_SECURITY,
                          SECCLASS_SECURITY, SECURITY__READ_POLICY, NULL);
        if (ret)
-               goto out;
+               return ret;
 
-       ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
-out:
-       mutex_unlock(&fsi->mutex);
-       return ret;
+       return simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
 }
 
 static vm_fault_t sel_mmap_policy_fault(struct vm_fault *vmf)
@@ -1188,25 +1182,29 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
        ret = -EINVAL;
        if (index >= fsi->bool_num || strcmp(name,
                                             fsi->bool_pending_names[index]))
-               goto out;
+               goto out_unlock;
 
        ret = -ENOMEM;
        page = (char *)get_zeroed_page(GFP_KERNEL);
        if (!page)
-               goto out;
+               goto out_unlock;
 
        cur_enforcing = security_get_bool_value(fsi->state, index);
        if (cur_enforcing < 0) {
                ret = cur_enforcing;
-               goto out;
+               goto out_unlock;
        }
        length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing,
                          fsi->bool_pending_values[index]);
-       ret = simple_read_from_buffer(buf, count, ppos, page, length);
-out:
        mutex_unlock(&fsi->mutex);
+       ret = simple_read_from_buffer(buf, count, ppos, page, length);
+out_free:
        free_page((unsigned long)page);
        return ret;
+
+out_unlock:
+       mutex_unlock(&fsi->mutex);
+       goto out_free;
 }
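Note: the restructuring keeps the same behaviour but moves every access to user memory outside fsi->mutex; simple_read_from_buffer() and memdup_user_nul() can fault in user pages, and faulting while holding the mutex risks recursing into mm paths that conflict with it. The resulting pattern, in sketch form:

    mutex_lock(&fsi->mutex);
    length = scnprintf(page, PAGE_SIZE, ...);   /* snapshot under the lock */
    mutex_unlock(&fsi->mutex);
    /* only touch user memory after dropping the lock */
    ret = simple_read_from_buffer(buf, count, ppos, page, length);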
 
 static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
@@ -1219,6 +1217,17 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
        unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
        const char *name = filep->f_path.dentry->d_name.name;
 
+       if (count >= PAGE_SIZE)
+               return -ENOMEM;
+
+       /* No partial writes. */
+       if (*ppos != 0)
+               return -EINVAL;
+
+       page = memdup_user_nul(buf, count);
+       if (IS_ERR(page))
+               return PTR_ERR(page);
+
        mutex_lock(&fsi->mutex);
 
        length = avc_has_perm(&selinux_state,
@@ -1233,22 +1242,6 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
                                             fsi->bool_pending_names[index]))
                goto out;
 
-       length = -ENOMEM;
-       if (count >= PAGE_SIZE)
-               goto out;
-
-       /* No partial writes. */
-       length = -EINVAL;
-       if (*ppos != 0)
-               goto out;
-
-       page = memdup_user_nul(buf, count);
-       if (IS_ERR(page)) {
-               length = PTR_ERR(page);
-               page = NULL;
-               goto out;
-       }
-
        length = -EINVAL;
        if (sscanf(page, "%d", &new_value) != 1)
                goto out;
@@ -1280,6 +1273,17 @@ static ssize_t sel_commit_bools_write(struct file *filep,
        ssize_t length;
        int new_value;
 
+       if (count >= PAGE_SIZE)
+               return -ENOMEM;
+
+       /* No partial writes. */
+       if (*ppos != 0)
+               return -EINVAL;
+
+       page = memdup_user_nul(buf, count);
+       if (IS_ERR(page))
+               return PTR_ERR(page);
+
        mutex_lock(&fsi->mutex);
 
        length = avc_has_perm(&selinux_state,
@@ -1289,22 +1293,6 @@ static ssize_t sel_commit_bools_write(struct file *filep,
        if (length)
                goto out;
 
-       length = -ENOMEM;
-       if (count >= PAGE_SIZE)
-               goto out;
-
-       /* No partial writes. */
-       length = -EINVAL;
-       if (*ppos != 0)
-               goto out;
-
-       page = memdup_user_nul(buf, count);
-       if (IS_ERR(page)) {
-               length = PTR_ERR(page);
-               page = NULL;
-               goto out;
-       }
-
        length = -EINVAL;
        if (sscanf(page, "%d", &new_value) != 1)
                goto out;
index 7ad226018f51674b8e97a5d6ff2aaeabd9163bbd..19de675d4504501f8a48c28302dd9ad552b70877 100644 (file)
@@ -2296,6 +2296,7 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
        struct smack_known *skp = smk_of_task_struct(p);
 
        isp->smk_inode = skp;
+       isp->smk_flags |= SMK_INODE_INSTANT;
 }
 
 /*
index 61a07fe34cd271e60dc0c31a7dddae750c2532b1..56ca78423040f09e6d0569b651ba631105b8bd02 100644 (file)
@@ -2004,7 +2004,8 @@ static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
        struct snd_seq_client *cptr = NULL;
 
        /* search for next client */
-       info->client++;
+       if (info->client < INT_MAX)
+               info->client++;
        if (info->client < 0)
                info->client = 0;
        for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) {
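Note: info->client comes from a userspace ioctl, so a query starting at INT_MAX would increment a signed int past its maximum, which is undefined behaviour in C. The guard saturates instead; the timer change below applies the same pattern to id.subdevice:

    if (info->client < INT_MAX)
            info->client++;     /* saturate instead of overflowing */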
index 665089c455603c0c683144c419981f8215780e85..b6f076bbc72d14be37893e20b19cbfaedf2f728b 100644 (file)
@@ -1520,7 +1520,7 @@ static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
                                } else {
                                        if (id.subdevice < 0)
                                                id.subdevice = 0;
-                                       else
+                                       else if (id.subdevice < INT_MAX)
                                                id.subdevice++;
                                }
                        }
index d91c87e41756ea5fceaee73d84e211b9ebba929d..20a171ac4bb2f7cff9715122c4e959c60fc9b485 100644 (file)
@@ -2899,8 +2899,9 @@ static int hda_codec_runtime_suspend(struct device *dev)
        list_for_each_entry(pcm, &codec->pcm_list_head, list)
                snd_pcm_suspend_all(pcm->pcm);
        state = hda_call_codec_suspend(codec);
-       if (codec_has_clkstop(codec) && codec_has_epss(codec) &&
-           (state & AC_PWRST_CLK_STOP_OK))
+       if (codec->link_down_at_suspend ||
+           (codec_has_clkstop(codec) && codec_has_epss(codec) &&
+            (state & AC_PWRST_CLK_STOP_OK)))
                snd_hdac_codec_link_down(&codec->core);
        snd_hdac_link_power(&codec->core, false);
        return 0;
index 681c360f29f9d628cf4462c9bb7ef92879f27d91..a8b1b31f161c26f739892ea6b52e79ba2ebca291 100644 (file)
@@ -258,6 +258,7 @@ struct hda_codec {
        unsigned int power_save_node:1; /* advanced PM for each widget */
        unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
        unsigned int force_pin_prefix:1; /* Add location prefix */
+       unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
 #ifdef CONFIG_PM
        unsigned long power_on_acct;
        unsigned long power_off_acct;
index 04e949aa01ada5492765cd313608624f3a42c7b9..4ff5320378e22f8e4ef67a80afeb2982431d6fff 100644 (file)
@@ -991,6 +991,7 @@ struct ca0132_spec {
 enum {
        QUIRK_NONE,
        QUIRK_ALIENWARE,
+       QUIRK_ALIENWARE_M17XR4,
        QUIRK_SBZ,
        QUIRK_R3DI,
 };
@@ -1040,6 +1041,7 @@ static const struct hda_pintbl r3di_pincfgs[] = {
 };
 
 static const struct snd_pci_quirk ca0132_quirks[] = {
+       SND_PCI_QUIRK(0x1028, 0x057b, "Alienware M17x R4", QUIRK_ALIENWARE_M17XR4),
        SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
        SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
        SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
@@ -5663,7 +5665,7 @@ static const char * const ca0132_alt_slave_pfxs[] = {
  * I think this has to do with the pin for rear surround being 0x11,
  * and the center/lfe being 0x10. Usually the pin order is the opposite.
  */
-const struct snd_pcm_chmap_elem ca0132_alt_chmaps[] = {
+static const struct snd_pcm_chmap_elem ca0132_alt_chmaps[] = {
        { .channels = 2,
          .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
        { .channels = 4,
@@ -5966,7 +5968,7 @@ static int ca0132_build_pcms(struct hda_codec *codec)
        info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[0];
 
        /* With the DSP enabled, desktops don't use this ADC. */
-       if (spec->use_alt_functions) {
+       if (!spec->use_alt_functions) {
                info = snd_hda_codec_pcm_new(codec, "CA0132 Analog Mic-In2");
                if (!info)
                        return -ENOMEM;
@@ -6130,7 +6132,10 @@ static void ca0132_init_dmic(struct hda_codec *codec)
         * Bit   6: set to select Data2, clear for Data1
         * Bit   7: set to enable DMic, clear for AMic
         */
-       val = 0x23;
+       if (spec->quirk == QUIRK_ALIENWARE_M17XR4)
+               val = 0x33;
+       else
+               val = 0x23;
        /* keep a copy of dmic ctl val for enable/disable dmic purpose */
        spec->dmic_ctl = val;
        snd_hda_codec_write(codec, spec->input_pins[0], 0,
@@ -7223,7 +7228,7 @@ static int ca0132_init(struct hda_codec *codec)
 
        snd_hda_sequence_write(codec, spec->base_init_verbs);
 
-       if (spec->quirk != QUIRK_NONE)
+       if (spec->use_alt_functions)
                ca0132_alt_init(codec);
 
        ca0132_download_dsp(codec);
@@ -7237,8 +7242,9 @@ static int ca0132_init(struct hda_codec *codec)
        case QUIRK_R3DI:
                r3di_setup_defaults(codec);
                break;
-       case QUIRK_NONE:
-       case QUIRK_ALIENWARE:
+       case QUIRK_SBZ:
+               break;
+       default:
                ca0132_setup_defaults(codec);
                ca0132_init_analog_mic2(codec);
                ca0132_init_dmic(codec);
@@ -7343,7 +7349,6 @@ static const struct hda_codec_ops ca0132_patch_ops = {
 static void ca0132_config(struct hda_codec *codec)
 {
        struct ca0132_spec *spec = codec->spec;
-       struct auto_pin_cfg *cfg = &spec->autocfg;
 
        spec->dacs[0] = 0x2;
        spec->dacs[1] = 0x3;
@@ -7405,12 +7410,7 @@ static void ca0132_config(struct hda_codec *codec)
                /* SPDIF I/O */
                spec->dig_out = 0x05;
                spec->multiout.dig_out_nid = spec->dig_out;
-               cfg->dig_out_pins[0] = 0x0c;
-               cfg->dig_outs = 1;
-               cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
                spec->dig_in = 0x09;
-               cfg->dig_in_pin = 0x0e;
-               cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
                break;
        case QUIRK_R3DI:
                codec_dbg(codec, "%s: QUIRK_R3DI applied.\n", __func__);
@@ -7438,9 +7438,6 @@ static void ca0132_config(struct hda_codec *codec)
                /* SPDIF I/O */
                spec->dig_out = 0x05;
                spec->multiout.dig_out_nid = spec->dig_out;
-               cfg->dig_out_pins[0] = 0x0c;
-               cfg->dig_outs = 1;
-               cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
                break;
        default:
                spec->num_outputs = 2;
@@ -7463,12 +7460,7 @@ static void ca0132_config(struct hda_codec *codec)
                /* SPDIF I/O */
                spec->dig_out = 0x05;
                spec->multiout.dig_out_nid = spec->dig_out;
-               cfg->dig_out_pins[0] = 0x0c;
-               cfg->dig_outs = 1;
-               cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
                spec->dig_in = 0x09;
-               cfg->dig_in_pin = 0x0e;
-               cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
                break;
        }
 }
@@ -7476,7 +7468,7 @@ static void ca0132_config(struct hda_codec *codec)
 static int ca0132_prepare_verbs(struct hda_codec *codec)
 {
 /* Verbs + terminator (an empty element) */
-#define NUM_SPEC_VERBS 4
+#define NUM_SPEC_VERBS 2
        struct ca0132_spec *spec = codec->spec;
 
        spec->chip_init_verbs = ca0132_init_verbs0;
@@ -7488,34 +7480,24 @@ static int ca0132_prepare_verbs(struct hda_codec *codec)
        if (!spec->spec_init_verbs)
                return -ENOMEM;
 
-       /* HP jack autodetection */
-       spec->spec_init_verbs[0].nid = spec->unsol_tag_hp;
-       spec->spec_init_verbs[0].param = AC_VERB_SET_UNSOLICITED_ENABLE;
-       spec->spec_init_verbs[0].verb = AC_USRSP_EN | spec->unsol_tag_hp;
-
-       /* MIC1 jack autodetection */
-       spec->spec_init_verbs[1].nid = spec->unsol_tag_amic1;
-       spec->spec_init_verbs[1].param = AC_VERB_SET_UNSOLICITED_ENABLE;
-       spec->spec_init_verbs[1].verb = AC_USRSP_EN | spec->unsol_tag_amic1;
-
        /* config EAPD */
-       spec->spec_init_verbs[2].nid = 0x0b;
-       spec->spec_init_verbs[2].param = 0x78D;
-       spec->spec_init_verbs[2].verb = 0x00;
+       spec->spec_init_verbs[0].nid = 0x0b;
+       spec->spec_init_verbs[0].param = 0x78D;
+       spec->spec_init_verbs[0].verb = 0x00;
 
        /* Previously commented configuration */
        /*
-       spec->spec_init_verbs[3].nid = 0x0b;
-       spec->spec_init_verbs[3].param = AC_VERB_SET_EAPD_BTLENABLE;
+       spec->spec_init_verbs[2].nid = 0x0b;
+       spec->spec_init_verbs[2].param = AC_VERB_SET_EAPD_BTLENABLE;
+       spec->spec_init_verbs[2].verb = 0x02;
+
+       spec->spec_init_verbs[3].nid = 0x10;
+       spec->spec_init_verbs[3].param = 0x78D;
        spec->spec_init_verbs[3].verb = 0x02;
 
        spec->spec_init_verbs[4].nid = 0x10;
-       spec->spec_init_verbs[4].param = 0x78D;
+       spec->spec_init_verbs[4].param = AC_VERB_SET_EAPD_BTLENABLE;
        spec->spec_init_verbs[4].verb = 0x02;
-
-       spec->spec_init_verbs[5].nid = 0x10;
-       spec->spec_init_verbs[5].param = AC_VERB_SET_EAPD_BTLENABLE;
-       spec->spec_init_verbs[5].verb = 0x02;
        */
 
        /* Terminator: spec->spec_init_verbs[NUM_SPEC_VERBS-1] */
index 8840daf9c6a300899efaf02898430d158d5b972a..98e1c411c56abbd36bcd18d1c13fa27f2826bad2 100644 (file)
@@ -3741,6 +3741,11 @@ static int patch_atihdmi(struct hda_codec *codec)
 
        spec->chmap.channels_max = max(spec->chmap.channels_max, 8u);
 
+       /* AMD GPUs have neither EPSS nor CLKSTOP bits, so the link-down
+        * is never triggered as is.  Tell the core to allow it.
+        */
+       codec->link_down_at_suspend = 1;
+
        return 0;
 }
 
index e9bd33ea538f239891c031a1a81e075a35c75043..5ad6c7e5f92efab74718a48e8531e6e84fdc73d8 100644 (file)
@@ -2545,6 +2545,7 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110),
        SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ),
        SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN),
+       SND_PCI_QUIRK(0x1734, 0x1141, "FSC ESPRIMO U9210", ALC262_FIXUP_FSC_H270),
        SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270),
        SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000),
        SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ),
@@ -4995,7 +4996,6 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
        struct alc_spec *spec = codec->spec;
 
        if (action == HDA_FIXUP_ACT_PRE_PROBE) {
-               spec->shutup = alc_no_shutup; /* reduce click noise */
                spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
                spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
                codec->power_save_node = 0; /* avoid click noises */
@@ -5394,6 +5394,13 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
 /* for hda_fixup_thinkpad_acpi() */
 #include "thinkpad_helper.c"
 
+static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       alc_fixup_no_shutup(codec, fix, action); /* reduce click noise */
+       hda_fixup_thinkpad_acpi(codec, fix, action);
+}
+
 /* for dell wmi mic mute led */
 #include "dell_wmi_helper.c"
 
@@ -5946,7 +5953,7 @@ static const struct hda_fixup alc269_fixups[] = {
        },
        [ALC269_FIXUP_THINKPAD_ACPI] = {
                .type = HDA_FIXUP_FUNC,
-               .v.func = hda_fixup_thinkpad_acpi,
+               .v.func = alc_fixup_thinkpad_acpi,
                .chained = true,
                .chain_id = ALC269_FIXUP_SKU_IGNORE,
        },
@@ -6603,8 +6610,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+       SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
-       SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+       SND_PCI_QUIRK(0x17aa, 0x3136, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
@@ -6782,6 +6790,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x14, 0x90170110},
                {0x19, 0x02a11030},
                {0x21, 0x02211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
+               {0x14, 0x90170110},
+               {0x19, 0x02a11030},
+               {0x1a, 0x02a11040},
+               {0x1b, 0x01014020},
+               {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60140},
                {0x14, 0x90170110},
index 6c85f13ab23f17f7ef4031f5f73797fb383584a7..54f6252faca684b23ef91617318c8ae11e3de04e 100644 (file)
@@ -1018,6 +1018,7 @@ static int snd_lx6464es_create(struct snd_card *card,
        chip->port_dsp_bar = pci_ioremap_bar(pci, 2);
        if (!chip->port_dsp_bar) {
                dev_err(card->dev, "cannot remap PCI memory region\n");
+               err = -ENOMEM;
                goto remap_pci_failed;
        }
 
index caae4843cb7001fbee1fa9b222850df7006850fb..16e006f708ca0cbd44a63135bb996b8db7c3ba9e 100644 (file)
@@ -91,6 +91,7 @@ struct kvm_regs {
 #define KVM_VGIC_V3_ADDR_TYPE_DIST     2
 #define KVM_VGIC_V3_ADDR_TYPE_REDIST   3
 #define KVM_VGIC_ITS_ADDR_TYPE         4
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION    5
 
 #define KVM_VGIC_V3_DIST_SIZE          SZ_64K
 #define KVM_VGIC_V3_REDIST_SIZE                (2 * SZ_64K)
index 04b3256f8e6d5f8e3e368b043f0fdcfeb7c23164..4e76630dd6554673d71ad647c1108bb54f1bcea2 100644 (file)
@@ -91,6 +91,7 @@ struct kvm_regs {
 #define KVM_VGIC_V3_ADDR_TYPE_DIST     2
 #define KVM_VGIC_V3_ADDR_TYPE_REDIST   3
 #define KVM_VGIC_ITS_ADDR_TYPE         4
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION    5
 
 #define KVM_VGIC_V3_DIST_SIZE          SZ_64K
 #define KVM_VGIC_V3_REDIST_SIZE                (2 * SZ_64K)
index 833ed9a16adfd03e0b6cb70adc19fe03055f7344..1b32b56a03d34ce2a5f0b7f79c621f87d8c89dbf 100644 (file)
@@ -633,6 +633,7 @@ struct kvm_ppc_cpu_char {
 #define KVM_REG_PPC_PSSCR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
 
 #define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
+#define KVM_REG_PPC_ONLINE     (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
 
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
index 389c36fd82990f3f6b390342f56375ac0067054a..ac5ba55066dd76a26f133d91623309036bcad4c8 100644 (file)
 #define __NR_pkey_alloc                384
 #define __NR_pkey_free         385
 #define __NR_pkey_mprotect     386
+#define __NR_rseq              387
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index fb00a2fca9901eb02ea7b730ddbac957e8ecc947..5701f5cecd3125fbce64ead21d89d02fc8fa25af 100644 (file)
 #define X86_FEATURE_AMD_IBPB           (13*32+12) /* "" Indirect Branch Prediction Barrier */
 #define X86_FEATURE_AMD_IBRS           (13*32+14) /* "" Indirect Branch Restricted Speculation */
 #define X86_FEATURE_AMD_STIBP          (13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_SSBD           (13*32+24) /* "" Speculative Store Bypass Disable */
 #define X86_FEATURE_VIRT_SSBD          (13*32+25) /* Virtualized Speculative Store Bypass Disable */
+#define X86_FEATURE_AMD_SSB_NO         (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM             (14*32+ 0) /* Digital Thermal Sensor */
diff --git a/tools/bpf/Makefile.helpers b/tools/bpf/Makefile.helpers
new file mode 100644 (file)
index 0000000..c34fea7
--- /dev/null
@@ -0,0 +1,59 @@
+ifndef allow-override
+  include ../scripts/Makefile.include
+  include ../scripts/utilities.mak
+else
+  # Assume Makefile.helpers is being run from bpftool/Documentation
+  # subdirectory. Go up two more directories to fetch bpf.h header and
+  # associated script.
+  UP2DIR := ../../
+endif
+
+INSTALL ?= install
+RM ?= rm -f
+RMDIR ?= rmdir --ignore-fail-on-non-empty
+
+ifeq ($(V),1)
+  Q =
+else
+  Q = @
+endif
+
+prefix ?= /usr/local
+mandir ?= $(prefix)/man
+man7dir = $(mandir)/man7
+
+HELPERS_RST = bpf-helpers.rst
+MAN7_RST = $(HELPERS_RST)
+
+_DOC_MAN7 = $(patsubst %.rst,%.7,$(MAN7_RST))
+DOC_MAN7 = $(addprefix $(OUTPUT),$(_DOC_MAN7))
+
+helpers: man7
+man7: $(DOC_MAN7)
+
+RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
+
+$(OUTPUT)$(HELPERS_RST): $(UP2DIR)../../include/uapi/linux/bpf.h
+       $(QUIET_GEN)$(UP2DIR)../../scripts/bpf_helpers_doc.py --filename $< > $@
+
+$(OUTPUT)%.7: $(OUTPUT)%.rst
+ifndef RST2MAN_DEP
+       $(error "rst2man not found, but required to generate man pages")
+endif
+       $(QUIET_GEN)rst2man $< > $@
+
+helpers-clean:
+       $(call QUIET_CLEAN, eBPF_helpers-manpage)
+       $(Q)$(RM) $(DOC_MAN7) $(OUTPUT)$(HELPERS_RST)
+
+helpers-install: helpers
+       $(call QUIET_INSTALL, eBPF_helpers-manpage)
+       $(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man7dir)
+       $(Q)$(INSTALL) -m 644 $(DOC_MAN7) $(DESTDIR)$(man7dir)
+
+helpers-uninstall:
+       $(call QUIET_UNINST, eBPF_helpers-manpage)
+       $(Q)$(RM) $(addprefix $(DESTDIR)$(man7dir)/,$(_DOC_MAN7))
+       $(Q)$(RMDIR) $(DESTDIR)$(man7dir)
+
+.PHONY: helpers helpers-clean helpers-install helpers-uninstall
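The helpers targets above are wired into the bpftool Documentation Makefile in the next hunk, but they can also be driven directly. A usage sketch, assuming an in-tree checkout with rst2man available (paths illustrative):

    # generate bpf-helpers.7 from include/uapi/linux/bpf.h
    make -C tools/bpf/bpftool/Documentation helpers
    # install under $(prefix)/man/man7, /usr/local by default
    make -C tools/bpf/bpftool/Documentation helpers-install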
index a9d47c1558bb104ad9c3b0ba913bec64ff32fc72..f7663a3e60c91605d10eb301d044def46bdaa6e7 100644 (file)
@@ -15,12 +15,15 @@ prefix ?= /usr/local
 mandir ?= $(prefix)/man
 man8dir = $(mandir)/man8
 
-MAN8_RST = $(wildcard *.rst)
+# Load targets for building eBPF helpers man page.
+include ../../Makefile.helpers
+
+MAN8_RST = $(filter-out $(HELPERS_RST),$(wildcard *.rst))
 
 _DOC_MAN8 = $(patsubst %.rst,%.8,$(MAN8_RST))
 DOC_MAN8 = $(addprefix $(OUTPUT),$(_DOC_MAN8))
 
-man: man8
+man: man8 helpers
 man8: $(DOC_MAN8)
 
 RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
@@ -31,16 +34,16 @@ ifndef RST2MAN_DEP
 endif
        $(QUIET_GEN)rst2man $< > $@
 
-clean:
+clean: helpers-clean
        $(call QUIET_CLEAN, Documentation)
        $(Q)$(RM) $(DOC_MAN8)
 
-install: man
+install: man helpers-install
        $(call QUIET_INSTALL, Documentation-man)
        $(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man8dir)
        $(Q)$(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir)
 
-uninstall:
+uninstall: helpers-uninstall
        $(call QUIET_UNINST, Documentation-man)
        $(Q)$(RM) $(addprefix $(DESTDIR)$(man8dir)/,$(_DOC_MAN8))
        $(Q)$(RMDIR) $(DESTDIR)$(man8dir)
index 7b0e6d453e922f4db9f4536c217a76acd6b10d4b..edbe81534c6d2941b955cd0ab15cf845110fb130 100644 (file)
@@ -15,12 +15,13 @@ SYNOPSIS
        *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
 
        *COMMANDS* :=
-       { **show** | **list** | **attach** | **detach** | **help** }
+       { **show** | **list** | **tree** | **attach** | **detach** | **help** }
 
 MAP COMMANDS
 =============
 
 |      **bpftool** **cgroup { show | list }** *CGROUP*
+|      **bpftool** **cgroup tree** [*CGROUP_ROOT*]
 |      **bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
 |      **bpftool** **cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
 |      **bpftool** **cgroup help**
@@ -39,6 +40,15 @@ DESCRIPTION
                  Output will start with program ID followed by attach type,
                  attach flags and program name.
 
+       **bpftool cgroup tree** [*CGROUP_ROOT*]
+                 Iterate over all cgroups in *CGROUP_ROOT* and list all
+                 attached programs. If *CGROUP_ROOT* is not specified,
+                 bpftool uses the cgroup v2 mountpoint.
+
+                 The output is similar to the output of the cgroup show/list
+                 commands: it starts with the absolute cgroup path, followed
+                 by the program ID, attach type, attach flags and program name.
+
        **bpftool cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
                  Attach program *PROG* to the cgroup *CGROUP* with attach type
                  *ATTACH_TYPE* and optional *ATTACH_FLAGS*.
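A sketch of the new subcommand's plain output, with the cgroup path, ID and program name invented for illustration; the column layout follows the header printed by do_show_tree() further down in this patch:

    # bpftool cgroup tree
    CgroupPath
    ID       AttachType      AttachFlags     Name
    /sys/fs/cgroup/user.slice
        8        ingress                         my_filter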
index 43d34a5c3ec527a95b7bbde6f28616597faf9d66..64156a16d5300b64e0d9c2f5297605b88e6265b2 100644 (file)
@@ -24,10 +24,20 @@ MAP COMMANDS
 |      **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual**}]
 |      **bpftool** **prog dump jited**  *PROG* [{**file** *FILE* | **opcodes**}]
 |      **bpftool** **prog pin** *PROG* *FILE*
-|      **bpftool** **prog load** *OBJ* *FILE*
+|      **bpftool** **prog load** *OBJ* *FILE* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*]
 |      **bpftool** **prog help**
 |
+|      *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
 |      *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
+|      *TYPE* := {
+|              **socket** | **kprobe** | **kretprobe** | **classifier** | **action** |
+|              **tracepoint** | **raw_tracepoint** | **xdp** | **perf_event** | **cgroup/skb** |
+|              **cgroup/sock** | **cgroup/dev** | **lwt_in** | **lwt_out** | **lwt_xmit** |
+|              **lwt_seg6local** | **sockops** | **sk_skb** | **sk_msg** | **lirc_mode2** |
+|              **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** |
+|              **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6**
+|      }
+
 
 DESCRIPTION
 ===========
@@ -64,8 +74,19 @@ DESCRIPTION
 
                  Note: *FILE* must be located in *bpffs* mount.
 
-       **bpftool prog load** *OBJ* *FILE*
+       **bpftool prog load** *OBJ* *FILE* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*]
                  Load bpf program from binary *OBJ* and pin as *FILE*.
+                 **type** is optional; if it is not specified, the program
+                 type will be inferred from section names.
+                 By default bpftool will create new maps as declared in the ELF
+                 object being loaded.  The **map** parameter allows for the
+                 reuse of existing maps.  It can be specified multiple times,
+                 each time for a different map.  *IDX* refers to the index of
+                 the map to be replaced in the ELF file, counting from 0,
+                 while *NAME* allows replacing a map by name.  *MAP* specifies
+                 the map to use, referring to it by **id** or through a
+                 **pinned** file.  If **dev** *NAME* is specified, the program
+                 will be loaded onto the given networking device (offload).
 
                  Note: *FILE* must be located in *bpffs* mount.
 
@@ -159,6 +180,14 @@ EXAMPLES
     mov    %rbx,0x0(%rbp)
     48 89 5d 00
 
+|
+| **# bpftool prog load xdp1_kern.o /sys/fs/bpf/xdp1 type xdp map name rxcnt id 7**
+| **# bpftool prog show pinned /sys/fs/bpf/xdp1**
+|   9: xdp  name xdp_prog1  tag 539ec6ce11b52f98  gpl
+|      loaded_at 2018-06-25T16:17:31-0700  uid 0
+|      xlated 488B  jited 336B  memlock 4096B  map_ids 7
+| **# rm /sys/fs/bpf/xdp1**
+|
 
 SEE ALSO
 ========
index 892dbf095bffd79ac6bbc9b3a70a33efa3955f54..6c4830e188798277453ae0097b96b5886bc36c9e 100644 (file)
@@ -23,7 +23,7 @@ endif
 
 LIBBPF = $(BPF_PATH)libbpf.a
 
-BPFTOOL_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
+BPFTOOL_VERSION := $(shell make --no-print-directory -sC ../../.. kernelversion)
 
 $(LIBBPF): FORCE
        $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
@@ -52,7 +52,7 @@ INSTALL ?= install
 RM ?= rm -f
 
 FEATURE_USER = .bpftool
-FEATURE_TESTS = libbfd disassembler-four-args
+FEATURE_TESTS = libbfd disassembler-four-args reallocarray
 FEATURE_DISPLAY = libbfd disassembler-four-args
 
 check_feat := 1
@@ -75,6 +75,10 @@ ifeq ($(feature-disassembler-four-args), 1)
 CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
 endif
 
+ifeq ($(feature-reallocarray), 0)
+CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
+endif
+
 include $(wildcard $(OUTPUT)*.d)
 
 all: $(OUTPUT)bpftool
index 1e108332164367a769c798dfd75c89dcca9e413b..598066c401912a5d69f77ccc68c9fa78da8566e9 100644 (file)
@@ -99,6 +99,35 @@ _bpftool_get_prog_tags()
         command sed -n 's/.*"tag": "\(.*\)",$/\1/p' )" -- "$cur" ) )
 }
 
+_bpftool_get_obj_map_names()
+{
+    local obj
+
+    obj=$1
+
+    maps=$(objdump -j maps -t $obj 2>/dev/null | \
+        command awk '/g     . maps/ {print $NF}')
+
+    COMPREPLY+=( $( compgen -W "$maps" -- "$cur" ) )
+}
+
+_bpftool_get_obj_map_idxs()
+{
+    local obj
+
+    obj=$1
+
+    nmaps=$(objdump -j maps -t $obj 2>/dev/null | grep -c 'g     . maps')
+
+    COMPREPLY+=( $( compgen -W "$(seq 0 $((nmaps - 1)))" -- "$cur" ) )
+}
+
+_sysfs_get_netdevs()
+{
+    COMPREPLY+=( $( compgen -W "$( ls /sys/class/net 2>/dev/null )" -- \
+        "$cur" ) )
+}
+
 # For bpftool map update: retrieve type of the map to update.
 _bpftool_map_update_map_type()
 {
@@ -153,6 +182,13 @@ _bpftool()
     local cur prev words objword
     _init_completion || return
 
+    # Deal with options
+    if [[ ${words[cword]} == -* ]]; then
+        local c='--version --json --pretty --bpffs'
+        COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
+        return 0
+    fi
+
     # Deal with simplest keywords
     case $prev in
         help|hex|opcodes|visual)
@@ -172,20 +208,23 @@ _bpftool()
             ;;
     esac
 
-    # Search for object and command
-    local object command cmdword
-    for (( cmdword=1; cmdword < ${#words[@]}-1; cmdword++ )); do
-        [[ -n $object ]] && command=${words[cmdword]} && break
-        [[ ${words[cmdword]} != -* ]] && object=${words[cmdword]}
+    # Remove all options so completions don't have to deal with them.
+    local i
+    for (( i=1; i < ${#words[@]}; )); do
+        if [[ ${words[i]::1} == - ]]; then
+            words=( "${words[@]:0:i}" "${words[@]:i+1}" )
+            [[ $i -le $cword ]] && cword=$(( cword - 1 ))
+        else
+            i=$(( ++i ))
+        fi
     done
+    cur=${words[cword]}
+    prev=${words[cword - 1]}
+
+    local object=${words[1]} command=${words[2]}
 
-    if [[ -z $object ]]; then
+    if [[ -z $object || $cword -eq 1 ]]; then
         case $cur in
-            -*)
-                local c='--version --json --pretty'
-                COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
-                return 0
-                ;;
             *)
                 COMPREPLY=( $( compgen -W "$( bpftool help 2>&1 | \
                     command sed \
@@ -204,12 +243,14 @@ _bpftool()
     # Completion depends on object and command in use
     case $object in
         prog)
-            case $prev in
-                id)
-                    _bpftool_get_prog_ids
-                    return 0
-                    ;;
-            esac
+            if [[ $command != "load" ]]; then
+                case $prev in
+                    id)
+                        _bpftool_get_prog_ids
+                        return 0
+                        ;;
+                esac
+            fi
 
             local PROG_TYPE='id pinned tag'
             case $command in
@@ -252,8 +293,57 @@ _bpftool()
                     return 0
                     ;;
                 load)
-                    _filedir
-                    return 0
+                    local obj
+
+                    if [[ ${#words[@]} -lt 6 ]]; then
+                        _filedir
+                        return 0
+                    fi
+
+                    obj=${words[3]}
+
+                    if [[ ${words[-4]} == "map" ]]; then
+                        COMPREPLY=( $( compgen -W "id pinned" -- "$cur" ) )
+                        return 0
+                    fi
+                    if [[ ${words[-3]} == "map" ]]; then
+                        if [[ ${words[-2]} == "idx" ]]; then
+                            _bpftool_get_obj_map_idxs $obj
+                        elif [[ ${words[-2]} == "name" ]]; then
+                            _bpftool_get_obj_map_names $obj
+                        fi
+                        return 0
+                    fi
+                    if [[ ${words[-2]} == "map" ]]; then
+                        COMPREPLY=( $( compgen -W "idx name" -- "$cur" ) )
+                        return 0
+                    fi
+
+                    case $prev in
+                        type)
+                            COMPREPLY=( $( compgen -W "socket kprobe kretprobe classifier action tracepoint raw_tracepoint xdp perf_event cgroup/skb cgroup/sock cgroup/dev lwt_in lwt_out lwt_xmit lwt_seg6local sockops sk_skb sk_msg lirc_mode2 cgroup/bind4 cgroup/bind6 cgroup/connect4 cgroup/connect6 cgroup/sendmsg4 cgroup/sendmsg6 cgroup/post_bind4 cgroup/post_bind6" -- \
+                                                   "$cur" ) )
+                            return 0
+                            ;;
+                        id)
+                            _bpftool_get_map_ids
+                            return 0
+                            ;;
+                        pinned)
+                            _filedir
+                            return 0
+                            ;;
+                        dev)
+                            _sysfs_get_netdevs
+                            return 0
+                            ;;
+                        *)
+                            COMPREPLY=( $( compgen -W "map" -- "$cur" ) )
+                            _bpftool_once_attr 'type'
+                            _bpftool_once_attr 'dev'
+                            return 0
+                            ;;
+                    esac
                     ;;
                 *)
                     [[ $prev == $object ]] && \
@@ -404,6 +494,10 @@ _bpftool()
                     _filedir
                     return 0
                     ;;
+               tree)
+                   _filedir
+                   return 0
+                   ;;
                 attach|detach)
                     local ATTACH_TYPES='ingress egress sock_create sock_ops \
                         device bind4 bind6 post_bind4 post_bind6 connect4 \
@@ -445,7 +539,7 @@ _bpftool()
                 *)
                     [[ $prev == $object ]] && \
                         COMPREPLY=( $( compgen -W 'help attach detach \
-                            show list' -- "$cur" ) )
+                            show list tree' -- "$cur" ) )
                     ;;
             esac
             ;;
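The two object-file helpers added above scrape objdump's symbol table for the "maps" section. A sketch of the input they parse, with the symbol value, size and name invented; the awk program keeps the last field of global-symbol lines:

    $ objdump -j maps -t xdp1_kern.o
    xdp1_kern.o:     file format elf64-little
    SYMBOL TABLE:
    0000000000000000 g     O maps  000000000000001c rxcnt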
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
new file mode 100644 (file)
index 0000000..55bc512
--- /dev/null
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Facebook */
+
+#include <ctype.h>
+#include <stdio.h> /* for (FILE *) used by json_writer */
+#include <string.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/btf.h>
+#include <linux/err.h>
+
+#include "btf.h"
+#include "json_writer.h"
+#include "main.h"
+
+#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
+#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
+#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
+#define BITS_ROUNDUP_BYTES(bits) \
+       (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
+
+static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
+                             __u8 bit_offset, const void *data);
+
+static void btf_dumper_ptr(const void *data, json_writer_t *jw,
+                          bool is_plain_text)
+{
+       if (is_plain_text)
+               jsonw_printf(jw, "%p", *(void **)data);
+       else
+               jsonw_printf(jw, "%lu", *(unsigned long *)data);
+}
+
+static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
+                              const void *data)
+{
+       int actual_type_id;
+
+       actual_type_id = btf__resolve_type(d->btf, type_id);
+       if (actual_type_id < 0)
+               return actual_type_id;
+
+       return btf_dumper_do_type(d, actual_type_id, 0, data);
+}
+
+static void btf_dumper_enum(const void *data, json_writer_t *jw)
+{
+       jsonw_printf(jw, "%d", *(int *)data);
+}
+
+static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
+                           const void *data)
+{
+       const struct btf_type *t = btf__type_by_id(d->btf, type_id);
+       struct btf_array *arr = (struct btf_array *)(t + 1);
+       long long elem_size;
+       int ret = 0;
+       __u32 i;
+
+       elem_size = btf__resolve_size(d->btf, arr->type);
+       if (elem_size < 0)
+               return elem_size;
+
+       jsonw_start_array(d->jw);
+       for (i = 0; i < arr->nelems; i++) {
+               ret = btf_dumper_do_type(d, arr->type, 0,
+                                        data + i * elem_size);
+               if (ret)
+                       break;
+       }
+
+       jsonw_end_array(d->jw);
+       return ret;
+}
+
+static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
+                               const void *data, json_writer_t *jw,
+                               bool is_plain_text)
+{
+       int left_shift_bits, right_shift_bits;
+       int nr_bits = BTF_INT_BITS(int_type);
+       int total_bits_offset;
+       int bytes_to_copy;
+       int bits_to_copy;
+       __u64 print_num;
+
+       total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
+       data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
+       bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
+       bits_to_copy = bit_offset + nr_bits;
+       bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);
+
+       print_num = 0;
+       memcpy(&print_num, data, bytes_to_copy);
+#if defined(__BIG_ENDIAN_BITFIELD)
+       left_shift_bits = bit_offset;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       left_shift_bits = 64 - bits_to_copy;
+#else
+#error neither big nor little endian
+#endif
+       right_shift_bits = 64 - nr_bits;
+
+       print_num <<= left_shift_bits;
+       print_num >>= right_shift_bits;
+       if (is_plain_text)
+               jsonw_printf(jw, "0x%llx", print_num);
+       else
+               jsonw_printf(jw, "%llu", print_num);
+}
+
+static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
+                         const void *data, json_writer_t *jw,
+                         bool is_plain_text)
+{
+       __u32 *int_type;
+       __u32 nr_bits;
+
+       int_type = (__u32 *)(t + 1);
+       nr_bits = BTF_INT_BITS(*int_type);
+       /* if this is a bitfield */
+       if (bit_offset || BTF_INT_OFFSET(*int_type) ||
+           BITS_PER_BYTE_MASKED(nr_bits)) {
+               btf_dumper_int_bits(*int_type, bit_offset, data, jw,
+                                   is_plain_text);
+               return 0;
+       }
+
+       switch (BTF_INT_ENCODING(*int_type)) {
+       case 0:
+               if (BTF_INT_BITS(*int_type) == 64)
+                       jsonw_printf(jw, "%lu", *(__u64 *)data);
+               else if (BTF_INT_BITS(*int_type) == 32)
+                       jsonw_printf(jw, "%u", *(__u32 *)data);
+               else if (BTF_INT_BITS(*int_type) == 16)
+                       jsonw_printf(jw, "%hu", *(__u16 *)data);
+               else if (BTF_INT_BITS(*int_type) == 8)
+                       jsonw_printf(jw, "%hhu", *(__u8 *)data);
+               else
+                       btf_dumper_int_bits(*int_type, bit_offset, data, jw,
+                                           is_plain_text);
+               break;
+       case BTF_INT_SIGNED:
+               if (BTF_INT_BITS(*int_type) == 64)
+                       jsonw_printf(jw, "%ld", *(long long *)data);
+               else if (BTF_INT_BITS(*int_type) == 32)
+                       jsonw_printf(jw, "%d", *(int *)data);
+               else if (BTF_INT_BITS(*int_type) == 16)
+                       jsonw_printf(jw, "%hd", *(short *)data);
+               else if (BTF_INT_BITS(*int_type) == 8)
+                       jsonw_printf(jw, "%hhd", *(char *)data);
+               else
+                       btf_dumper_int_bits(*int_type, bit_offset, data, jw,
+                                           is_plain_text);
+               break;
+       case BTF_INT_CHAR:
+               if (isprint(*(char *)data))
+                       jsonw_printf(jw, "\"%c\"", *(char *)data);
+               else
+                       if (is_plain_text)
+                               jsonw_printf(jw, "0x%hhx", *(char *)data);
+                       else
+                               jsonw_printf(jw, "\"\\u00%02hhx\"",
+                                            *(char *)data);
+               break;
+       case BTF_INT_BOOL:
+               jsonw_bool(jw, *(int *)data);
+               break;
+       default:
+               /* shouldn't happen */
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
+                            const void *data)
+{
+       const struct btf_type *t;
+       struct btf_member *m;
+       const void *data_off;
+       int ret = 0;
+       int i, vlen;
+
+       t = btf__type_by_id(d->btf, type_id);
+       if (!t)
+               return -EINVAL;
+
+       vlen = BTF_INFO_VLEN(t->info);
+       jsonw_start_object(d->jw);
+       m = (struct btf_member *)(t + 1);
+
+       for (i = 0; i < vlen; i++) {
+               data_off = data + BITS_ROUNDDOWN_BYTES(m[i].offset);
+               jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off));
+               ret = btf_dumper_do_type(d, m[i].type,
+                                        BITS_PER_BYTE_MASKED(m[i].offset),
+                                        data_off);
+               if (ret)
+                       break;
+       }
+
+       jsonw_end_object(d->jw);
+
+       return ret;
+}
+
+static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
+                             __u8 bit_offset, const void *data)
+{
+       const struct btf_type *t = btf__type_by_id(d->btf, type_id);
+
+       switch (BTF_INFO_KIND(t->info)) {
+       case BTF_KIND_INT:
+               return btf_dumper_int(t, bit_offset, data, d->jw,
+                                    d->is_plain_text);
+       case BTF_KIND_STRUCT:
+       case BTF_KIND_UNION:
+               return btf_dumper_struct(d, type_id, data);
+       case BTF_KIND_ARRAY:
+               return btf_dumper_array(d, type_id, data);
+       case BTF_KIND_ENUM:
+               btf_dumper_enum(data, d->jw);
+               return 0;
+       case BTF_KIND_PTR:
+               btf_dumper_ptr(data, d->jw, d->is_plain_text);
+               return 0;
+       case BTF_KIND_UNKN:
+               jsonw_printf(d->jw, "(unknown)");
+               return 0;
+       case BTF_KIND_FWD:
+               /* map key or value can't be forward */
+               jsonw_printf(d->jw, "(fwd-kind-invalid)");
+               return -EINVAL;
+       case BTF_KIND_TYPEDEF:
+       case BTF_KIND_VOLATILE:
+       case BTF_KIND_CONST:
+       case BTF_KIND_RESTRICT:
+               return btf_dumper_modifier(d, type_id, data);
+       default:
+               jsonw_printf(d->jw, "(unsupported-kind");
+               return -EINVAL;
+       }
+}
+
+int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
+                   const void *data)
+{
+       return btf_dumper_do_type(d, type_id, 0, data);
+}
index 16bee011e16cc883e99fcc49451b896ce713d636..ee7a9765c6b32f3eb9e22b79f8a112dd38983405 100644 (file)
@@ -2,7 +2,12 @@
 // Copyright (C) 2017 Facebook
 // Author: Roman Gushchin <guro@fb.com>
 
+#define _XOPEN_SOURCE 500
+#include <errno.h>
 #include <fcntl.h>
+#include <ftw.h>
+#include <mntent.h>
+#include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <sys/stat.h>
@@ -53,7 +58,8 @@ static enum bpf_attach_type parse_attach_type(const char *str)
 }
 
 static int show_bpf_prog(int id, const char *attach_type_str,
-                        const char *attach_flags_str)
+                        const char *attach_flags_str,
+                        int level)
 {
        struct bpf_prog_info info = {};
        __u32 info_len = sizeof(info);
@@ -78,7 +84,8 @@ static int show_bpf_prog(int id, const char *attach_type_str,
                jsonw_string_field(json_wtr, "name", info.name);
                jsonw_end_object(json_wtr);
        } else {
-               printf("%-8u %-15s %-15s %-15s\n", info.id,
+               printf("%s%-8u %-15s %-15s %-15s\n", level ? "    " : "",
+                      info.id,
                       attach_type_str,
                       attach_flags_str,
                       info.name);
@@ -88,7 +95,20 @@ static int show_bpf_prog(int id, const char *attach_type_str,
        return 0;
 }
 
-static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
+static int count_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
+{
+       __u32 prog_cnt = 0;
+       int ret;
+
+       ret = bpf_prog_query(cgroup_fd, type, 0, NULL, NULL, &prog_cnt);
+       if (ret)
+               return -1;
+
+       return prog_cnt;
+}
+
+static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
+                                  int level)
 {
        __u32 prog_ids[1024] = {0};
        char *attach_flags_str;
@@ -123,7 +143,7 @@ static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
 
        for (iter = 0; iter < prog_cnt; iter++)
                show_bpf_prog(prog_ids[iter], attach_type_strings[type],
-                             attach_flags_str);
+                             attach_flags_str, level);
 
        return 0;
 }
@@ -161,7 +181,7 @@ static int do_show(int argc, char **argv)
                 * If we were able to get the show for at least one
                 * attach type, let's return 0.
                 */
-               if (show_attached_bpf_progs(cgroup_fd, type) == 0)
+               if (show_attached_bpf_progs(cgroup_fd, type, 0) == 0)
                        ret = 0;
        }
 
@@ -173,6 +193,143 @@ static int do_show(int argc, char **argv)
        return ret;
 }
 
+/*
+ * To distinguish nftw() errors from do_show_tree_fn() errors
+ * and avoid duplicating error messages, let's return -2
+ * from do_show_tree_fn() in case of error.
+ */
+#define NFTW_ERR               -1
+#define SHOW_TREE_FN_ERR       -2
+static int do_show_tree_fn(const char *fpath, const struct stat *sb,
+                          int typeflag, struct FTW *ftw)
+{
+       enum bpf_attach_type type;
+       bool skip = true;
+       int cgroup_fd;
+
+       if (typeflag != FTW_D)
+               return 0;
+
+       cgroup_fd = open(fpath, O_RDONLY);
+       if (cgroup_fd < 0) {
+               p_err("can't open cgroup %s: %s", fpath, strerror(errno));
+               return SHOW_TREE_FN_ERR;
+       }
+
+       for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+               int count = count_attached_bpf_progs(cgroup_fd, type);
+
+               if (count < 0 && errno != EINVAL) {
+                       p_err("can't query bpf programs attached to %s: %s",
+                             fpath, strerror(errno));
+                       close(cgroup_fd);
+                       return SHOW_TREE_FN_ERR;
+               }
+               if (count > 0) {
+                       skip = false;
+                       break;
+               }
+       }
+
+       if (skip) {
+               close(cgroup_fd);
+               return 0;
+       }
+
+       if (json_output) {
+               jsonw_start_object(json_wtr);
+               jsonw_string_field(json_wtr, "cgroup", fpath);
+               jsonw_name(json_wtr, "programs");
+               jsonw_start_array(json_wtr);
+       } else {
+               printf("%s\n", fpath);
+       }
+
+       for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
+               show_attached_bpf_progs(cgroup_fd, type, ftw->level);
+
+       if (json_output) {
+               jsonw_end_array(json_wtr);
+               jsonw_end_object(json_wtr);
+       }
+
+       close(cgroup_fd);
+
+       return 0;
+}
+
+static char *find_cgroup_root(void)
+{
+       struct mntent *mnt;
+       FILE *f;
+
+       f = fopen("/proc/mounts", "r");
+       if (f == NULL)
+               return NULL;
+
+       while ((mnt = getmntent(f))) {
+               if (strcmp(mnt->mnt_type, "cgroup2") == 0) {
+                       fclose(f);
+                       return strdup(mnt->mnt_dir);
+               }
+       }
+
+       fclose(f);
+       return NULL;
+}
+
+static int do_show_tree(int argc, char **argv)
+{
+       char *cgroup_root;
+       int ret;
+
+       switch (argc) {
+       case 0:
+               cgroup_root = find_cgroup_root();
+               if (!cgroup_root) {
+                       p_err("cgroup v2 isn't mounted");
+                       return -1;
+               }
+               break;
+       case 1:
+               cgroup_root = argv[0];
+               break;
+       default:
+               p_err("too many parameters for cgroup tree");
+               return -1;
+       }
+
+       if (json_output)
+               jsonw_start_array(json_wtr);
+       else
+               printf("%s\n"
+                      "%-8s %-15s %-15s %-15s\n",
+                      "CgroupPath",
+                      "ID", "AttachType", "AttachFlags", "Name");
+
+       switch (nftw(cgroup_root, do_show_tree_fn, 1024, FTW_MOUNT)) {
+       case NFTW_ERR:
+               p_err("can't iterate over %s: %s", cgroup_root,
+                     strerror(errno));
+               ret = -1;
+               break;
+       case SHOW_TREE_FN_ERR:
+               ret = -1;
+               break;
+       default:
+               ret = 0;
+       }
+
+       if (json_output)
+               jsonw_end_array(json_wtr);
+
+       if (argc == 0)
+               free(cgroup_root);
+
+       return ret;
+}
+
 static int do_attach(int argc, char **argv)
 {
        enum bpf_attach_type attach_type;
@@ -289,6 +446,7 @@ static int do_help(int argc, char **argv)
 
        fprintf(stderr,
                "Usage: %s %s { show | list } CGROUP\n"
+               "       %s %s tree [CGROUP_ROOT]\n"
                "       %s %s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n"
                "       %s %s detach CGROUP ATTACH_TYPE PROG\n"
                "       %s %s help\n"
@@ -298,6 +456,7 @@ static int do_help(int argc, char **argv)
                "       " HELP_SPEC_PROGRAM "\n"
                "       " HELP_SPEC_OPTIONS "\n"
                "",
+               bin_name, argv[-2],
                bin_name, argv[-2], bin_name, argv[-2],
                bin_name, argv[-2], bin_name, argv[-2]);
 
@@ -307,6 +466,7 @@ static int do_help(int argc, char **argv)
 static const struct cmd cmds[] = {
        { "show",       do_show },
        { "list",       do_show },
+       { "tree",       do_show_tree },
        { "attach",     do_attach },
        { "detach",     do_detach },
        { "help",       do_help },
index 32f9e397a6c07a988edd80a3338fd2a29930d3e0..b432daea452061a78ec0c2156e8182bdb4584fbb 100644 (file)
@@ -31,8 +31,6 @@
  * SOFTWARE.
  */
 
-/* Author: Jakub Kicinski <kubakici@wp.pl> */
-
 #include <ctype.h>
 #include <errno.h>
 #include <fcntl.h>
index eea7f14355f3273f2885e370cbd5a47a99d0bded..d15a62be6cf0fd0bbe306e9420fc07d9848ce679 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2017-2018 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -31,8 +31,6 @@
  * SOFTWARE.
  */
 
-/* Author: Jakub Kicinski <kubakici@wp.pl> */
-
 #include <bfd.h>
 #include <ctype.h>
 #include <errno.h>
index 63fdb310b9a4aceddad6f5c935c4075d3f9b522e..238e734d75b3eb616e287f0eb47ff5645d94eccd 100644 (file)
@@ -31,8 +31,6 @@
  * SOFTWARE.
  */
 
-/* Author: Jakub Kicinski <kubakici@wp.pl> */
-
 #ifndef __BPF_TOOL_H
 #define __BPF_TOOL_H
 
@@ -44,6 +42,7 @@
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/hashtable.h>
+#include <tools/libc_compat.h>
 
 #include "json_writer.h"
 
 #define NEXT_ARG()     ({ argc--; argv++; if (argc < 0) usage(); })
 #define NEXT_ARGP()    ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
 #define BAD_ARG()      ({ p_err("what is '%s'?", *argv); -1; })
+#define GET_ARG()      ({ argc--; *argv++; })
+#define REQ_ARGS(cnt)                                                  \
+       ({                                                              \
+               int _cnt = (cnt);                                       \
+               bool _res;                                              \
+                                                                       \
+               if (argc < _cnt) {                                      \
+                       p_err("'%s' needs at least %d arguments, %d found", \
+                             argv[-1], _cnt, argc);                    \
+                       _res = false;                                   \
+               } else {                                                \
+                       _res = true;                                    \
+               }                                                       \
+               _res;                                                   \
+       })
 
 #define ERR_MAX_LEN    1024
 
@@ -61,6 +75,8 @@
        "PROG := { id PROG_ID | pinned FILE | tag PROG_TAG }"
 #define HELP_SPEC_OPTIONS                                              \
        "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} }"
+#define HELP_SPEC_MAP                                                  \
+       "MAP := { id MAP_ID | pinned FILE }"
 
 enum bpf_obj_type {
        BPF_OBJ_UNKNOWN,
@@ -122,6 +138,7 @@ int do_cgroup(int argc, char **arg);
 int do_perf(int argc, char **arg);
 
 int prog_parse_fd(int *argc, char ***argv);
+int map_parse_fd(int *argc, char ***argv);
 int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len);
 
 void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
@@ -133,4 +150,19 @@ unsigned int get_page_size(void);
 unsigned int get_possible_cpus(void);
 const char *ifindex_to_bfd_name_ns(__u32 ifindex, __u64 ns_dev, __u64 ns_ino);
 
+struct btf_dumper {
+       const struct btf *btf;
+       json_writer_t *jw;
+       bool is_plain_text;
+};
+
+/* btf_dumper_type - print data along with type information
+ * @d: an instance containing context for dumping types
+ * @type_id: index in btf->types array. This points to the type to be dumped
+ * @data: pointer the actual data, i.e. the values to be printed
+ *
+ * Returns zero on success and negative error code otherwise
+ */
+int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
+                   const void *data);
 #endif
index 097b1a5e046b20f8ced90a469c2b8e5ab25bb593..9c81918455850c411eea9306fdd189a55a89fa6e 100644 (file)
  * SOFTWARE.
  */
 
-/* Author: Jakub Kicinski <kubakici@wp.pl> */
-
 #include <assert.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <linux/err.h>
 #include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -46,6 +45,8 @@
 
 #include <bpf.h>
 
+#include "btf.h"
+#include "json_writer.h"
 #include "main.h"
 
 static const char * const map_type_name[] = {
@@ -95,7 +96,7 @@ static void *alloc_value(struct bpf_map_info *info)
                return malloc(info->value_size);
 }
 
-static int map_parse_fd(int *argc, char ***argv)
+int map_parse_fd(int *argc, char ***argv)
 {
        int fd;
 
@@ -150,8 +151,109 @@ int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len)
        return fd;
 }
 
+static int do_dump_btf(const struct btf_dumper *d,
+                      struct bpf_map_info *map_info, void *key,
+                      void *value)
+{
+       int ret;
+
+       /* start of key-value pair */
+       jsonw_start_object(d->jw);
+
+       jsonw_name(d->jw, "key");
+
+       ret = btf_dumper_type(d, map_info->btf_key_type_id, key);
+       if (ret)
+               goto err_end_obj;
+
+       jsonw_name(d->jw, "value");
+
+       ret = btf_dumper_type(d, map_info->btf_value_type_id, value);
+
+err_end_obj:
+       /* end of key-value pair */
+       jsonw_end_object(d->jw);
+
+       return ret;
+}
+
+static int get_btf(struct bpf_map_info *map_info, struct btf **btf)
+{
+       struct bpf_btf_info btf_info = { 0 };
+       __u32 len = sizeof(btf_info);
+       __u32 last_size;
+       int btf_fd;
+       void *ptr;
+       int err;
+
+       err = 0;
+       *btf = NULL;
+       btf_fd = bpf_btf_get_fd_by_id(map_info->btf_id);
+       if (btf_fd < 0)
+               return 0;
+
+       /* we won't know btf_size until we call bpf_obj_get_info_by_fd(), so
+        * let's start with a sane default - 4KiB here - and resize it only if
+        * bpf_obj_get_info_by_fd() needs a bigger buffer.
+        */
+       btf_info.btf_size = 4096;
+       last_size = btf_info.btf_size;
+       ptr = malloc(last_size);
+       if (!ptr) {
+               err = -ENOMEM;
+               goto exit_free;
+       }
+
+       bzero(ptr, last_size);
+       btf_info.btf = ptr_to_u64(ptr);
+       err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
+
+       if (!err && btf_info.btf_size > last_size) {
+               void *temp_ptr;
+
+               last_size = btf_info.btf_size;
+               temp_ptr = realloc(ptr, last_size);
+               if (!temp_ptr) {
+                       err = -ENOMEM;
+                       goto exit_free;
+               }
+               ptr = temp_ptr;
+               bzero(ptr, last_size);
+               btf_info.btf = ptr_to_u64(ptr);
+               err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
+       }
+
+       if (err || btf_info.btf_size > last_size) {
+               err = errno;
+               goto exit_free;
+       }
+
+       *btf = btf__new((__u8 *)btf_info.btf, btf_info.btf_size, NULL);
+       if (IS_ERR(*btf)) {
+               err = PTR_ERR(*btf);
+               *btf = NULL;
+       }
+
+exit_free:
+       close(btf_fd);
+       free(ptr);
+
+       return err;
+}
+
+static json_writer_t *get_btf_writer(void)
+{
+       json_writer_t *jw = jsonw_new(stdout);
+
+       if (!jw)
+               return NULL;
+       jsonw_pretty(jw, true);
+
+       return jw;
+}
+
 static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
-                            unsigned char *value)
+                            unsigned char *value, struct btf *btf)
 {
        jsonw_start_object(json_wtr);
 
@@ -160,6 +262,16 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
                print_hex_data_json(key, info->key_size);
                jsonw_name(json_wtr, "value");
                print_hex_data_json(value, info->value_size);
+               if (btf) {
+                       struct btf_dumper d = {
+                               .btf = btf,
+                               .jw = json_wtr,
+                               .is_plain_text = false,
+                       };
+
+                       jsonw_name(json_wtr, "formatted");
+                       do_dump_btf(&d, info, key, value);
+               }
        } else {
                unsigned int i, n;
 
@@ -510,10 +622,12 @@ static int do_show(int argc, char **argv)
 
 static int do_dump(int argc, char **argv)
 {
+       struct bpf_map_info info = {};
        void *key, *value, *prev_key;
        unsigned int num_elems = 0;
-       struct bpf_map_info info = {};
        __u32 len = sizeof(info);
+       json_writer_t *btf_wtr;
+       struct btf *btf = NULL;
        int err;
        int fd;
 
@@ -539,8 +653,27 @@ static int do_dump(int argc, char **argv)
        }
 
        prev_key = NULL;
+
+       err = get_btf(&info, &btf);
+       if (err) {
+               p_err("failed to get btf");
+               goto exit_free;
+       }
+
        if (json_output)
                jsonw_start_array(json_wtr);
+       else
+               if (btf) {
+                       btf_wtr = get_btf_writer();
+                       if (!btf_wtr) {
+                               p_info("failed to create json writer for btf. falling back to plain output");
+                               btf__free(btf);
+                               btf = NULL;
+                       } else {
+                               jsonw_start_array(btf_wtr);
+                       }
+               }
+
        while (true) {
                err = bpf_map_get_next_key(fd, prev_key, key);
                if (err) {
@@ -551,9 +684,19 @@ static int do_dump(int argc, char **argv)
 
                if (!bpf_map_lookup_elem(fd, key, value)) {
                        if (json_output)
-                               print_entry_json(&info, key, value);
+                               print_entry_json(&info, key, value, btf);
                        else
-                               print_entry_plain(&info, key, value);
+                               if (btf) {
+                                       struct btf_dumper d = {
+                                               .btf = btf,
+                                               .jw = btf_wtr,
+                                               .is_plain_text = true,
+                                       };
+
+                                       do_dump_btf(&d, &info, key, value);
+                               } else {
+                                       print_entry_plain(&info, key, value);
+                               }
                } else {
                        if (json_output) {
                                jsonw_name(json_wtr, "key");
@@ -576,14 +719,19 @@ static int do_dump(int argc, char **argv)
 
        if (json_output)
                jsonw_end_array(json_wtr);
-       else
+       else if (btf) {
+               jsonw_end_array(btf_wtr);
+               jsonw_destroy(&btf_wtr);
+       } else {
                printf("Found %u element%s\n", num_elems,
                       num_elems != 1 ? "s" : "");
+       }
 
 exit_free:
        free(key);
        free(value);
        close(fd);
+       btf__free(btf);
 
        return err;
 }
@@ -639,6 +787,8 @@ static int do_lookup(int argc, char **argv)
 {
        struct bpf_map_info info = {};
        __u32 len = sizeof(info);
+       json_writer_t *btf_wtr;
+       struct btf *btf = NULL;
        void *key, *value;
        int err;
        int fd;
@@ -663,27 +813,60 @@ static int do_lookup(int argc, char **argv)
                goto exit_free;
 
        err = bpf_map_lookup_elem(fd, key, value);
-       if (!err) {
-               if (json_output)
-                       print_entry_json(&info, key, value);
-               else
+       if (err) {
+               if (errno == ENOENT) {
+                       if (json_output) {
+                               jsonw_null(json_wtr);
+                       } else {
+                               printf("key:\n");
+                               fprint_hex(stdout, key, info.key_size, " ");
+                               printf("\n\nNot found\n");
+                       }
+               } else {
+                       p_err("lookup failed: %s", strerror(errno));
+               }
+
+               goto exit_free;
+       }
+
+       /* getting here means bpf_map_lookup_elem() succeeded */
+       err = get_btf(&info, &btf);
+       if (err) {
+               p_err("failed to get btf");
+               goto exit_free;
+       }
+
+       if (json_output) {
+               print_entry_json(&info, key, value, btf);
+       } else if (btf) {
+               /* if we get here, json_wtr has not been initialised,
+                * so let's create a separate writer for btf
+                */
+               btf_wtr = get_btf_writer();
+               if (!btf_wtr) {
+                       p_info("failed to create json writer for btf. falling back to plain output");
+                       btf__free(btf);
+                       btf = NULL;
                        print_entry_plain(&info, key, value);
-       } else if (errno == ENOENT) {
-               if (json_output) {
-                       jsonw_null(json_wtr);
                } else {
-                       printf("key:\n");
-                       fprint_hex(stdout, key, info.key_size, " ");
-                       printf("\n\nNot found\n");
+                       struct btf_dumper d = {
+                               .btf = btf,
+                               .jw = btf_wtr,
+                               .is_plain_text = true,
+                       };
+
+                       do_dump_btf(&d, &info, key, value);
+                       jsonw_destroy(&btf_wtr);
                }
        } else {
-               p_err("lookup failed: %s", strerror(errno));
+               print_entry_plain(&info, key, value);
        }
 
 exit_free:
        free(key);
        free(value);
        close(fd);
+       btf__free(btf);
 
        return err;
 }
@@ -826,7 +1009,7 @@ static int do_help(int argc, char **argv)
                "       %s %s event_pipe MAP [cpu N index M]\n"
                "       %s %s help\n"
                "\n"
-               "       MAP := { id MAP_ID | pinned FILE }\n"
+               "       " HELP_SPEC_MAP "\n"
                "       DATA := { [hex] BYTES }\n"
                "       " HELP_SPEC_PROGRAM "\n"
                "       VALUE := { DATA | MAP | PROG }\n"
index ac6b1a12c9b7cd6319dc3697500d56c938de8d90..b76b77dcfd1fcc52ded0a99b7e7f3cec8cde90ce 100644 (file)
@@ -29,9 +29,10 @@ static bool has_perf_query_support(void)
        if (perf_query_supported)
                goto out;
 
-       fd = open(bin_name, O_RDONLY);
+       fd = open("/", O_RDONLY);
        if (fd < 0) {
-               p_err("perf_query_support: %s", strerror(errno));
+               p_err("perf_query_support: cannot open directory \"/\" (%s)",
+                     strerror(errno));
                goto out;
        }
 
index a4f435203feff52f9d7c9a04bf8d5c10c31d2c73..dce960d22106fb173e2e5e0195f0472209d2bb71 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2017-2018 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -31,8 +31,7 @@
  * SOFTWARE.
  */
 
-/* Author: Jakub Kicinski <kubakici@wp.pl> */
-
+#define _GNU_SOURCE
 #include <errno.h>
 #include <fcntl.h>
 #include <stdarg.h>
 #include <string.h>
 #include <time.h>
 #include <unistd.h>
+#include <net/if.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 
+#include <linux/err.h>
+
 #include <bpf.h>
 #include <libbpf.h>
 
@@ -90,7 +92,9 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
        }
 
        wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
-               nsecs / 1000000000;
+               (real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
+               1000000000;
+
 
        if (!localtime_r(&wallclock_secs, &load_tm)) {
                snprintf(buf, size, "%llu", nsecs / 1000000000);
@@ -679,28 +683,248 @@ static int do_pin(int argc, char **argv)
        return err;
 }
 
+struct map_replace {
+       int idx;
+       int fd;
+       char *name;
+};
+
+int map_replace_compar(const void *p1, const void *p2)
+{
+       const struct map_replace *a = p1, *b = p2;
+
+       return a->idx - b->idx;
+}
+
 static int do_load(int argc, char **argv)
 {
+       enum bpf_attach_type expected_attach_type;
+       struct bpf_object_open_attr attr = {
+               .prog_type      = BPF_PROG_TYPE_UNSPEC,
+       };
+       struct map_replace *map_replace = NULL;
+       unsigned int old_map_fds = 0;
+       struct bpf_program *prog;
        struct bpf_object *obj;
-       int prog_fd;
-
-       if (argc != 2)
-               usage();
+       struct bpf_map *map;
+       const char *pinfile;
+       unsigned int i, j;
+       __u32 ifindex = 0;
+       int idx, err;
 
-       if (bpf_prog_load(argv[0], BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd)) {
-               p_err("failed to load program");
+       if (!REQ_ARGS(2))
                return -1;
+       attr.file = GET_ARG();
+       pinfile = GET_ARG();
+
+       while (argc) {
+               if (is_prefix(*argv, "type")) {
+                       char *type;
+
+                       NEXT_ARG();
+
+                       if (attr.prog_type != BPF_PROG_TYPE_UNSPEC) {
+                               p_err("program type already specified");
+                               goto err_free_reuse_maps;
+                       }
+                       if (!REQ_ARGS(1))
+                               goto err_free_reuse_maps;
+
+                       /* Put a '/' at the end of type to appease libbpf */
+                       type = malloc(strlen(*argv) + 2);
+                       if (!type) {
+                               p_err("mem alloc failed");
+                               goto err_free_reuse_maps;
+                       }
+                       *type = 0;
+                       strcat(type, *argv);
+                       strcat(type, "/");
+
+                       err = libbpf_prog_type_by_name(type, &attr.prog_type,
+                                                      &expected_attach_type);
+                       free(type);
+                       if (err < 0) {
+                               p_err("unknown program type '%s'", *argv);
+                               goto err_free_reuse_maps;
+                       }
+                       NEXT_ARG();
+               } else if (is_prefix(*argv, "map")) {
+                       char *endptr, *name;
+                       int fd;
+
+                       NEXT_ARG();
+
+                       if (!REQ_ARGS(4))
+                               goto err_free_reuse_maps;
+
+                       if (is_prefix(*argv, "idx")) {
+                               NEXT_ARG();
+
+                               idx = strtoul(*argv, &endptr, 0);
+                               if (*endptr) {
+                                       p_err("can't parse %s as IDX", *argv);
+                                       goto err_free_reuse_maps;
+                               }
+                               name = NULL;
+                       } else if (is_prefix(*argv, "name")) {
+                               NEXT_ARG();
+
+                               name = *argv;
+                               idx = -1;
+                       } else {
+                               p_err("expected 'idx' or 'name', got: '%s'?",
+                                     *argv);
+                               goto err_free_reuse_maps;
+                       }
+                       NEXT_ARG();
+
+                       fd = map_parse_fd(&argc, &argv);
+                       if (fd < 0)
+                               goto err_free_reuse_maps;
+
+                       map_replace = reallocarray(map_replace, old_map_fds + 1,
+                                                  sizeof(*map_replace));
+                       if (!map_replace) {
+                               p_err("mem alloc failed");
+                               goto err_free_reuse_maps;
+                       }
+                       map_replace[old_map_fds].idx = idx;
+                       map_replace[old_map_fds].name = name;
+                       map_replace[old_map_fds].fd = fd;
+                       old_map_fds++;
+               } else if (is_prefix(*argv, "dev")) {
+                       NEXT_ARG();
+
+                       if (ifindex) {
+                               p_err("offload device already specified");
+                               goto err_free_reuse_maps;
+                       }
+                       if (!REQ_ARGS(1))
+                               goto err_free_reuse_maps;
+
+                       ifindex = if_nametoindex(*argv);
+                       if (!ifindex) {
+                               p_err("unrecognized netdevice '%s': %s",
+                                     *argv, strerror(errno));
+                               goto err_free_reuse_maps;
+                       }
+                       NEXT_ARG();
+               } else {
+                       p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
+                             *argv);
+                       goto err_free_reuse_maps;
+               }
        }
 
-       if (do_pin_fd(prog_fd, argv[1])) {
-               p_err("failed to pin program");
-               return -1;
+       obj = bpf_object__open_xattr(&attr);
+       if (IS_ERR_OR_NULL(obj)) {
+               p_err("failed to open object file");
+               goto err_free_reuse_maps;
+       }
+
+       prog = bpf_program__next(NULL, obj);
+       if (!prog) {
+               p_err("object file doesn't contain any bpf program");
+               goto err_close_obj;
+       }
+
+       bpf_program__set_ifindex(prog, ifindex);
+       if (attr.prog_type == BPF_PROG_TYPE_UNSPEC) {
+               const char *sec_name = bpf_program__title(prog, false);
+
+               err = libbpf_prog_type_by_name(sec_name, &attr.prog_type,
+                                              &expected_attach_type);
+               if (err < 0) {
+                       p_err("failed to guess program type based on section name %s\n",
+                             sec_name);
+                       goto err_close_obj;
+               }
+       }
+       bpf_program__set_type(prog, attr.prog_type);
+       bpf_program__set_expected_attach_type(prog, expected_attach_type);
+
+       qsort(map_replace, old_map_fds, sizeof(*map_replace),
+             map_replace_compar);
+
+       /* After the sort, maps replaced by name will be first in the list,
+        * because they have idx == -1.  Resolve them.
+        */
+       j = 0;
+       while (j < old_map_fds && map_replace[j].name) {
+               i = 0;
+               bpf_map__for_each(map, obj) {
+                       if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
+                               map_replace[j].idx = i;
+                               break;
+                       }
+                       i++;
+               }
+               if (map_replace[j].idx == -1) {
+                       p_err("unable to find map '%s'", map_replace[j].name);
+                       goto err_close_obj;
+               }
+               j++;
+       }
+       /* Resort if any names were resolved */
+       if (j)
+               qsort(map_replace, old_map_fds, sizeof(*map_replace),
+                     map_replace_compar);
+
+       /* Set ifindex and name reuse */
+       j = 0;
+       idx = 0;
+       bpf_map__for_each(map, obj) {
+               if (!bpf_map__is_offload_neutral(map))
+                       bpf_map__set_ifindex(map, ifindex);
+
+               if (j < old_map_fds && idx == map_replace[j].idx) {
+                       err = bpf_map__reuse_fd(map, map_replace[j++].fd);
+                       if (err) {
+                               p_err("unable to set up map reuse: %d", err);
+                               goto err_close_obj;
+                       }
+
+                       /* Next reuse wants to apply to the same map */
+                       if (j < old_map_fds && map_replace[j].idx == idx) {
+                               p_err("replacement for map idx %d specified more than once",
+                                     idx);
+                               goto err_close_obj;
+                       }
+               }
+
+               idx++;
+       }
+       if (j < old_map_fds) {
+               p_err("map idx '%d' not used", map_replace[j].idx);
+               goto err_close_obj;
        }
 
+       err = bpf_object__load(obj);
+       if (err) {
+               p_err("failed to load object file");
+               goto err_close_obj;
+       }
+
+       if (do_pin_fd(bpf_program__fd(prog), pinfile))
+               goto err_close_obj;
+
        if (json_output)
                jsonw_null(json_wtr);
 
+       bpf_object__close(obj);
+       for (i = 0; i < old_map_fds; i++)
+               close(map_replace[i].fd);
+       free(map_replace);
+
        return 0;
+
+err_close_obj:
+       bpf_object__close(obj);
+err_free_reuse_maps:
+       for (i = 0; i < old_map_fds; i++)
+               close(map_replace[i].fd);
+       free(map_replace);
+       return -1;
 }
 
 static int do_help(int argc, char **argv)
@@ -715,10 +939,19 @@ static int do_help(int argc, char **argv)
                "       %s %s dump xlated PROG [{ file FILE | opcodes | visual }]\n"
                "       %s %s dump jited  PROG [{ file FILE | opcodes }]\n"
                "       %s %s pin   PROG FILE\n"
-               "       %s %s load  OBJ  FILE\n"
+               "       %s %s load  OBJ  FILE [type TYPE] [dev NAME] \\\n"
+               "                         [map { idx IDX | name NAME } MAP]\n"
                "       %s %s help\n"
                "\n"
+               "       " HELP_SPEC_MAP "\n"
                "       " HELP_SPEC_PROGRAM "\n"
+               "       TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
+               "                 tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
+               "                 cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
+               "                 lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
+               "                 cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
+               "                 cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
+               "                 cgroup/sendmsg4 | cgroup/sendmsg6 }\n"
                "       " HELP_SPEC_OPTIONS "\n"
                "",
                bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
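For illustration, the new "map" keyword lets a program be loaded against maps that already exist instead of having libbpf create fresh ones. A hedged sketch of the command line (the object path, pin paths and map name are made up, not taken from the patch):

        # load prog.o, pin the program, and have the map named "stats"
        # reuse a map already pinned in bpffs instead of creating it
        bpftool prog load prog.o /sys/fs/bpf/myprog \
                map name stats pinned /sys/fs/bpf/stats_map

Maps can be addressed positionally with "idx" (their index within the object file) or symbolically with "name", per the MAP spec above.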
index b97f1da60dd1d75f401501428f4c1bfdecdd69f3..3284759df98ad4f325b25f719db0cfef7ac9f61c 100644 (file)
@@ -35,6 +35,7 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#define _GNU_SOURCE
 #include <stdarg.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -66,9 +67,8 @@ void kernel_syms_load(struct dump_data *dd)
        while (!feof(fp)) {
                if (!fgets(buff, sizeof(buff), fp))
                        break;
-               tmp = realloc(dd->sym_mapping,
-                             (dd->sym_count + 1) *
-                             sizeof(*dd->sym_mapping));
+               tmp = reallocarray(dd->sym_mapping, dd->sym_count + 1,
+                                  sizeof(*dd->sym_mapping));
                if (!tmp) {
 out:
                        free(dd->sym_mapping);
index 5b6dda3b1ca8f238e3f55660ca88f053733dda78..f216b2f5c3d7b591387acf171cd77d633b6c3757 100644 (file)
@@ -57,6 +57,7 @@ FEATURE_TESTS_BASIC :=                  \
         libunwind-aarch64               \
         pthread-attr-setaffinity-np     \
         pthread-barrier                \
+        reallocarray                    \
         stackprotector-all              \
         timerfd                         \
         libdw-dwarf-unwind              \
index dac9563b54707c6cb65deb3e609ecc9785d4bf0c..0516259be70f071f2533496ead690d1ebd5ba3b8 100644 (file)
@@ -14,6 +14,7 @@ FILES=                                          \
          test-libaudit.bin                      \
          test-libbfd.bin                        \
          test-disassembler-four-args.bin        \
+         test-reallocarray.bin                 \
          test-liberty.bin                       \
          test-liberty-z.bin                     \
          test-cplus-demangle.bin                \
@@ -204,6 +205,9 @@ $(OUTPUT)test-libbfd.bin:
 $(OUTPUT)test-disassembler-four-args.bin:
        $(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes
 
+$(OUTPUT)test-reallocarray.bin:
+       $(BUILD)
+
 $(OUTPUT)test-liberty.bin:
        $(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
 
diff --git a/tools/build/feature/test-reallocarray.c b/tools/build/feature/test-reallocarray.c
new file mode 100644 (file)
index 0000000..8170de3
--- /dev/null
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <stdlib.h>
+
+int main(void)
+{
+       return !!reallocarray(NULL, 1, 1);
+}
index 70fe612957338c84ffeb61f03c358c6ca1e32789..0d35f18006a136b4578f2298243e3728a067c272 100644 (file)
@@ -36,3 +36,7 @@
 #endif
 #define __printf(a, b) __attribute__((format(printf, a, b)))
 #define __scanf(a, b)  __attribute__((format(scanf, a, b)))
+
+#if GCC_VERSION >= 50100
+#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+#endif
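The builtins this macro gates share one calling convention: they return true when the mathematically exact result does not fit the destination, and store the wrapped value through the last argument. A minimal standalone sketch in plain C (no kernel headers needed; assumes gcc >= 5.1 or a clang providing these builtins):

        #include <stdio.h>

        int main(void)
        {
                unsigned int r;

                /* 0x10000 * 0x10000 == 2^32 overflows 32-bit unsigned int;
                 * the builtin reports it and r holds the wrapped value. */
                if (__builtin_mul_overflow(0x10000u, 0x10000u, &r))
                        printf("overflow, wrapped result = %u\n", r);
                return 0;
        }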
diff --git a/tools/include/linux/overflow.h b/tools/include/linux/overflow.h
new file mode 100644 (file)
index 0000000..8712ff7
--- /dev/null
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+#ifndef __LINUX_OVERFLOW_H
+#define __LINUX_OVERFLOW_H
+
+#include <linux/compiler.h>
+
+/*
+ * In the fallback code below, we need to compute the minimum and
+ * maximum values representable in a given type. These macros may also
+ * be useful elsewhere, so we provide them outside the
+ * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
+ *
+ * It would seem more obvious to do something like
+ *
+ * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
+ * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
+ *
+ * Unfortunately, the middle expressions, strictly speaking, have
+ * undefined behaviour, and at least some versions of gcc warn about
+ * the type_max expression (but not if -fsanitize=undefined is in
+ * effect; in that case, the warning is deferred to runtime...).
+ *
+ * The slightly excessive casting in type_min is to make sure the
+ * macros also produce sensible values for the exotic type _Bool. [The
+ * overflow checkers only almost work for _Bool, but that's
+ * a-feature-not-a-bug, since people shouldn't be doing arithmetic on
+ * _Bools. Besides, the gcc builtins don't allow _Bool* as third
+ * argument.]
+ *
+ * Idea stolen from
+ * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
+ * credit to Christian Biere.
+ */
+#define is_signed_type(type)       (((type)(-1)) < (type)1)
+#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
+#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+#define type_min(T) ((T)((T)-type_max(T)-(T)1))
+
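/* Illustration (editor's sketch, not part of the patch): for 8-bit types
 * the macros produce the expected limits. With C11 and kernel-style
 * aliases (u8 = unsigned char, s8 = signed char) this compiles cleanly:
 *
 *	_Static_assert(type_max(u8) == 255 && type_min(u8) == 0, "u8");
 *	_Static_assert(type_max(s8) == 127 && type_min(s8) == -128, "s8");
 *
 * __type_half_max() is 2^(bits-1) for unsigned and 2^(bits-2) for signed
 * types, so type_max() is assembled without ever shifting into the sign
 * bit, which is what makes the naive one-liners above undefined.
 */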
+
+#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
+/*
+ * For simplicity and code hygiene, the fallback code below insists on
+ * a, b and *d having the same type (similar to the min() and max()
+ * macros), whereas gcc's type-generic overflow checkers accept
+ * different types. Hence we don't just make check_add_overflow an
+ * alias for __builtin_add_overflow, but add type checks similar to
+ * below.
+ */
+#define check_add_overflow(a, b, d) ({         \
+       typeof(a) __a = (a);                    \
+       typeof(b) __b = (b);                    \
+       typeof(d) __d = (d);                    \
+       (void) (&__a == &__b);                  \
+       (void) (&__a == __d);                   \
+       __builtin_add_overflow(__a, __b, __d);  \
+})
+
+#define check_sub_overflow(a, b, d) ({         \
+       typeof(a) __a = (a);                    \
+       typeof(b) __b = (b);                    \
+       typeof(d) __d = (d);                    \
+       (void) (&__a == &__b);                  \
+       (void) (&__a == __d);                   \
+       __builtin_sub_overflow(__a, __b, __d);  \
+})
+
+#define check_mul_overflow(a, b, d) ({         \
+       typeof(a) __a = (a);                    \
+       typeof(b) __b = (b);                    \
+       typeof(d) __d = (d);                    \
+       (void) (&__a == &__b);                  \
+       (void) (&__a == __d);                   \
+       __builtin_mul_overflow(__a, __b, __d);  \
+})
+
+#else
+
+
+/* Checking for unsigned overflow is relatively easy without causing UB. */
+#define __unsigned_add_overflow(a, b, d) ({    \
+       typeof(a) __a = (a);                    \
+       typeof(b) __b = (b);                    \
+       typeof(d) __d = (d);                    \
+       (void) (&__a == &__b);                  \
+       (void) (&__a == __d);                   \
+       *__d = __a + __b;                       \
+       *__d < __a;                             \
+})
+#define __unsigned_sub_overflow(a, b, d) ({    \
+       typeof(a) __a = (a);                    \
+       typeof(b) __b = (b);                    \
+       typeof(d) __d = (d);                    \
+       (void) (&__a == &__b);                  \
+       (void) (&__a == __d);                   \
+       *__d = __a - __b;                       \
+       __a < __b;                              \
+})
+/*
+ * If one of a or b is a compile-time constant, this avoids a division.
+ */
+#define __unsigned_mul_overflow(a, b, d) ({            \
+       typeof(a) __a = (a);                            \
+       typeof(b) __b = (b);                            \
+       typeof(d) __d = (d);                            \
+       (void) (&__a == &__b);                          \
+       (void) (&__a == __d);                           \
+       *__d = __a * __b;                               \
+       __builtin_constant_p(__b) ?                     \
+         __b > 0 && __a > type_max(typeof(__a)) / __b : \
+         __a > 0 && __b > type_max(typeof(__b)) / __a;  \
+})
+
+/*
+ * For signed types, detecting overflow is much harder, especially if
+ * we want to avoid UB. But the interface of these macros is such that
+ * we must provide a result in *d, and in fact we must produce the
+ * result promised by gcc's builtins, which is simply the possibly
+ * wrapped-around value. Fortunately, we can just formally do the
+ * operations in the widest relevant unsigned type (u64) and then
+ * truncate the result - gcc is smart enough to generate the same code
+ * with and without the (u64) casts.
+ */
+
+/*
+ * Adding two signed integers can overflow only if they have the same
+ * sign, and overflow has happened iff the result has the opposite
+ * sign.
+ */
+#define __signed_add_overflow(a, b, d) ({      \
+       typeof(a) __a = (a);                    \
+       typeof(b) __b = (b);                    \
+       typeof(d) __d = (d);                    \
+       (void) (&__a == &__b);                  \
+       (void) (&__a == __d);                   \
+       *__d = (u64)__a + (u64)__b;             \
+       (((~(__a ^ __b)) & (*__d ^ __a))        \
+               & type_min(typeof(__a))) != 0;  \
+})
+
+/*
+ * Subtraction is similar, except that overflow can now happen only
+ * when the signs are opposite. In this case, overflow has happened if
+ * the result has the opposite sign of a.
+ */
+#define __signed_sub_overflow(a, b, d) ({      \
+       typeof(a) __a = (a);                    \
+       typeof(b) __b = (b);                    \
+       typeof(d) __d = (d);                    \
+       (void) (&__a == &__b);                  \
+       (void) (&__a == __d);                   \
+       *__d = (u64)__a - (u64)__b;             \
+       ((((__a ^ __b)) & (*__d ^ __a))         \
+               & type_min(typeof(__a))) != 0;  \
+})
+
+/*
+ * Signed multiplication is rather hard. gcc always follows C99, so
+ * division is truncated towards 0. This means that we can write the
+ * overflow check like this:
+ *
+ * (a > 0 && (b > MAX/a || b < MIN/a)) ||
+ * (a < -1 && (b > MIN/a || b < MAX/a)) ||
+ * (a == -1 && b == MIN)
+ *
+ * The redundant casts of -1 are to silence an annoying -Wtype-limits
+ * (included in -Wextra) warning: When the type is u8 or u16, the
+ * __builtin_choose_expr in check_mul_overflow obviously selects
+ * __unsigned_mul_overflow, but unfortunately gcc still parses this
+ * code and warns about the limited range of __b.
+ */
+
+#define __signed_mul_overflow(a, b, d) ({                              \
+       typeof(a) __a = (a);                                            \
+       typeof(b) __b = (b);                                            \
+       typeof(d) __d = (d);                                            \
+       typeof(a) __tmax = type_max(typeof(a));                         \
+       typeof(a) __tmin = type_min(typeof(a));                         \
+       (void) (&__a == &__b);                                          \
+       (void) (&__a == __d);                                           \
+       *__d = (u64)__a * (u64)__b;                                     \
+       (__b > 0   && (__a > __tmax/__b || __a < __tmin/__b)) ||        \
+       (__b < (typeof(__b))-1  && (__a > __tmin/__b || __a < __tmax/__b)) || \
+       (__b == (typeof(__b))-1 && __a == __tmin);                      \
+})
+
+
+#define check_add_overflow(a, b, d)                                    \
+       __builtin_choose_expr(is_signed_type(typeof(a)),                \
+                       __signed_add_overflow(a, b, d),                 \
+                       __unsigned_add_overflow(a, b, d))
+
+#define check_sub_overflow(a, b, d)                                    \
+       __builtin_choose_expr(is_signed_type(typeof(a)),                \
+                       __signed_sub_overflow(a, b, d),                 \
+                       __unsigned_sub_overflow(a, b, d))
+
+#define check_mul_overflow(a, b, d)                                    \
+       __builtin_choose_expr(is_signed_type(typeof(a)),                \
+                       __signed_mul_overflow(a, b, d),                 \
+                       __unsigned_mul_overflow(a, b, d))
+
+
+#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
+
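/* Illustration (editor's sketch, not part of the patch): both branches
 * share the same calling convention, returning true on overflow while
 * still storing the wrapped result:
 *
 *	u8 a = 200, b = 100, sum;
 *	bool ovf = check_add_overflow(a, b, &sum); // true, sum == 44
 *
 * The fallback's unsigned test (*__d < __a) is sound because unsigned
 * wraparound is well defined: the sum is smaller than an addend exactly
 * when the addition wrapped.
 */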
+/**
+ * array_size() - Calculate size of 2-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ *
+ * Calculates size of 2-dimensional array: @a * @b.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+static inline __must_check size_t array_size(size_t a, size_t b)
+{
+       size_t bytes;
+
+       if (check_mul_overflow(a, b, &bytes))
+               return SIZE_MAX;
+
+       return bytes;
+}
+
+/**
+ * array3_size() - Calculate size of 3-dimensional array.
+ *
+ * @a: dimension one
+ * @b: dimension two
+ * @c: dimension three
+ *
+ * Calculates size of 3-dimensional array: @a * @b * @c.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
+{
+       size_t bytes;
+
+       if (check_mul_overflow(a, b, &bytes))
+               return SIZE_MAX;
+       if (check_mul_overflow(bytes, c, &bytes))
+               return SIZE_MAX;
+
+       return bytes;
+}
+
+static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c)
+{
+       size_t bytes;
+
+       if (check_mul_overflow(n, size, &bytes))
+               return SIZE_MAX;
+       if (check_add_overflow(bytes, c, &bytes))
+               return SIZE_MAX;
+
+       return bytes;
+}
+
+/**
+ * struct_size() - Calculate size of structure with trailing array.
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @n: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure @p followed by an
+ * array of @n @member elements.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size(p, member, n)                                      \
+       __ab_c_size(n,                                                  \
+                   sizeof(*(p)->member) + __must_be_array((p)->member),\
+                   sizeof(*(p)))
+
+#endif /* __LINUX_OVERFLOW_H */
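These helpers are what make the header pay off in allocation paths: a size computation that would silently wrap instead saturates to SIZE_MAX, which the allocator then refuses. A hedged sketch of the intended calling pattern (the struct and the use of plain malloc() are illustrative, not from the patch):

        struct sample {
                size_t len;
                int values[];           /* trailing flexible array */
        };

        /* room for one struct sample plus n trailing elements; on
         * overflow struct_size() yields SIZE_MAX and the allocation
         * fails cleanly. s appears only inside sizeof, so it is not
         * evaluated and may be uninitialized here. */
        struct sample *s = malloc(struct_size(s, values, n));

        /* a rows-by-cols byte buffer, saturating instead of wrapping */
        unsigned char *grid = malloc(array_size(rows, cols));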
diff --git a/tools/include/tools/libc_compat.h b/tools/include/tools/libc_compat.h
new file mode 100644 (file)
index 0000000..664ced8
--- /dev/null
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#ifndef __TOOLS_LIBC_COMPAT_H
+#define __TOOLS_LIBC_COMPAT_H
+
+#include <stdlib.h>
+#include <linux/overflow.h>
+
+#ifdef COMPAT_NEED_REALLOCARRAY
+static inline void *reallocarray(void *ptr, size_t nmemb, size_t size)
+{
+       size_t bytes;
+
+       if (unlikely(check_mul_overflow(nmemb, size, &bytes)))
+               return NULL;
+       return realloc(ptr, bytes);
+}
+#endif
+#endif
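The shim mirrors the glibc/OpenBSD reallocarray() semantics: NULL when nmemb * size would overflow, plain realloc() otherwise. The conversions elsewhere in this series (xlated_dumper.c and libbpf.c) all follow the same mechanical shape; a minimal before/after sketch:

        /* before: the size multiplication can wrap and under-allocate */
        tmp = realloc(arr, (n + 1) * sizeof(*arr));

        /* after: a wrapped multiplication surfaces as an allocation
         * failure instead of a silently undersized buffer */
        tmp = reallocarray(arr, n + 1, sizeof(*arr));
        if (!tmp)
                goto err;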
index 6fdff5945c8a08f27af713f6b59cb27b315da447..9c660e1688abe1cd6bf0e22bf709515e8a463e0d 100644 (file)
@@ -680,6 +680,13 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_ATOMIC  3
 
+/**
+ * DRM_CLIENT_CAP_ASPECT_RATIO
+ *
+ * If set to 1, the DRM core will provide aspect ratio information in modes.
+ */
+#define DRM_CLIENT_CAP_ASPECT_RATIO    4
+
 /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
        __u64 capability;
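Userspace opts in through the existing client-cap ioctl; a hedged sketch using libdrm's wrapper (assumes an already-open DRM fd):

        /* ask for aspect-ratio bits in subsequently reported modes */
        if (drmSetClientCap(fd, DRM_CLIENT_CAP_ASPECT_RATIO, 1))
                fprintf(stderr, "aspect-ratio cap not supported\n");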
index e0b06784f2279d9f428da4a0bce526721a452570..870113916caca5ef3acbad43c821d5b5111d0ffc 100644 (file)
@@ -1826,7 +1826,7 @@ union bpf_attr {
  *             A non-negative value equal to or less than *size* on success,
  *             or a negative error in case of failure.
  *
- * int skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
  *     Description
  *             This helper is similar to **bpf_skb_load_bytes**\ () in that
  *             it provides an easy way to load *len* bytes from *offset*
@@ -1857,7 +1857,8 @@ union bpf_attr {
  *             is resolved), the nexthop address is returned in ipv4_dst
  *             or ipv6_dst based on family, smac is set to mac address of
  *             egress device, dmac is set to nexthop mac address, rt_metric
- *             is set to metric from route (IPv4/IPv6 only).
+ *             is set to metric from route (IPv4/IPv6 only), and ifindex
+ *             is set to the device index of the nexthop from the FIB lookup.
  *
  *             *plen* argument is the size of the passed in struct.
  *             *flags* argument can be a combination of one or more of the
@@ -1873,9 +1874,10 @@ union bpf_attr {
  *             *ctx* is either **struct xdp_md** for XDP programs or
  *             **struct sk_buff** for tc cls_act programs.
  *     Return
- *             Egress device index on success, 0 if packet needs to continue
- *             up the stack for further processing or a negative error in case
- *             of failure.
+ *             * < 0 if any input argument is invalid
+ *             *   0 on success (packet is forwarded, nexthop neighbor exists)
+ *             * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
+ *               packet is not forwarded or needs assist from full stack
  *
  * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
  *     Description
@@ -2031,7 +2033,6 @@ union bpf_attr {
  *             This helper is only available if the kernel was compiled with
  *             the **CONFIG_BPF_LIRC_MODE2** configuration option set to
  *             "**y**".
- *
  *     Return
  *             0
  *
@@ -2051,7 +2052,6 @@ union bpf_attr {
  *             This helper is only available if the kernel was compiled with
  *             the **CONFIG_BPF_LIRC_MODE2** configuration option set to
  *             "**y**".
- *
  *     Return
  *             0
  *
@@ -2555,6 +2555,9 @@ enum {
                                         * Arg1: old_state
                                         * Arg2: new_state
                                         */
+       BPF_SOCK_OPS_TCP_LISTEN_CB,     /* Called on listen(2), right after
+                                        * socket transition to LISTEN state.
+                                        */
 };
 
 /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
@@ -2612,6 +2615,18 @@ struct bpf_raw_tracepoint_args {
 #define BPF_FIB_LOOKUP_DIRECT  BIT(0)
 #define BPF_FIB_LOOKUP_OUTPUT  BIT(1)
 
+enum {
+       BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
+       BPF_FIB_LKUP_RET_BLACKHOLE,    /* dest is blackholed; can be dropped */
+       BPF_FIB_LKUP_RET_UNREACHABLE,  /* dest is unreachable; can be dropped */
+       BPF_FIB_LKUP_RET_PROHIBIT,     /* dest not allowed; can be dropped */
+       BPF_FIB_LKUP_RET_NOT_FWDED,    /* packet is not forwarded */
+       BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
+       BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation */
+       BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nh */
+       BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
+};
+
 struct bpf_fib_lookup {
        /* input:  network family for lookup (AF_INET, AF_INET6)
         * output: network family of egress nexthop
@@ -2625,12 +2640,16 @@ struct bpf_fib_lookup {
 
        /* total length of packet from network header - used for MTU check */
        __u16   tot_len;
-       __u32   ifindex;  /* L3 device index for lookup */
+
+       /* input: L3 device index for lookup
+        * output: device index from FIB lookup
+        */
+       __u32   ifindex;
 
        union {
                /* inputs to lookup */
                __u8    tos;            /* AF_INET  */
-               __be32  flowlabel;      /* AF_INET6 */
+               __be32  flowinfo;       /* AF_INET6, flow_label + priority */
 
                /* output: metric of fib result (IPv4/IPv6 only) */
                __u32   rt_metric;
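Taken together, the ifindex output and the BPF_FIB_LKUP_RET_* codes change how callers branch on the helper. A hedged sketch of the new convention in an XDP program (params setup and MAC rewriting elided; names follow the UAPI above):

        struct bpf_fib_lookup params = {};
        int rc;

        /* fill in family, tot_len, ifindex, addresses, ... */
        rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
        if (rc < 0)
                return XDP_ABORTED;     /* invalid input arguments */
        if (rc == BPF_FIB_LKUP_RET_SUCCESS)
                /* params.ifindex, smac and dmac are now valid */
                return bpf_redirect(params.ifindex, 0);
        return XDP_PASS;        /* NO_NEIGH, NOT_FWDED, ...: let the stack handle it */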
index 68699f654118592527096dc26336f57da6a01cdc..cf01b68242448512416c1b1aa25f0904915aad0a 100644 (file)
@@ -333,6 +333,7 @@ enum {
        IFLA_BRPORT_BCAST_FLOOD,
        IFLA_BRPORT_GROUP_FWD_MASK,
        IFLA_BRPORT_NEIGH_SUPPRESS,
+       IFLA_BRPORT_ISOLATED,
        __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -516,6 +517,7 @@ enum {
        IFLA_VXLAN_COLLECT_METADATA,
        IFLA_VXLAN_LABEL,
        IFLA_VXLAN_GPE,
+       IFLA_VXLAN_TTL_INHERIT,
        __IFLA_VXLAN_MAX
 };
 #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
index 39e364c70caf780312808e179a1bb234aa45460e..b6270a3b38e9f3fb410e8c80d8658b2c01a8ef96 100644 (file)
@@ -948,6 +948,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_BPB 152
 #define KVM_CAP_GET_MSR_FEATURES 153
 #define KVM_CAP_HYPERV_EVENTFD 154
+#define KVM_CAP_HYPERV_TLBFLUSH 155
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index 6070e655042dcb18f1013e1c740d8304023e79c4..13a861135127f04e21590955de6802ea40380208 100644 (file)
@@ -1 +1 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o
index 5390e7725e4306408bbe5078652508f09263b0df..7a8e4c98ef1a681922bb91745d5e99b3a5b25777 100644 (file)
@@ -66,7 +66,7 @@ ifndef VERBOSE
 endif
 
 FEATURE_USER = .libbpf
-FEATURE_TESTS = libelf libelf-getphdrnum libelf-mmap bpf
+FEATURE_TESTS = libelf libelf-getphdrnum libelf-mmap bpf reallocarray
 FEATURE_DISPLAY = libelf bpf
 
 INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi -I$(srctree)/tools/perf
@@ -120,6 +120,10 @@ ifeq ($(feature-libelf-getphdrnum), 1)
   override CFLAGS += -DHAVE_ELF_GETPHDRNUM_SUPPORT
 endif
 
+ifeq ($(feature-reallocarray), 0)
+  override CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
+endif
+
 # Append required CFLAGS
 override CFLAGS += $(EXTRA_WARNINGS)
 override CFLAGS += -Werror -Wall
index 8c54a4b6f187539cfe21d7a93e5748cab8b4ad59..03161be094b44cfdbca5790ee3e5b12f5e57051c 100644 (file)
 
 #define BTF_MAX_NR_TYPES 65535
 
+#define IS_MODIFIER(k) (((k) == BTF_KIND_TYPEDEF) || \
+               ((k) == BTF_KIND_VOLATILE) || \
+               ((k) == BTF_KIND_CONST) || \
+               ((k) == BTF_KIND_RESTRICT))
+
 static struct btf_type btf_void;
 
 struct btf {
@@ -33,14 +38,6 @@ struct btf {
        int fd;
 };
 
-static const char *btf_name_by_offset(const struct btf *btf, uint32_t offset)
-{
-       if (offset < btf->hdr->str_len)
-               return &btf->strings[offset];
-       else
-               return NULL;
-}
-
 static int btf_add_type(struct btf *btf, struct btf_type *t)
 {
        if (btf->types_size - btf->nr_types < 2) {
@@ -190,15 +187,6 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
        return 0;
 }
 
-static const struct btf_type *btf_type_by_id(const struct btf *btf,
-                                            uint32_t type_id)
-{
-       if (type_id > btf->nr_types)
-               return NULL;
-
-       return btf->types[type_id];
-}
-
 static bool btf_type_is_void(const struct btf_type *t)
 {
        return t == &btf_void || BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
@@ -234,7 +222,7 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id)
        int64_t size = -1;
        int i;
 
-       t = btf_type_by_id(btf, type_id);
+       t = btf__type_by_id(btf, type_id);
        for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
             i++) {
                size = btf_type_size(t);
@@ -259,7 +247,7 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id)
                        return -EINVAL;
                }
 
-               t = btf_type_by_id(btf, type_id);
+               t = btf__type_by_id(btf, type_id);
        }
 
        if (size < 0)
@@ -271,6 +259,26 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id)
        return nelems * size;
 }
 
+int btf__resolve_type(const struct btf *btf, __u32 type_id)
+{
+       const struct btf_type *t;
+       int depth = 0;
+
+       t = btf__type_by_id(btf, type_id);
+       while (depth < MAX_RESOLVE_DEPTH &&
+              !btf_type_is_void_or_null(t) &&
+              IS_MODIFIER(BTF_INFO_KIND(t->info))) {
+               type_id = t->type;
+               t = btf__type_by_id(btf, type_id);
+               depth++;
+       }
+
+       if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
+               return -EINVAL;
+
+       return type_id;
+}
+
 int32_t btf__find_by_name(const struct btf *btf, const char *type_name)
 {
        uint32_t i;
@@ -280,7 +288,7 @@ int32_t btf__find_by_name(const struct btf *btf, const char *type_name)
 
        for (i = 1; i <= btf->nr_types; i++) {
                const struct btf_type *t = btf->types[i];
-               const char *name = btf_name_by_offset(btf, t->name_off);
+               const char *name = btf__name_by_offset(btf, t->name_off);
 
                if (name && !strcmp(type_name, name))
                        return i;
@@ -371,3 +379,20 @@ int btf__fd(const struct btf *btf)
 {
        return btf->fd;
 }
+
+const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
+{
+       if (offset < btf->hdr->str_len)
+               return &btf->strings[offset];
+       else
+               return NULL;
+}
+
+const struct btf_type *btf__type_by_id(const struct btf *btf,
+                                      __u32 type_id)
+{
+       if (type_id > btf->nr_types)
+               return NULL;
+
+       return btf->types[type_id];
+}
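btf__resolve_type() peels away the modifier kinds named in IS_MODIFIER until it reaches a concrete type, so a caller can map, say, a const typedef chain back to the underlying struct. A hedged consumer sketch (the type name is hypothetical; assumes a loaded struct btf handle, error checks trimmed):

        int32_t id = btf__find_by_name(btf, "my_value_t");
        if (id > 0) {
                int resolved = btf__resolve_type(btf, id);
                if (resolved >= 0) {
                        const struct btf_type *t = btf__type_by_id(btf, resolved);
                        printf("underlying type: %s\n",
                               btf__name_by_offset(btf, t->name_off));
                }
        }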
index 74bb344035bb9cb3d5a4391f7d80a21ee05fb064..24f361d99a5e87aeaf914a37982088b803a8aafd 100644 (file)
@@ -17,6 +17,9 @@ void btf__free(struct btf *btf);
 struct btf *btf__new(uint8_t *data, uint32_t size, btf_print_fn_t err_log);
 int32_t btf__find_by_name(const struct btf *btf, const char *type_name);
 int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id);
+int btf__resolve_type(const struct btf *btf, __u32 type_id);
 int btf__fd(const struct btf *btf);
+const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
+const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id);
 
 #endif
index a1e96b5de5ff88c13926fdb28a70150f821ec694..955f8eafbf41e516810662b2c42e4556d800192f 100644 (file)
@@ -22,6 +22,7 @@
  * License along with this program; if not,  see <http://www.gnu.org/licenses>
  */
 
+#define _GNU_SOURCE
 #include <stdlib.h>
 #include <stdio.h>
 #include <stdarg.h>
@@ -41,6 +42,7 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/vfs.h>
+#include <tools/libc_compat.h>
 #include <libelf.h>
 #include <gelf.h>
 
@@ -95,54 +97,6 @@ void libbpf_set_print(libbpf_print_fn_t warn,
 
 #define STRERR_BUFSIZE  128
 
-#define ERRNO_OFFSET(e)                ((e) - __LIBBPF_ERRNO__START)
-#define ERRCODE_OFFSET(c)      ERRNO_OFFSET(LIBBPF_ERRNO__##c)
-#define NR_ERRNO       (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
-
-static const char *libbpf_strerror_table[NR_ERRNO] = {
-       [ERRCODE_OFFSET(LIBELF)]        = "Something wrong in libelf",
-       [ERRCODE_OFFSET(FORMAT)]        = "BPF object format invalid",
-       [ERRCODE_OFFSET(KVERSION)]      = "'version' section incorrect or lost",
-       [ERRCODE_OFFSET(ENDIAN)]        = "Endian mismatch",
-       [ERRCODE_OFFSET(INTERNAL)]      = "Internal error in libbpf",
-       [ERRCODE_OFFSET(RELOC)]         = "Relocation failed",
-       [ERRCODE_OFFSET(VERIFY)]        = "Kernel verifier blocks program loading",
-       [ERRCODE_OFFSET(PROG2BIG)]      = "Program too big",
-       [ERRCODE_OFFSET(KVER)]          = "Incorrect kernel version",
-       [ERRCODE_OFFSET(PROGTYPE)]      = "Kernel doesn't support this program type",
-       [ERRCODE_OFFSET(WRNGPID)]       = "Wrong pid in netlink message",
-       [ERRCODE_OFFSET(INVSEQ)]        = "Invalid netlink sequence",
-};
-
-int libbpf_strerror(int err, char *buf, size_t size)
-{
-       if (!buf || !size)
-               return -1;
-
-       err = err > 0 ? err : -err;
-
-       if (err < __LIBBPF_ERRNO__START) {
-               int ret;
-
-               ret = strerror_r(err, buf, size);
-               buf[size - 1] = '\0';
-               return ret;
-       }
-
-       if (err < __LIBBPF_ERRNO__END) {
-               const char *msg;
-
-               msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
-               snprintf(buf, size, "%s", msg);
-               buf[size - 1] = '\0';
-               return 0;
-       }
-
-       snprintf(buf, size, "Unknown libbpf error %d", err);
-       buf[size - 1] = '\0';
-       return -1;
-}
-
 #define CHECK_ERR(action, err, out) do {       \
        err = action;                   \
        if (err)                        \
@@ -234,6 +188,7 @@ struct bpf_object {
        size_t nr_maps;
 
        bool loaded;
+       bool has_pseudo_calls;
 
        /*
         * Information when doing elf related work. Only valid if fd
@@ -368,7 +323,7 @@ bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
        progs = obj->programs;
        nr_progs = obj->nr_programs;
 
-       progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
+       progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
        if (!progs) {
                /*
                 * In this case the original obj->programs
@@ -400,10 +355,6 @@ bpf_object__init_prog_names(struct bpf_object *obj)
                const char *name = NULL;
 
                prog = &obj->programs[pi];
-               if (prog->idx == obj->efile.text_shndx) {
-                       name = ".text";
-                       goto skip_search;
-               }
 
                for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
                     si++) {
@@ -426,12 +377,15 @@ bpf_object__init_prog_names(struct bpf_object *obj)
                        }
                }
 
+               if (!name && prog->idx == obj->efile.text_shndx)
+                       name = ".text";
+
                if (!name) {
                        pr_warning("failed to find sym for prog %s\n",
                                   prog->section_name);
                        return -EINVAL;
                }
-skip_search:
+
                prog->name = strdup(name);
                if (!prog->name) {
                        pr_warning("failed to allocate memory for prog sym %s\n",
@@ -870,8 +824,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
                                continue;
                        }
 
-                       reloc = realloc(reloc,
-                                       sizeof(*obj->efile.reloc) * nr_reloc);
+                       reloc = reallocarray(reloc, nr_reloc,
+                                            sizeof(*obj->efile.reloc));
                        if (!reloc) {
                                pr_warning("realloc failed\n");
                                err = -ENOMEM;
@@ -981,6 +935,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
                        prog->reloc_desc[i].type = RELO_CALL;
                        prog->reloc_desc[i].insn_idx = insn_idx;
                        prog->reloc_desc[i].text_off = sym.st_value;
+                       obj->has_pseudo_calls = true;
                        continue;
                }
 
@@ -1080,6 +1035,53 @@ static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
        return 0;
 }
 
+int bpf_map__reuse_fd(struct bpf_map *map, int fd)
+{
+       struct bpf_map_info info = {};
+       __u32 len = sizeof(info);
+       int new_fd, err;
+       char *new_name;
+
+       err = bpf_obj_get_info_by_fd(fd, &info, &len);
+       if (err)
+               return err;
+
+       new_name = strdup(info.name);
+       if (!new_name)
+               return -errno;
+
+       new_fd = open("/", O_RDONLY | O_CLOEXEC);
+       if (new_fd < 0)
+               goto err_free_new_name;
+
+       new_fd = dup3(fd, new_fd, O_CLOEXEC);
+       if (new_fd < 0)
+               goto err_close_new_fd;
+
+       err = zclose(map->fd);
+       if (err)
+               goto err_close_new_fd;
+       free(map->name);
+
+       map->fd = new_fd;
+       map->name = new_name;
+       map->def.type = info.type;
+       map->def.key_size = info.key_size;
+       map->def.value_size = info.value_size;
+       map->def.max_entries = info.max_entries;
+       map->def.map_flags = info.map_flags;
+       map->btf_key_type_id = info.btf_key_type_id;
+       map->btf_value_type_id = info.btf_value_type_id;
+
+       return 0;
+
+err_close_new_fd:
+       close(new_fd);
+err_free_new_name:
+       free(new_name);
+       return -errno;
+}
+
 static int
 bpf_object__create_maps(struct bpf_object *obj)
 {
@@ -1092,6 +1094,12 @@ bpf_object__create_maps(struct bpf_object *obj)
                struct bpf_map_def *def = &map->def;
                int *pfd = &map->fd;
 
+               if (map->fd >= 0) {
+                       pr_debug("skip map create (preset) %s: fd=%d\n",
+                                map->name, map->fd);
+                       continue;
+               }
+
                create_attr.name = map->name;
                create_attr.map_ifindex = map->map_ifindex;
                create_attr.map_type = def->type;
@@ -1162,7 +1170,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
                        return -LIBBPF_ERRNO__RELOC;
                }
                new_cnt = prog->insns_cnt + text->insns_cnt;
-               new_insn = realloc(prog->insns, new_cnt * sizeof(*insn));
+               new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
                if (!new_insn) {
                        pr_warning("oom in prog realloc\n");
                        return -ENOMEM;
@@ -1426,6 +1434,12 @@ bpf_program__load(struct bpf_program *prog,
        return err;
 }
 
+static bool bpf_program__is_function_storage(struct bpf_program *prog,
+                                            struct bpf_object *obj)
+{
+       return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
+}
+
 static int
 bpf_object__load_progs(struct bpf_object *obj)
 {
@@ -1433,7 +1447,7 @@ bpf_object__load_progs(struct bpf_object *obj)
        int err;
 
        for (i = 0; i < obj->nr_programs; i++) {
-               if (obj->programs[i].idx == obj->efile.text_shndx)
+               if (bpf_program__is_function_storage(&obj->programs[i], obj))
                        continue;
                err = bpf_program__load(&obj->programs[i],
                                        obj->license,
@@ -1513,15 +1527,26 @@ __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
        return ERR_PTR(err);
 }
 
-struct bpf_object *bpf_object__open(const char *path)
+struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
 {
        /* param validation */
-       if (!path)
+       if (!attr->file)
                return NULL;
 
-       pr_debug("loading %s\n", path);
+       pr_debug("loading %s\n", attr->file);
+
+       return __bpf_object__open(attr->file, NULL, 0,
+                                 bpf_prog_type__needs_kver(attr->prog_type));
+}
 
-       return __bpf_object__open(path, NULL, 0, true);
+struct bpf_object *bpf_object__open(const char *path)
+{
+       struct bpf_object_open_attr attr = {
+               .file           = path,
+               .prog_type      = BPF_PROG_TYPE_UNSPEC,
+       };
+
+       return bpf_object__open_xattr(&attr);
 }
 
 struct bpf_object *bpf_object__open_buffer(void *obj_buf,
@@ -1858,8 +1883,8 @@ void *bpf_object__priv(struct bpf_object *obj)
        return obj ? obj->priv : ERR_PTR(-EINVAL);
 }
 
-struct bpf_program *
-bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
+static struct bpf_program *
+__bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
 {
        size_t idx;
 
@@ -1880,6 +1905,18 @@ bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
        return &obj->programs[idx];
 }
 
+struct bpf_program *
+bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
+{
+       struct bpf_program *prog = prev;
+
+       do {
+               prog = __bpf_program__next(prog, obj);
+       } while (prog && bpf_program__is_function_storage(prog, obj));
+
+       return prog;
+}
+
 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
                          bpf_program_clear_priv_t clear_priv)
 {
@@ -1896,6 +1933,11 @@ void *bpf_program__priv(struct bpf_program *prog)
        return prog ? prog->priv : ERR_PTR(-EINVAL);
 }
 
+void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
+{
+       prog->prog_ifindex = ifindex;
+}
+
 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
 {
        const char *title;
@@ -2037,9 +2079,11 @@ static const struct {
        BPF_PROG_SEC("lwt_in",          BPF_PROG_TYPE_LWT_IN),
        BPF_PROG_SEC("lwt_out",         BPF_PROG_TYPE_LWT_OUT),
        BPF_PROG_SEC("lwt_xmit",        BPF_PROG_TYPE_LWT_XMIT),
+       BPF_PROG_SEC("lwt_seg6local",   BPF_PROG_TYPE_LWT_SEG6LOCAL),
        BPF_PROG_SEC("sockops",         BPF_PROG_TYPE_SOCK_OPS),
        BPF_PROG_SEC("sk_skb",          BPF_PROG_TYPE_SK_SKB),
        BPF_PROG_SEC("sk_msg",          BPF_PROG_TYPE_SK_MSG),
+       BPF_PROG_SEC("lirc_mode2",      BPF_PROG_TYPE_LIRC_MODE2),
        BPF_SA_PROG_SEC("cgroup/bind4", BPF_CGROUP_INET4_BIND),
        BPF_SA_PROG_SEC("cgroup/bind6", BPF_CGROUP_INET6_BIND),
        BPF_SA_PROG_SEC("cgroup/connect4", BPF_CGROUP_INET4_CONNECT),
@@ -2055,23 +2099,31 @@ static const struct {
 #undef BPF_S_PROG_SEC
 #undef BPF_SA_PROG_SEC
 
-static int bpf_program__identify_section(struct bpf_program *prog)
+int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
+                            enum bpf_attach_type *expected_attach_type)
 {
        int i;
 
-       if (!prog->section_name)
-               goto err;
-
-       for (i = 0; i < ARRAY_SIZE(section_names); i++)
-               if (strncmp(prog->section_name, section_names[i].sec,
-                           section_names[i].len) == 0)
-                       return i;
+       if (!name)
+               return -EINVAL;
 
-err:
-       pr_warning("failed to guess program type based on section name %s\n",
-                  prog->section_name);
+       for (i = 0; i < ARRAY_SIZE(section_names); i++) {
+               if (strncmp(name, section_names[i].sec, section_names[i].len))
+                       continue;
+               *prog_type = section_names[i].prog_type;
+               *expected_attach_type = section_names[i].expected_attach_type;
+               return 0;
+       }
+       return -EINVAL;
+}
 
-       return -1;
+static int
+bpf_program__identify_section(struct bpf_program *prog,
+                             enum bpf_prog_type *prog_type,
+                             enum bpf_attach_type *expected_attach_type)
+{
+       return libbpf_prog_type_by_name(prog->section_name, prog_type,
+                                       expected_attach_type);
 }
 
 int bpf_map__fd(struct bpf_map *map)
@@ -2120,6 +2172,16 @@ void *bpf_map__priv(struct bpf_map *map)
        return map ? map->priv : ERR_PTR(-EINVAL);
 }
 
+bool bpf_map__is_offload_neutral(struct bpf_map *map)
+{
+       return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
+}
+
+void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
+{
+       map->map_ifindex = ifindex;
+}
+
 struct bpf_map *
 bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
 {
@@ -2194,12 +2256,15 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
                        struct bpf_object **pobj, int *prog_fd)
 {
+       struct bpf_object_open_attr open_attr = {
+               .file           = attr->file,
+               .prog_type      = attr->prog_type,
+       };
        struct bpf_program *prog, *first_prog = NULL;
        enum bpf_attach_type expected_attach_type;
        enum bpf_prog_type prog_type;
        struct bpf_object *obj;
        struct bpf_map *map;
-       int section_idx;
        int err;
 
        if (!attr)
@@ -2207,8 +2272,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
        if (!attr->file)
                return -EINVAL;
 
-       obj = __bpf_object__open(attr->file, NULL, 0,
-                                bpf_prog_type__needs_kver(attr->prog_type));
+       obj = bpf_object__open_xattr(&open_attr);
        if (IS_ERR_OR_NULL(obj))
                return -ENOENT;
 
@@ -2221,26 +2285,27 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
                prog->prog_ifindex = attr->ifindex;
                expected_attach_type = attr->expected_attach_type;
                if (prog_type == BPF_PROG_TYPE_UNSPEC) {
-                       section_idx = bpf_program__identify_section(prog);
-                       if (section_idx < 0) {
+                       err = bpf_program__identify_section(prog, &prog_type,
+                                                           &expected_attach_type);
+                       if (err < 0) {
+                               pr_warning("failed to guess program type based on section name %s\n",
+                                          prog->section_name);
                                bpf_object__close(obj);
                                return -EINVAL;
                        }
-                       prog_type = section_names[section_idx].prog_type;
-                       expected_attach_type =
-                               section_names[section_idx].expected_attach_type;
                }
 
                bpf_program__set_type(prog, prog_type);
                bpf_program__set_expected_attach_type(prog,
                                                      expected_attach_type);
 
-               if (prog->idx != obj->efile.text_shndx && !first_prog)
+               if (!bpf_program__is_function_storage(prog, obj) && !first_prog)
                        first_prog = prog;
        }
 
        bpf_map__for_each(map, obj) {
-               map->map_ifindex = attr->ifindex;
+               if (!bpf_map__is_offload_neutral(map))
+                       map->map_ifindex = attr->ifindex;
        }
 
        if (!first_prog) {
index 09976531aa74dc7583a8ad9db422ea9774a2c87f..1f8fc20604605d512f5fa12517bbeed4b9979dac 100644 (file)
@@ -66,7 +66,13 @@ void libbpf_set_print(libbpf_print_fn_t warn,
 /* Hide internal to user */
 struct bpf_object;
 
+struct bpf_object_open_attr {
+       const char *file;
+       enum bpf_prog_type prog_type;
+};
+
 struct bpf_object *bpf_object__open(const char *path);
+struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr);
 struct bpf_object *bpf_object__open_buffer(void *obj_buf,
                                           size_t obj_buf_sz,
                                           const char *name);
@@ -92,6 +98,9 @@ int bpf_object__set_priv(struct bpf_object *obj, void *priv,
                         bpf_object_clear_priv_t clear_priv);
 void *bpf_object__priv(struct bpf_object *prog);
 
+int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
+                            enum bpf_attach_type *expected_attach_type);
+
 /* Accessors of bpf_program */
 struct bpf_program;
 struct bpf_program *bpf_program__next(struct bpf_program *prog,
@@ -109,6 +118,7 @@ int bpf_program__set_priv(struct bpf_program *prog, void *priv,
                          bpf_program_clear_priv_t clear_priv);
 
 void *bpf_program__priv(struct bpf_program *prog);
+void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex);
 
 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy);
 
@@ -251,6 +261,9 @@ typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
 int bpf_map__set_priv(struct bpf_map *map, void *priv,
                      bpf_map_clear_priv_t clear_priv);
 void *bpf_map__priv(struct bpf_map *map);
+int bpf_map__reuse_fd(struct bpf_map *map, int fd);
+bool bpf_map__is_offload_neutral(struct bpf_map *map);
+void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
 int bpf_map__pin(struct bpf_map *map, const char *path);
 
 long libbpf_get_error(const void *ptr);
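These additions are exactly the hooks the new bpftool loader exercises: open with an explicit (or guessed) program type, pre-seed map fds so creation is skipped, and steer hardware offload with an ifindex. A hedged consumer sketch (the object path, map name and pinned_fd are illustrative; error handling trimmed):

        struct bpf_object_open_attr attr = {
                .file      = "prog.o",
                .prog_type = BPF_PROG_TYPE_UNSPEC, /* derive from sections */
        };
        struct bpf_object *obj = bpf_object__open_xattr(&attr);
        struct bpf_map *map;

        bpf_map__for_each(map, obj) {
                if (!bpf_map__is_offload_neutral(map))
                        bpf_map__set_ifindex(map, ifindex);
                if (!strcmp(bpf_map__name(map), "stats"))
                        bpf_map__reuse_fd(map, pinned_fd); /* skip creation */
        }
        bpf_object__load(obj);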
diff --git a/tools/lib/bpf/libbpf_errno.c b/tools/lib/bpf/libbpf_errno.c
new file mode 100644 (file)
index 0000000..d9ba851
--- /dev/null
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: LGPL-2.1
+
+/*
+ * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
+ * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2015 Huawei Inc.
+ * Copyright (C) 2017 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not,  see <http://www.gnu.org/licenses>
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "libbpf.h"
+
+#define ERRNO_OFFSET(e)                ((e) - __LIBBPF_ERRNO__START)
+#define ERRCODE_OFFSET(c)      ERRNO_OFFSET(LIBBPF_ERRNO__##c)
+#define NR_ERRNO       (__LIBBPF_ERRNO__END - __LIBBPF_ERRNO__START)
+
+static const char *libbpf_strerror_table[NR_ERRNO] = {
+       [ERRCODE_OFFSET(LIBELF)]        = "Something wrong in libelf",
+       [ERRCODE_OFFSET(FORMAT)]        = "BPF object format invalid",
+       [ERRCODE_OFFSET(KVERSION)]      = "'version' section incorrect or lost",
+       [ERRCODE_OFFSET(ENDIAN)]        = "Endian mismatch",
+       [ERRCODE_OFFSET(INTERNAL)]      = "Internal error in libbpf",
+       [ERRCODE_OFFSET(RELOC)]         = "Relocation failed",
+       [ERRCODE_OFFSET(VERIFY)]        = "Kernel verifier blocks program loading",
+       [ERRCODE_OFFSET(PROG2BIG)]      = "Program too big",
+       [ERRCODE_OFFSET(KVER)]          = "Incorrect kernel version",
+       [ERRCODE_OFFSET(PROGTYPE)]      = "Kernel doesn't support this program type",
+       [ERRCODE_OFFSET(WRNGPID)]       = "Wrong pid in netlink message",
+       [ERRCODE_OFFSET(INVSEQ)]        = "Invalid netlink sequence",
+};
+
+int libbpf_strerror(int err, char *buf, size_t size)
+{
+       if (!buf || !size)
+               return -1;
+
+       err = err > 0 ? err : -err;
+
+       if (err < __LIBBPF_ERRNO__START) {
+               int ret;
+
+               ret = strerror_r(err, buf, size);
+               buf[size - 1] = '\0';
+               return ret;
+       }
+
+       if (err < __LIBBPF_ERRNO__END) {
+               const char *msg;
+
+               msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
+               snprintf(buf, size, "%s", msg);
+               buf[size - 1] = '\0';
+               return 0;
+       }
+
+       snprintf(buf, size, "Unknown libbpf error %d", err);
+       buf[size - 1] = '\0';
+       return -1;
+}
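Behaviour is unchanged by the move into its own file; the call pattern stays the same (a minimal sketch):

        char buf[128];  /* matches libbpf's internal STRERR_BUFSIZE */

        if (err) {
                libbpf_strerror(err, buf, sizeof(buf));
                fprintf(stderr, "libbpf: %s\n", buf);
        }

Codes below __LIBBPF_ERRNO__START fall through to strerror_r(), so the one entry point covers plain negative errnos and libbpf-specific errors alike.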
index 38047c6aa57576d170b3281eb0de0376894e9f41..f4a25bd1871fb856a8295b77c825323cd3f7fc25 100644 (file)
@@ -164,6 +164,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
                "lbug_with_loc",
                "fortify_panic",
                "usercopy_abort",
+               "machine_real_restart",
        };
 
        if (func->bind == STB_WEAK)
index 5dfe102fb5b533979a2fb726b621cedd398d0935..b10a90b6a7181f8968420a875a2b2fc2b3919321 100644 (file)
@@ -178,6 +178,9 @@ Print count deltas for fixed number of times.
 This option should be used together with "-I" option.
        example: 'perf stat -I 1000 --interval-count 2 -e cycles -a'
 
+--interval-clear::
+Clear the screen before the next interval.
+
 --timeout msecs::
 Stop the 'perf stat' session and print count deltas after N milliseconds (minimum: 10 ms).
 This option is not supported with the "-I" option.
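Following this file's own convention, a usage illustration for the new flag: 'perf stat -I 1000 --interval-clear -e cycles -a' redraws the counts in place each second, giving a top-like display.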
index 3598b8b75d274c8ebcc6fc0452091dec34c797b7..ef5d59a5742e2467fc409d52aebbfe4111ce0710 100644 (file)
@@ -243,7 +243,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
        u64 ip;
        u64 skip_slot = -1;
 
-       if (chain->nr < 3)
+       if (!chain || chain->nr < 3)
                return skip_slot;
 
        ip = chain->ips[2];
index 4dfe42666d0ce6e20214e70f0c2a6a3884106290..f0b1709a5ffb2b0901d7f2492252876d17bc25a0 100644 (file)
 330    common  pkey_alloc              __x64_sys_pkey_alloc
 331    common  pkey_free               __x64_sys_pkey_free
 332    common  statx                   __x64_sys_statx
+333    common  io_pgetevents           __x64_sys_io_pgetevents
+334    common  rseq                    __x64_sys_rseq
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
index 63eb49082774c94dfbabe7a18db73bdc2403fb6a..44195514b19e65a5ee0287b48fa0ab25fa44d66f 100644 (file)
@@ -1098,7 +1098,7 @@ static void *worker_thread(void *__tdata)
        u8 *global_data;
        u8 *process_data;
        u8 *thread_data;
-       u64 bytes_done;
+       u64 bytes_done, secs;
        long work_done;
        u32 l;
        struct rusage rusage;
@@ -1254,7 +1254,8 @@ static void *worker_thread(void *__tdata)
        timersub(&stop, &start0, &diff);
        td->runtime_ns = diff.tv_sec * NSEC_PER_SEC;
        td->runtime_ns += diff.tv_usec * NSEC_PER_USEC;
-       td->speed_gbs = bytes_done / (td->runtime_ns / NSEC_PER_SEC) / 1e9;
+       secs = td->runtime_ns / NSEC_PER_SEC;
+       td->speed_gbs = secs ? bytes_done / secs / 1e9 : 0;
 
        getrusage(RUSAGE_THREAD, &rusage);
        td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC;
index 5eb22cc563636c11d4e12bf42c71f00b3e1255db..8180319285af3377810c30c0298f37c73cb9bb8d 100644 (file)
@@ -283,6 +283,15 @@ static int process_sample_event(struct perf_tool *tool,
        return ret;
 }
 
+static int process_feature_event(struct perf_tool *tool,
+                                union perf_event *event,
+                                struct perf_session *session)
+{
+       if (event->feat.feat_id < HEADER_LAST_FEATURE)
+               return perf_event__process_feature(tool, event, session);
+       return 0;
+}
+
 static int hist_entry__tty_annotate(struct hist_entry *he,
                                    struct perf_evsel *evsel,
                                    struct perf_annotate *ann)
@@ -471,7 +480,7 @@ int cmd_annotate(int argc, const char **argv)
                        .attr   = perf_event__process_attr,
                        .build_id = perf_event__process_build_id,
                        .tracing_data   = perf_event__process_tracing_data,
-                       .feature        = perf_event__process_feature,
+                       .feature        = process_feature_event,
                        .ordered_events = true,
                        .ordering_requires_timestamps = true,
                },
index 307b3594525f34cc9e14d758b71bcdf266414c3e..6a8738f7ead3613e691a7dc6aac523d585de39b2 100644 (file)
@@ -56,16 +56,16 @@ struct c2c_hist_entry {
 
        struct compute_stats     cstats;
 
+       unsigned long            paddr;
+       unsigned long            paddr_cnt;
+       bool                     paddr_zero;
+       char                    *nodestr;
+
        /*
         * must be at the end,
         * because of its callchain dynamic entry
         */
        struct hist_entry       he;
-
-       unsigned long            paddr;
-       unsigned long            paddr_cnt;
-       bool                     paddr_zero;
-       char                    *nodestr;
 };
 
 static char const *coalesce_default = "pid,iaddr";
index cdb5b694983273de734fa1f45848c49eeac239d2..c04dc7b537971a07801153db1f9cdaf5da8a2c34 100644 (file)
@@ -217,7 +217,8 @@ static int process_feature_event(struct perf_tool *tool,
        }
 
        /*
-        * All features are received, we can force the
+        * (feat_id = HEADER_LAST_FEATURE) is the end marker, which
+        * means all features are received; now we can force the
         * group if needed.
         */
        setup_forced_leader(rep, session->evlist);
index b3bf35512d2198a94e46a7ecaf6052bca616ee7d..568ddfac3213e084c1f4c6077cd73943bf0644b9 100644 (file)
@@ -180,6 +180,18 @@ static struct {
                                  PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE
        },
 
+       [PERF_TYPE_HW_CACHE] = {
+               .user_set = false,
+
+               .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
+                             PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
+                             PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+                             PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+                             PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
+
+               .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
+       },
+
        [PERF_TYPE_RAW] = {
                .user_set = false,
 
@@ -1822,6 +1834,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
        struct perf_evlist *evlist;
        struct perf_evsel *evsel, *pos;
        int err;
+       static struct perf_evsel_script *es;
 
        err = perf_event__process_attr(tool, event, pevlist);
        if (err)
@@ -1830,6 +1843,19 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
        evlist = *pevlist;
        evsel = perf_evlist__last(*pevlist);
 
+       if (!evsel->priv) {
+               if (scr->per_event_dump) {
+                       evsel->priv = perf_evsel_script__new(evsel,
+                                               scr->session->data);
+               } else {
+                       es = zalloc(sizeof(*es));
+                       if (!es)
+                               return -ENOMEM;
+                       es->fp = stdout;
+                       evsel->priv = es;
+               }
+       }
+
        if (evsel->attr.type >= PERF_TYPE_MAX &&
            evsel->attr.type != PERF_TYPE_SYNTH)
                return 0;
@@ -3018,6 +3044,15 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
        return set_maps(script);
 }
 
+static int process_feature_event(struct perf_tool *tool,
+                                union perf_event *event,
+                                struct perf_session *session)
+{
+       if (event->feat.feat_id < HEADER_LAST_FEATURE)
+               return perf_event__process_feature(tool, event, session);
+       return 0;
+}
+
 #ifdef HAVE_AUXTRACE_SUPPORT
 static int perf_script__process_auxtrace_info(struct perf_tool *tool,
                                              union perf_event *event,
@@ -3062,7 +3097,7 @@ int cmd_script(int argc, const char **argv)
                        .attr            = process_attr,
                        .event_update   = perf_event__process_event_update,
                        .tracing_data    = perf_event__process_tracing_data,
-                       .feature         = perf_event__process_feature,
+                       .feature         = process_feature_event,
                        .build_id        = perf_event__process_build_id,
                        .id_index        = perf_event__process_id_index,
                        .auxtrace_info   = perf_script__process_auxtrace_info,
@@ -3113,8 +3148,9 @@ int cmd_script(int argc, const char **argv)
                     "+field to add and -field to remove."
                     "Valid types: hw,sw,trace,raw,synth. "
                     "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
-                    "addr,symoff,period,iregs,uregs,brstack,brstacksym,flags,"
-                    "bpf-output,callindent,insn,insnlen,brstackinsn,synth,phys_addr",
+                    "addr,symoff,srcline,period,iregs,uregs,brstack,"
+                    "brstacksym,flags,bpf-output,brstackinsn,brstackoff,"
+                    "callindent,insn,insnlen,synth,phys_addr,metric,misc",
                     parse_output_fields),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                    "system-wide collection from all CPUs"),
index 096ccb25c11ff7786c6df0c24b695cd0eff5bec0..22547a490e1f9ec4e20f18a1b0483c0b2e5b0f32 100644 (file)
@@ -65,6 +65,7 @@
 #include "util/tool.h"
 #include "util/string2.h"
 #include "util/metricgroup.h"
+#include "util/top.h"
 #include "asm/bug.h"
 
 #include <linux/time64.h>
@@ -144,6 +145,8 @@ static struct target target = {
 
 typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);
 
+#define METRIC_ONLY_LEN 20
+
 static int                     run_count                       =  1;
 static bool                    no_inherit                      = false;
 static volatile pid_t          child_pid                       = -1;
@@ -173,6 +176,7 @@ static struct cpu_map               *aggr_map;
 static aggr_get_id_t           aggr_get_id;
 static bool                    append_file;
 static bool                    interval_count;
+static bool                    interval_clear;
 static const char              *output_name;
 static int                     output_fd;
 static int                     print_free_counters_hint;
@@ -180,6 +184,7 @@ static int                  print_mixed_hw_group_error;
 static u64                     *walltime_run;
 static bool                    ru_display                      = false;
 static struct rusage           ru_data;
+static unsigned int            metric_only_len                 = METRIC_ONLY_LEN;
 
 struct perf_stat {
        bool                     record;
@@ -967,8 +972,6 @@ static void print_metric_csv(void *ctx,
        fprintf(out, "%s%s%s%s", csv_sep, vals, csv_sep, unit);
 }
 
-#define METRIC_ONLY_LEN 20
-
 /* Filter out some columns that don't work well in metrics only mode */
 
 static bool valid_only_metric(const char *unit)
@@ -999,22 +1002,20 @@ static void print_metric_only(void *ctx, const char *color, const char *fmt,
 {
        struct outstate *os = ctx;
        FILE *out = os->fh;
-       int n;
-       char buf[1024];
-       unsigned mlen = METRIC_ONLY_LEN;
+       char buf[1024], str[1024];
+       unsigned mlen = metric_only_len;
 
        if (!valid_only_metric(unit))
                return;
        unit = fixunit(buf, os->evsel, unit);
-       if (color)
-               n = color_fprintf(out, color, fmt, val);
-       else
-               n = fprintf(out, fmt, val);
-       if (n > METRIC_ONLY_LEN)
-               n = METRIC_ONLY_LEN;
        if (mlen < strlen(unit))
                mlen = strlen(unit) + 1;
-       fprintf(out, "%*s", mlen - n, "");
+
+       if (color)
+               mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1;
+
+       color_snprintf(str, sizeof(str), color ?: "", fmt, val);
+       fprintf(out, "%*s ", mlen, str);
 }
 
 static void print_metric_only_csv(void *ctx, const char *color __maybe_unused,
@@ -1054,7 +1055,7 @@ static void print_metric_header(void *ctx, const char *color __maybe_unused,
        if (csv_output)
                fprintf(os->fh, "%s%s", unit, csv_sep);
        else
-               fprintf(os->fh, "%-*s ", METRIC_ONLY_LEN, unit);
+               fprintf(os->fh, "%*s ", metric_only_len, unit);
 }
 
 static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
@@ -1704,9 +1705,12 @@ static void print_interval(char *prefix, struct timespec *ts)
        FILE *output = stat_config.output;
        static int num_print_interval;
 
+       if (interval_clear)
+               puts(CONSOLE_CLEAR);
+
        sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);
 
-       if (num_print_interval == 0 && !csv_output) {
+       if ((num_print_interval == 0 && !csv_output) || interval_clear) {
                switch (stat_config.aggr_mode) {
                case AGGR_SOCKET:
                        fprintf(output, "#           time socket cpus");
@@ -1719,7 +1723,7 @@ static void print_interval(char *prefix, struct timespec *ts)
                                fprintf(output, "             counts %*s events\n", unit_width, "unit");
                        break;
                case AGGR_NONE:
-                       fprintf(output, "#           time CPU");
+                       fprintf(output, "#           time CPU    ");
                        if (!metric_only)
                                fprintf(output, "                counts %*s events\n", unit_width, "unit");
                        break;
@@ -1738,7 +1742,7 @@ static void print_interval(char *prefix, struct timespec *ts)
                }
        }
 
-       if (num_print_interval == 0 && metric_only)
+       if ((num_print_interval == 0 && metric_only) || interval_clear)
                print_metric_headers(" ", true);
        if (++num_print_interval == 25)
                num_print_interval = 0;
@@ -2057,6 +2061,8 @@ static const struct option stat_options[] = {
                    "(overhead is possible for values <= 100ms)"),
        OPT_INTEGER(0, "interval-count", &stat_config.times,
                    "print counts for fixed number of times"),
+       OPT_BOOLEAN(0, "interval-clear", &interval_clear,
+                   "clear screen in between new interval"),
        OPT_UINTEGER(0, "timeout", &stat_config.timeout,
                    "stop workload and print counts after a timeout period in ms (>= 10ms)"),
        OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
@@ -2436,14 +2442,13 @@ static int add_default_attributes(void)
        (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
        (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
 };
+       struct parse_events_error errinfo;
 
        /* Set attrs if no event is selected and !null_run: */
        if (null_run)
                return 0;
 
        if (transaction_run) {
-               struct parse_events_error errinfo;
-
                if (pmu_have_event("cpu", "cycles-ct") &&
                    pmu_have_event("cpu", "el-start"))
                        err = parse_events(evsel_list, transaction_attrs,
@@ -2454,6 +2459,7 @@ static int add_default_attributes(void)
                                           &errinfo);
                if (err) {
                        fprintf(stderr, "Cannot set up transaction events\n");
+                       parse_events_print_error(&errinfo, transaction_attrs);
                        return -1;
                }
                return 0;
@@ -2479,10 +2485,11 @@ static int add_default_attributes(void)
                    pmu_have_event("msr", "smi")) {
                        if (!force_metric_only)
                                metric_only = true;
-                       err = parse_events(evsel_list, smi_cost_attrs, NULL);
+                       err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
                } else {
                        fprintf(stderr, "To measure SMI cost, it needs "
                                "msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
+                       parse_events_print_error(&errinfo, smi_cost_attrs);
                        return -1;
                }
                if (err) {
@@ -2517,12 +2524,13 @@ static int add_default_attributes(void)
                if (topdown_attrs[0] && str) {
                        if (warn)
                                arch_topdown_group_warn();
-                       err = parse_events(evsel_list, str, NULL);
+                       err = parse_events(evsel_list, str, &errinfo);
                        if (err) {
                                fprintf(stderr,
                                        "Cannot set up top down events %s: %d\n",
                                        str, err);
                                free(str);
+                               parse_events_print_error(&errinfo, str);
                                return -1;
                        }
                } else {
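
The print_metric_only() rework above pads the metric column with a single %*s conversion. Because ANSI color escapes occupy bytes but no screen columns, the field width is widened by strlen(color) plus the length of the reset sequence. A small self-contained demonstration of that accounting (the escape strings are assumptions; any SGR sequences behave the same):

    #include <stdio.h>
    #include <string.h>

    #define COLOR_RED   "\033[31m"  /* assumed SGR escape sequences */
    #define COLOR_RESET "\033[0m"

    int main(void)
    {
        char str[64];
        int mlen = 20;          /* desired visible column width */

        snprintf(str, sizeof(str), COLOR_RED "12.3%%" COLOR_RESET);

        /* The escapes add bytes but no columns: widen the printf
         * field by their combined length so alignment still holds. */
        mlen += strlen(COLOR_RED) + sizeof(COLOR_RESET) - 1;

        printf("|%*s|\n", mlen, str);   /* still 20 visible columns */
        return 0;
    }
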
index 7d40770684549d8691d63a403816b76b5bb7c3ad..61211918bfbaa5eaaba1b90c6664ebbe506da98b 100644 (file)
@@ -1309,6 +1309,11 @@ static int test__checkevent_config_cache(struct perf_evlist *evlist)
        return 0;
 }
 
+static bool test__intel_pt_valid(void)
+{
+       return !!perf_pmu__find("intel_pt");
+}
+
 static int test__intel_pt(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel = perf_evlist__first(evlist);
@@ -1375,6 +1380,7 @@ struct evlist_test {
        const char *name;
        __u32 type;
        const int id;
+       bool (*valid)(void);
        int (*check)(struct perf_evlist *evlist);
 };
 
@@ -1648,6 +1654,7 @@ static struct evlist_test test__events[] = {
        },
        {
                .name  = "intel_pt//u",
+               .valid = test__intel_pt_valid,
                .check = test__intel_pt,
                .id    = 52,
        },
@@ -1686,17 +1693,24 @@ static struct terms_test test__terms[] = {
 
 static int test_event(struct evlist_test *e)
 {
+       struct parse_events_error err = { .idx = 0, };
        struct perf_evlist *evlist;
        int ret;
 
+       if (e->valid && !e->valid()) {
+               pr_debug("... SKIP");
+               return 0;
+       }
+
        evlist = perf_evlist__new();
        if (evlist == NULL)
                return -ENOMEM;
 
-       ret = parse_events(evlist, e->name, NULL);
+       ret = parse_events(evlist, e->name, &err);
        if (ret) {
-               pr_debug("failed to parse event '%s', err %d\n",
-                        e->name, ret);
+               pr_debug("failed to parse event '%s', err %d, str '%s'\n",
+                        e->name, ret, err.str);
+               parse_events_print_error(&err, e->name);
        } else {
                ret = e->check(evlist);
        }
@@ -1714,10 +1728,11 @@ static int test_events(struct evlist_test *events, unsigned cnt)
        for (i = 0; i < cnt; i++) {
                struct evlist_test *e = &events[i];
 
-               pr_debug("running test %d '%s'\n", e->id, e->name);
+               pr_debug("running test %d '%s'", e->id, e->name);
                ret1 = test_event(e);
                if (ret1)
                        ret2 = ret1;
+               pr_debug("\n");
        }
 
        return ret2;
@@ -1799,7 +1814,7 @@ static int test_pmu_events(void)
        }
 
        while (!ret && (ent = readdir(dir))) {
-               struct evlist_test e;
+               struct evlist_test e = { .id = 0, };
                char name[2 * NAME_MAX + 1 + 12 + 3];
 
                /* Names containing . are special and cannot be used directly */
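
The .valid() hook added above lets a table-driven test declare a runtime precondition (here, the presence of the intel_pt PMU) and be skipped instead of failed when it is absent. A sketch of the same optional-predicate pattern, with made-up test names:

    #include <stdbool.h>
    #include <stdio.h>

    struct test {
        const char *name;
        bool (*valid)(void);    /* optional precondition, may be NULL */
        int (*run)(void);
    };

    static bool have_hw(void) { return false; } /* e.g. probe for a PMU */
    static int run_ok(void)   { return 0; }

    static int run_test(const struct test *t)
    {
        if (t->valid && !t->valid()) {
            printf("%s ... SKIP\n", t->name);
            return 0;   /* a skip counts as success */
        }
        return t->run();
    }

    int main(void)
    {
        const struct test tests[] = {
            { "always",   NULL,    run_ok },
            { "needs-hw", have_hw, run_ok },
        };
        for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
            run_test(&tests[i]);
        return 0;
    }
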
index 40e30a26b23cc260536977fb9a0b17db54aa207a..9497d02f69e6669d8ca19ed753beb1a8477f4006 100644 (file)
@@ -45,6 +45,7 @@ static int session_write_header(char *path)
 
        perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
        perf_header__set_feat(&session->header, HEADER_NRCPUS);
+       perf_header__set_feat(&session->header, HEADER_ARCH);
 
        session->header.data_size += DATA_SIZE;
 
index b085f1b3e34dacdd4764d0704e0c65c6644debf1..4ab663ec3e5ea108ee7df9a189ecc2bc4e996843 100644 (file)
@@ -382,7 +382,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
                        gtk_tree_store_set(store, &iter, col_idx++, s, -1);
                }
 
-               if (hists__has_callchains(hists) &&
+               if (hist_entry__has_callchains(h) &&
                    symbol_conf.use_callchain && hists__has(hists, sym)) {
                        if (callchain_param.mode == CHAIN_GRAPH_REL)
                                total = symbol_conf.cumulate_callchain ?
index bf31ceab33bd487d0021ccd9818384dde51fd371..89512504551b0b198a44ebe30e81d0972b86ce77 100644 (file)
@@ -146,8 +146,15 @@ getBPFObjectFromModule(llvm::Module *Module)
        raw_svector_ostream ostream(*Buffer);
 
        legacy::PassManager PM;
-       if (TargetMachine->addPassesToEmitFile(PM, ostream,
-                                              TargetMachine::CGFT_ObjectFile)) {
+       bool NotAdded;
+#if CLANG_VERSION_MAJOR < 7
+       NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream,
+                                                     TargetMachine::CGFT_ObjectFile);
+#else
+       NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream, nullptr,
+                                                     TargetMachine::CGFT_ObjectFile);
+#endif
+       if (NotAdded) {
                llvm::errs() << "TargetMachine can't emit a file of this type\n";
                return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);
        }
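
addPassesToEmitFile() gained an extra parameter in LLVM 7, so the call above is selected by CLANG_VERSION_MAJOR at compile time. The general shape of such a version guard, sketched with a hypothetical library and version macro:

    #include <stdio.h>

    /* Hypothetical version macro; real code would take this from the
     * library's own headers, as CLANG_VERSION_MAJOR is taken above. */
    #define LIBFOO_VERSION_MAJOR 7

    int emit_v6(void *pm)            { (void)pm; return 0; }
    int emit_v7(void *pm, void *dwo) { (void)pm; (void)dwo; return 0; }

    int main(void)
    {
        void *pm = NULL;
        int failed;

        /* Pick the matching signature when the code is built. */
    #if LIBFOO_VERSION_MAJOR < 7
        failed = emit_v6(pm);
    #else
        failed = emit_v7(pm, NULL); /* the new argument defaults to NULL */
    #endif
        printf("emit %s\n", failed ? "failed" : "ok");
        return 0;
    }
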
index 540cd2dcd3e7098b7335c534a0aa7534ba87c889..653ff65aa2c37991763045c1c1bdb9f76f5d473f 100644 (file)
@@ -2129,6 +2129,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
        int cpu_nr = ff->ph->env.nr_cpus_avail;
        u64 size = 0;
        struct perf_header *ph = ff->ph;
+       bool do_core_id_test = true;
 
        ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
        if (!ph->env.cpu)
@@ -2183,6 +2184,13 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
                return 0;
        }
 
+       /* On s390 the socket_id number is not related to the number of cpus.
+        * The socket_id number might be higher than the number of cpus.
+        * This depends on the configuration.
+        */
+       if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
+               do_core_id_test = false;
+
        for (i = 0; i < (u32)cpu_nr; i++) {
                if (do_read_u32(ff, &nr))
                        goto free_cpu;
@@ -2192,7 +2200,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
                if (do_read_u32(ff, &nr))
                        goto free_cpu;
 
-               if (nr != (u32)-1 && nr > (u32)cpu_nr) {
+               if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
                        pr_debug("socket_id number is too big."
                                 "You may need to upgrade the perf tool.\n");
                        goto free_cpu;
@@ -3456,7 +3464,7 @@ int perf_event__process_feature(struct perf_tool *tool,
                pr_warning("invalid record type %d in pipe-mode\n", type);
                return 0;
        }
-       if (feat == HEADER_RESERVED || feat > HEADER_LAST_FEATURE) {
+       if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
                pr_warning("invalid record type %d in pipe-mode\n", type);
                return -1;
        }
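
The last hunk above tightens the pipe-mode range check from '>' to '>=': HEADER_LAST_FEATURE is a sentinel that counts the features, not a feature itself, so it must be rejected too. The off-by-one in miniature:

    #include <stdio.h>

    enum { FEAT_A, FEAT_B, FEAT_LAST }; /* FEAT_LAST is a sentinel, not a feature */

    static int feat_is_valid(int feat)
    {
        /* '>' in the bound test would wrongly accept FEAT_LAST itself;
         * checking '< FEAT_LAST' (equivalently rejecting '>=') is right. */
        return feat >= 0 && feat < FEAT_LAST;
    }

    int main(void)
    {
        printf("FEAT_B    valid: %d\n", feat_is_valid(FEAT_B));    /* 1 */
        printf("FEAT_LAST valid: %d\n", feat_is_valid(FEAT_LAST)); /* 0 */
        return 0;
    }
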
index 52e8fda93a4723f8b19b8fcaf7e6635aae505a6e..828cb9794c7668c9e48d3b9fa527cff394580923 100644 (file)
@@ -370,9 +370,11 @@ void hists__delete_entries(struct hists *hists)
 
 static int hist_entry__init(struct hist_entry *he,
                            struct hist_entry *template,
-                           bool sample_self)
+                           bool sample_self,
+                           size_t callchain_size)
 {
        *he = *template;
+       he->callchain_size = callchain_size;
 
        if (symbol_conf.cumulate_callchain) {
                he->stat_acc = malloc(sizeof(he->stat));
@@ -473,7 +475,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
 
        he = ops->new(callchain_size);
        if (he) {
-               err = hist_entry__init(he, template, sample_self);
+               err = hist_entry__init(he, template, sample_self, callchain_size);
                if (err) {
                        ops->free(he);
                        he = NULL;
@@ -619,9 +621,11 @@ __hists__add_entry(struct hists *hists,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
                .ops = ops,
-       };
+       }, *he = hists__findnew_entry(hists, &entry, al, sample_self);
 
-       return hists__findnew_entry(hists, &entry, al, sample_self);
+       if (!hists->has_callchains && he && he->callchain_size != 0)
+               hists->has_callchains = true;
+       return he;
 }
 
 struct hist_entry *hists__add_entry(struct hists *hists,
index 06607c434949da48b53099d6129a53e71037711a..73049f7f0f6039e551daedb234d0f1d0a24b544e 100644 (file)
@@ -85,6 +85,7 @@ struct hists {
        struct events_stats     stats;
        u64                     event_stream;
        u16                     col_len[HISTC_NR_COLS];
+       bool                    has_callchains;
        int                     socket_filter;
        struct perf_hpp_list    *hpp_list;
        struct list_head        hpp_formats;
@@ -222,8 +223,7 @@ static inline struct hists *evsel__hists(struct perf_evsel *evsel)
 
 static __pure inline bool hists__has_callchains(struct hists *hists)
 {
-       const struct perf_evsel *evsel = hists_to_evsel(hists);
-       return evsel__has_callchain(evsel);
+       return hists->has_callchains;
 }
 
 int hists__init(void);
index ba4c9dd186434a33c8c33a59ab8884fd7c679dd3..d426761a549d02d67756c541ea7ab0b2a0495e68 100644 (file)
@@ -366,7 +366,7 @@ static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf,
                if (len < offs)
                        return INTEL_PT_NEED_MORE_BYTES;
                byte = buf[offs++];
-               payload |= (byte >> 1) << shift;
+               payload |= ((uint64_t)byte >> 1) << shift;
        }
 
        packet->type = INTEL_PT_CYC;
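
The cast in the CYC-packet fix above matters because (byte >> 1) << shift is otherwise evaluated in 32-bit arithmetic: payload bits shifted past bit 31 are silently dropped, and a shift count of 32 or more would be outright undefined behaviour. A compilable illustration:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    int main(void)
    {
        unsigned int byte = 0xff;
        int shift = 28;     /* grows by 7 per payload byte in the decoder */
        uint64_t truncated, correct;

        /* Shift performed in 32 bits: bits above bit 31 are lost. */
        truncated = (uint64_t)((byte >> 1) << shift);
        /* Widen to 64 bits *before* shifting, as the fix does. */
        correct = ((uint64_t)byte >> 1) << shift;

        printf("32-bit shift: %#" PRIx64 "\n", truncated); /* 0xf0000000  */
        printf("64-bit shift: %#" PRIx64 "\n", correct);   /* 0x7f0000000 */
        return 0;
    }
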
index 155d2570274fdae6fbe7caea7bfa1e07953f7948..da8fe57691b8cd0d4c1c22d0cf0fa62595385ac5 100644 (file)
@@ -227,11 +227,16 @@ event_def: event_pmu |
 event_pmu:
 PE_NAME opt_pmu_config
 {
+       struct parse_events_state *parse_state = _parse_state;
+       struct parse_events_error *error = parse_state->error;
        struct list_head *list, *orig_terms, *terms;
 
        if (parse_events_copy_term_list($2, &orig_terms))
                YYABORT;
 
+       if (error)
+               error->idx = @1.first_column;
+
        ALLOC_LIST(list);
        if (parse_events_add_pmu(_parse_state, list, $1, $2, false, false)) {
                struct perf_pmu *pmu = NULL;
index d2fb597c9a8c78d8e8fd8a9890e67f8b8f4432d7..3ba6a1742f9198b2b5279511939d42658ba8a184 100644 (file)
@@ -234,6 +234,74 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
        return 0;
 }
 
+static void perf_pmu_assign_str(char *name, const char *field, char **old_str,
+                               char **new_str)
+{
+       if (!*old_str)
+               goto set_new;
+
+       if (*new_str) { /* Have new string, check with old */
+               if (strcasecmp(*old_str, *new_str))
+                       pr_debug("alias %s differs in field '%s'\n",
+                                name, field);
+               zfree(old_str);
+       } else          /* Nothing new --> keep old string */
+               return;
+set_new:
+       *old_str = *new_str;
+       *new_str = NULL;
+}
+
+static void perf_pmu_update_alias(struct perf_pmu_alias *old,
+                                 struct perf_pmu_alias *newalias)
+{
+       perf_pmu_assign_str(old->name, "desc", &old->desc, &newalias->desc);
+       perf_pmu_assign_str(old->name, "long_desc", &old->long_desc,
+                           &newalias->long_desc);
+       perf_pmu_assign_str(old->name, "topic", &old->topic, &newalias->topic);
+       perf_pmu_assign_str(old->name, "metric_expr", &old->metric_expr,
+                           &newalias->metric_expr);
+       perf_pmu_assign_str(old->name, "metric_name", &old->metric_name,
+                           &newalias->metric_name);
+       perf_pmu_assign_str(old->name, "value", &old->str, &newalias->str);
+       old->scale = newalias->scale;
+       old->per_pkg = newalias->per_pkg;
+       old->snapshot = newalias->snapshot;
+       memcpy(old->unit, newalias->unit, sizeof(old->unit));
+}
+
+/* Delete an alias entry. */
+static void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
+{
+       zfree(&newalias->name);
+       zfree(&newalias->desc);
+       zfree(&newalias->long_desc);
+       zfree(&newalias->topic);
+       zfree(&newalias->str);
+       zfree(&newalias->metric_expr);
+       zfree(&newalias->metric_name);
+       parse_events_terms__purge(&newalias->terms);
+       free(newalias);
+}
+
+/* Merge an alias: search the alias list and, if this name is already
+ * present, merge both entries to combine all information.
+ */
+static bool perf_pmu_merge_alias(struct perf_pmu_alias *newalias,
+                                struct list_head *alist)
+{
+       struct perf_pmu_alias *a;
+
+       list_for_each_entry(a, alist, list) {
+               if (!strcasecmp(newalias->name, a->name)) {
+                       perf_pmu_update_alias(a, newalias);
+                       perf_pmu_free_alias(newalias);
+                       return true;
+               }
+       }
+       return false;
+}
+
 static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
                                 char *desc, char *val,
                                 char *long_desc, char *topic,
@@ -241,9 +309,11 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
                                 char *metric_expr,
                                 char *metric_name)
 {
+       struct parse_events_term *term;
        struct perf_pmu_alias *alias;
        int ret;
        int num;
+       char newval[256];
 
        alias = malloc(sizeof(*alias));
        if (!alias)
@@ -262,6 +332,27 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
                return ret;
        }
 
+       /* Scan the event terms and remove leading zeroes, spaces and
+        * newlines: some platforms have terms specified as
+        * event=0x0091 (read from files ../<PMU>/events/<FILE>),
+        * others as event=0x91 (read from JSON files).
+        *
+        * Rebuild the string to make the alias->str member comparable.
+        */
+       memset(newval, 0, sizeof(newval));
+       ret = 0;
+       list_for_each_entry(term, &alias->terms, list) {
+               if (ret)
+                       ret += scnprintf(newval + ret, sizeof(newval) - ret,
+                                        ",");
+               if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
+                       ret += scnprintf(newval + ret, sizeof(newval) - ret,
+                                        "%s=%#x", term->config, term->val.num);
+               else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
+                       ret += scnprintf(newval + ret, sizeof(newval) - ret,
+                                        "%s=%s", term->config, term->val.str);
+       }
+
        alias->name = strdup(name);
        if (dir) {
                /*
@@ -285,9 +376,10 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
                snprintf(alias->unit, sizeof(alias->unit), "%s", unit);
        }
        alias->per_pkg = perpkg && sscanf(perpkg, "%d", &num) == 1 && num == 1;
-       alias->str = strdup(val);
+       alias->str = strdup(newval);
 
-       list_add_tail(&alias->list, list);
+       if (!perf_pmu_merge_alias(alias, list))
+               list_add_tail(&alias->list, list);
 
        return 0;
 }
@@ -303,6 +395,9 @@ static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FI
 
        buf[ret] = 0;
 
+       /* Remove trailing newline from sysfs file */
+       rtrim(buf);
+
        return __perf_pmu__new_alias(list, dir, name, NULL, buf, NULL, NULL, NULL,
                                     NULL, NULL, NULL);
 }
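
The alias code above rebuilds the parsed term list into a canonical string so that sysfs-style event=0x0091 and JSON-style event=0x91 compare equal when aliases are merged. A simplified sketch of that normalization, assuming a plain array in place of perf's term list (snprintf stands in for the kernel-style scnprintf, which clamps its return value):

    #include <stdio.h>

    struct term { const char *name; unsigned long long val; };

    int main(void)
    {
        /* e.g. as parsed from "event=0x0091,umask=0x01" */
        const struct term terms[] = { { "event", 0x0091 }, { "umask", 0x01 } };
        char newval[256];
        int ret = 0;

        for (unsigned i = 0; i < sizeof(terms) / sizeof(terms[0]); i++) {
            if (ret)    /* separator between terms, none before the first */
                ret += snprintf(newval + ret, sizeof(newval) - ret, ",");
            /* %# prints the canonical hex form, with no leading zeroes */
            ret += snprintf(newval + ret, sizeof(newval) - ret,
                            "%s=%#llx", terms[i].name, terms[i].val);
        }
        printf("%s\n", newval); /* event=0x91,umask=0x1 */
        return 0;
    }
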
index 7cf2d5cc038ea07accaf5ef631c9b23b7b0c207b..8bf302cafcecd6b285d68e2b2c56130019dea101 100644 (file)
@@ -112,6 +112,8 @@ struct hist_entry {
 
        char                    level;
        u8                      filtered;
+
+       u16                     callchain_size;
        union {
                /*
                 * Since perf diff only supports the stdio output, TUI
@@ -153,7 +155,7 @@ struct hist_entry {
 
 static __pure inline bool hist_entry__has_callchains(struct hist_entry *he)
 {
-       return hists__has_callchains(he->hists);
+       return he->callchain_size != 0;
 }
 
 static inline bool hist_entry__has_pairs(struct hist_entry *he)
index ca9ef70176249294644a1beeea2b141086cfe75c..d39e4ff7d0bf9256b4b5ff2c03b2cd4e24307fe4 100644 (file)
@@ -56,7 +56,7 @@ name as necessary to disambiguate it from others is necessary.  Note that option
 .PP
 \fB--hide column\fP do not show the specified built-in columns.  May be invoked multiple times, or with a comma-separated list of column names.  Use "--hide sysfs" to hide the sysfs statistics columns as a group.
 .PP
-\fB--enable column\fP show the specified built-in columns, which are otherwise disabled, by default.  Currently the only built-in counters disabled by default are "usec" and "Time_Of_Day_Seconds".
+\fB--enable column\fP show the specified built-in columns, which are otherwise disabled by default.  Currently the only built-in counters disabled by default are "usec", "Time_Of_Day_Seconds", "APIC" and "X2APIC".
 The column name "all" can be used to enable all disabled-by-default built-in counters.
 .PP
 \fB--show column\fP show only the specified built-in columns.  May be invoked multiple times, or with a comma-separated list of column names.  Use "--show sysfs" to show the sysfs statistics columns as a group.
index d6cff3070ebde60d2fa9a54deec6c147b6bda484..4d14bbbf9b639b7152b73d75363672826fd274da 100644 (file)
@@ -109,6 +109,7 @@ unsigned int has_hwp_activity_window;       /* IA32_HWP_REQUEST[bits 41:32] */
 unsigned int has_hwp_epp;              /* IA32_HWP_REQUEST[bits 31:24] */
 unsigned int has_hwp_pkg;              /* IA32_HWP_REQUEST_PKG */
 unsigned int has_misc_feature_control;
+unsigned int first_counter_read = 1;
 
 #define RAPL_PKG               (1 << 0)
                                        /* 0x610 MSR_PKG_POWER_LIMIT */
@@ -170,6 +171,8 @@ struct thread_data {
        unsigned long long  irq_count;
        unsigned int smi_count;
        unsigned int cpu_id;
+       unsigned int apic_id;
+       unsigned int x2apic_id;
        unsigned int flags;
 #define CPU_IS_FIRST_THREAD_IN_CORE    0x2
 #define CPU_IS_FIRST_CORE_IN_PACKAGE   0x4
@@ -381,19 +384,23 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
 }
 
 /*
- * Each string in this array is compared in --show and --hide cmdline.
- * Thus, strings that are proper sub-sets must follow their more specific peers.
+ * This list matches the column headers, except that
+ * 1. it is built-in only; the sysfs counters are not here -- we learn of those at run-time
+ * 2. Core and CPU are moved to the end, so that --show and --hide never
+ *    match them inside longer column names.
  */
 struct msr_counter bic[] = {
        { 0x0, "usec" },
        { 0x0, "Time_Of_Day_Seconds" },
        { 0x0, "Package" },
+       { 0x0, "Node" },
        { 0x0, "Avg_MHz" },
+       { 0x0, "Busy%" },
        { 0x0, "Bzy_MHz" },
        { 0x0, "TSC_MHz" },
        { 0x0, "IRQ" },
        { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL},
-       { 0x0, "Busy%" },
+       { 0x0, "sysfs" },
        { 0x0, "CPU%c1" },
        { 0x0, "CPU%c3" },
        { 0x0, "CPU%c6" },
@@ -424,73 +431,73 @@ struct msr_counter bic[] = {
        { 0x0, "Cor_J" },
        { 0x0, "GFX_J" },
        { 0x0, "RAM_J" },
-       { 0x0, "Core" },
-       { 0x0, "CPU" },
        { 0x0, "Mod%c6" },
-       { 0x0, "sysfs" },
        { 0x0, "Totl%C0" },
        { 0x0, "Any%C0" },
        { 0x0, "GFX%C0" },
        { 0x0, "CPUGFX%" },
-       { 0x0, "Node%" },
+       { 0x0, "Core" },
+       { 0x0, "CPU" },
+       { 0x0, "APIC" },
+       { 0x0, "X2APIC" },
 };
 
-
-
 #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
 #define        BIC_USEC        (1ULL << 0)
 #define        BIC_TOD         (1ULL << 1)
 #define        BIC_Package     (1ULL << 2)
-#define        BIC_Avg_MHz     (1ULL << 3)
-#define        BIC_Bzy_MHz     (1ULL << 4)
-#define        BIC_TSC_MHz     (1ULL << 5)
-#define        BIC_IRQ         (1ULL << 6)
-#define        BIC_SMI         (1ULL << 7)
-#define        BIC_Busy        (1ULL << 8)
-#define        BIC_CPU_c1      (1ULL << 9)
-#define        BIC_CPU_c3      (1ULL << 10)
-#define        BIC_CPU_c6      (1ULL << 11)
-#define        BIC_CPU_c7      (1ULL << 12)
-#define        BIC_ThreadC     (1ULL << 13)
-#define        BIC_CoreTmp     (1ULL << 14)
-#define        BIC_CoreCnt     (1ULL << 15)
-#define        BIC_PkgTmp      (1ULL << 16)
-#define        BIC_GFX_rc6     (1ULL << 17)
-#define        BIC_GFXMHz      (1ULL << 18)
-#define        BIC_Pkgpc2      (1ULL << 19)
-#define        BIC_Pkgpc3      (1ULL << 20)
-#define        BIC_Pkgpc6      (1ULL << 21)
-#define        BIC_Pkgpc7      (1ULL << 22)
-#define        BIC_Pkgpc8      (1ULL << 23)
-#define        BIC_Pkgpc9      (1ULL << 24)
-#define        BIC_Pkgpc10     (1ULL << 25)
-#define BIC_CPU_LPI    (1ULL << 26)
-#define BIC_SYS_LPI    (1ULL << 27)
-#define        BIC_PkgWatt     (1ULL << 26)
-#define        BIC_CorWatt     (1ULL << 27)
-#define        BIC_GFXWatt     (1ULL << 28)
-#define        BIC_PkgCnt      (1ULL << 29)
-#define        BIC_RAMWatt     (1ULL << 30)
-#define        BIC_PKG__       (1ULL << 31)
-#define        BIC_RAM__       (1ULL << 32)
-#define        BIC_Pkg_J       (1ULL << 33)
-#define        BIC_Cor_J       (1ULL << 34)
-#define        BIC_GFX_J       (1ULL << 35)
-#define        BIC_RAM_J       (1ULL << 36)
-#define        BIC_Core        (1ULL << 37)
-#define        BIC_CPU         (1ULL << 38)
-#define        BIC_Mod_c6      (1ULL << 39)
-#define        BIC_sysfs       (1ULL << 40)
-#define        BIC_Totl_c0     (1ULL << 41)
-#define        BIC_Any_c0      (1ULL << 42)
-#define        BIC_GFX_c0      (1ULL << 43)
-#define        BIC_CPUGFX      (1ULL << 44)
-#define        BIC_Node        (1ULL << 45)
-
-#define BIC_DISABLED_BY_DEFAULT        (BIC_USEC | BIC_TOD)
+#define        BIC_Node        (1ULL << 3)
+#define        BIC_Avg_MHz     (1ULL << 4)
+#define        BIC_Busy        (1ULL << 5)
+#define        BIC_Bzy_MHz     (1ULL << 6)
+#define        BIC_TSC_MHz     (1ULL << 7)
+#define        BIC_IRQ         (1ULL << 8)
+#define        BIC_SMI         (1ULL << 9)
+#define        BIC_sysfs       (1ULL << 10)
+#define        BIC_CPU_c1      (1ULL << 11)
+#define        BIC_CPU_c3      (1ULL << 12)
+#define        BIC_CPU_c6      (1ULL << 13)
+#define        BIC_CPU_c7      (1ULL << 14)
+#define        BIC_ThreadC     (1ULL << 15)
+#define        BIC_CoreTmp     (1ULL << 16)
+#define        BIC_CoreCnt     (1ULL << 17)
+#define        BIC_PkgTmp      (1ULL << 18)
+#define        BIC_GFX_rc6     (1ULL << 19)
+#define        BIC_GFXMHz      (1ULL << 20)
+#define        BIC_Pkgpc2      (1ULL << 21)
+#define        BIC_Pkgpc3      (1ULL << 22)
+#define        BIC_Pkgpc6      (1ULL << 23)
+#define        BIC_Pkgpc7      (1ULL << 24)
+#define        BIC_Pkgpc8      (1ULL << 25)
+#define        BIC_Pkgpc9      (1ULL << 26)
+#define        BIC_Pkgpc10     (1ULL << 27)
+#define BIC_CPU_LPI    (1ULL << 28)
+#define BIC_SYS_LPI    (1ULL << 29)
+#define        BIC_PkgWatt     (1ULL << 30)
+#define        BIC_CorWatt     (1ULL << 31)
+#define        BIC_GFXWatt     (1ULL << 32)
+#define        BIC_PkgCnt      (1ULL << 33)
+#define        BIC_RAMWatt     (1ULL << 34)
+#define        BIC_PKG__       (1ULL << 35)
+#define        BIC_RAM__       (1ULL << 36)
+#define        BIC_Pkg_J       (1ULL << 37)
+#define        BIC_Cor_J       (1ULL << 38)
+#define        BIC_GFX_J       (1ULL << 39)
+#define        BIC_RAM_J       (1ULL << 40)
+#define        BIC_Mod_c6      (1ULL << 41)
+#define        BIC_Totl_c0     (1ULL << 42)
+#define        BIC_Any_c0      (1ULL << 43)
+#define        BIC_GFX_c0      (1ULL << 44)
+#define        BIC_CPUGFX      (1ULL << 45)
+#define        BIC_Core        (1ULL << 46)
+#define        BIC_CPU         (1ULL << 47)
+#define        BIC_APIC        (1ULL << 48)
+#define        BIC_X2APIC      (1ULL << 49)
+
+#define BIC_DISABLED_BY_DEFAULT        (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
 
 unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT);
-unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs;
+unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
 
 #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
 #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
@@ -517,17 +524,34 @@ void help(void)
        "when COMMAND completes.\n"
        "If no COMMAND is specified, turbostat wakes every 5-seconds\n"
        "to print statistics, until interrupted.\n"
-       "--add          add a counter\n"
-       "               eg. --add msr0x10,u64,cpu,delta,MY_TSC\n"
-       "--cpu  cpu-set limit output to summary plus cpu-set:\n"
-       "               {core | package | j,k,l..m,n-p }\n"
-       "--quiet        skip decoding system configuration header\n"
-       "--interval sec.subsec  Override default 5-second measurement interval\n"
-       "--help         print this help message\n"
-       "--list         list column headers only\n"
-       "--num_iterations num   number of the measurement iterations\n"
-       "--out file     create or truncate \"file\" for all output\n"
-       "--version      print version information\n"
+       "  -a, --add    add a counter\n"
+       "                 eg. --add msr0x10,u64,cpu,delta,MY_TSC\n"
+       "  -c, --cpu    cpu-set limit output to summary plus cpu-set:\n"
+       "                 {core | package | j,k,l..m,n-p }\n"
+       "  -d, --debug  displays usec, Time_Of_Day_Seconds and more debugging\n"
+       "  -D, --Dump   displays the raw counter values\n"
+       "  -e, --enable [all | column]\n"
+       "               shows all or the specified disabled column\n"
+       "  -H, --hide [column|column,column,...]\n"
+       "               hide the specified column(s)\n"
+       "  -i, --interval sec.subsec\n"
+       "               Override default 5-second measurement interval\n"
+       "  -J, --Joules displays energy in Joules instead of Watts\n"
+       "  -l, --list   list column headers only\n"
+       "  -n, --num_iterations num\n"
+       "               number of the measurement iterations\n"
+       "  -o, --out file\n"
+       "               create or truncate \"file\" for all output\n"
+       "  -q, --quiet  skip decoding system configuration header\n"
+       "  -s, --show [column|column,column,...]\n"
+       "               show only the specified column(s)\n"
+       "  -S, --Summary\n"
+       "               limits output to 1-line system summary per interval\n"
+       "  -T, --TCC temperature\n"
+       "               sets the Thermal Control Circuit temperature in\n"
+       "                 degrees Celsius\n"
+       "  -h, --help   print this help message\n"
+       "  -v, --version        print version information\n"
        "\n"
        "For more help, run \"man turbostat\"\n");
 }
@@ -601,6 +625,10 @@ void print_header(char *delim)
                outp += sprintf(outp, "%sCore", (printed++ ? delim : ""));
        if (DO_BIC(BIC_CPU))
                outp += sprintf(outp, "%sCPU", (printed++ ? delim : ""));
+       if (DO_BIC(BIC_APIC))
+               outp += sprintf(outp, "%sAPIC", (printed++ ? delim : ""));
+       if (DO_BIC(BIC_X2APIC))
+               outp += sprintf(outp, "%sX2APIC", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Avg_MHz))
                outp += sprintf(outp, "%sAvg_MHz", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Busy))
@@ -880,6 +908,10 @@ int format_counters(struct thread_data *t, struct core_data *c,
                        outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
                if (DO_BIC(BIC_CPU))
                        outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+               if (DO_BIC(BIC_APIC))
+                       outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+               if (DO_BIC(BIC_X2APIC))
+                       outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
        } else {
                if (DO_BIC(BIC_Package)) {
                        if (p)
@@ -904,6 +936,10 @@ int format_counters(struct thread_data *t, struct core_data *c,
                }
                if (DO_BIC(BIC_CPU))
                        outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id);
+               if (DO_BIC(BIC_APIC))
+                       outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->apic_id);
+               if (DO_BIC(BIC_X2APIC))
+                       outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->x2apic_id);
        }
 
        if (DO_BIC(BIC_Avg_MHz))
@@ -1231,6 +1267,12 @@ delta_thread(struct thread_data *new, struct thread_data *old,
        int i;
        struct msr_counter *mp;
 
+       /* cpuid is run only on the first counter read; copy its results */
+       if (DO_BIC(BIC_APIC))
+               new->apic_id = old->apic_id;
+       if (DO_BIC(BIC_X2APIC))
+               new->x2apic_id = old->x2apic_id;
+
        /*
         * the timestamps from start of measurement interval are in "old"
         * the timestamp from end of measurement interval are in "new"
@@ -1393,6 +1435,12 @@ int sum_counters(struct thread_data *t, struct core_data *c,
        int i;
        struct msr_counter *mp;
 
+       /* copy the unchanging apic_ids */
+       if (DO_BIC(BIC_APIC))
+               average.threads.apic_id = t->apic_id;
+       if (DO_BIC(BIC_X2APIC))
+               average.threads.x2apic_id = t->x2apic_id;
+
        /* remember first tv_begin */
        if (average.threads.tv_begin.tv_sec == 0)
                average.threads.tv_begin = t->tv_begin;
@@ -1619,6 +1667,34 @@ int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp)
        return 0;
 }
 
+void get_apic_id(struct thread_data *t)
+{
+       unsigned int eax, ebx, ecx, edx, max_level;
+
+       eax = ebx = ecx = edx = 0;
+
+       if (!genuine_intel)
+               return;
+
+       __cpuid(0, max_level, ebx, ecx, edx);
+
+       __cpuid(1, eax, ebx, ecx, edx);
+       t->apic_id = (ebx >> 24) & 0xf;
+
+       if (max_level < 0xb)
+               return;
+
+       if (!DO_BIC(BIC_X2APIC))
+               return;
+
+       ecx = 0;
+       __cpuid(0xb, eax, ebx, ecx, edx);
+       t->x2apic_id = edx;
+
+       if (debug && (t->apic_id != t->x2apic_id))
+               fprintf(stderr, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id);
+}
+
 /*
  * get_counters(...)
  * migrate to cpu
@@ -1632,7 +1708,6 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        struct msr_counter *mp;
        int i;
 
-
        gettimeofday(&t->tv_begin, (struct timezone *)NULL);
 
        if (cpu_migrate(cpu)) {
@@ -1640,6 +1715,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                return -1;
        }
 
+       if (first_counter_read)
+               get_apic_id(t);
 retry:
        t->tsc = rdtsc();       /* we are running on local CPU of interest */
 
@@ -2432,6 +2509,12 @@ void set_node_data(void)
                if (pni[pkg].count > topo.nodes_per_pkg)
                        topo.nodes_per_pkg = pni[0].count;
 
+       /* Fake one node per package for machines that don't
+        * expose nodes, and thus avoid -nan results.
+        */
+       if (topo.nodes_per_pkg == 0)
+               topo.nodes_per_pkg = 1;
+
        for (cpu = 0; cpu < topo.num_cpus; cpu++) {
                pkg = cpus[cpu].physical_package_id;
                node = cpus[cpu].physical_node_id;
@@ -2879,6 +2962,7 @@ void do_sleep(void)
        }
 }
 
+
 void turbostat_loop()
 {
        int retval;
@@ -2892,6 +2976,7 @@ void turbostat_loop()
 
        snapshot_proc_sysfs_files();
        retval = for_all_cpus(get_counters, EVEN_COUNTERS);
+       first_counter_read = 0;
        if (retval < -1) {
                exit(retval);
        } else if (retval == -1) {
@@ -4392,7 +4477,7 @@ void process_cpuid()
        if (!quiet) {
                fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
                        max_level, family, model, stepping, family, model, stepping);
-               fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s\n",
+               fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n",
                        ecx & (1 << 0) ? "SSE3" : "-",
                        ecx & (1 << 3) ? "MONITOR" : "-",
                        ecx & (1 << 6) ? "SMX" : "-",
@@ -4401,6 +4486,7 @@ void process_cpuid()
                        edx & (1 << 4) ? "TSC" : "-",
                        edx & (1 << 5) ? "MSR" : "-",
                        edx & (1 << 22) ? "ACPI-TM" : "-",
+                       edx & (1 << 28) ? "HT" : "-",
                        edx & (1 << 29) ? "TM" : "-");
        }
 
@@ -4652,7 +4738,6 @@ void process_cpuid()
        return;
 }
 
-
 /*
  * in /dev/cpu/ return success for names that are numbers
  * ie. filter out ".", "..", "microcode".
@@ -4842,6 +4927,13 @@ void init_counter(struct thread_data *thread_base, struct core_data *core_base,
        struct core_data *c;
        struct pkg_data *p;
 
+
+       /* Workaround for systems where physical_node_id==-1
+        * and logical_node_id==(-1 - topo.num_cpus)
+        */
+       if (node_id < 0)
+               node_id = 0;
+
        t = GET_THREAD(thread_base, thread_id, core_id, node_id, pkg_id);
        c = GET_CORE(core_base, core_id, node_id, pkg_id);
        p = GET_PKG(pkg_base, pkg_id);
@@ -4946,6 +5038,7 @@ int fork_it(char **argv)
 
        snapshot_proc_sysfs_files();
        status = for_all_cpus(get_counters, EVEN_COUNTERS);
+       first_counter_read = 0;
        if (status)
                exit(status);
        /* clear affinity side-effect of get_counters() */
@@ -5009,7 +5102,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 18.06.01"
+       fprintf(outf, "turbostat version 18.06.20"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
@@ -5381,7 +5474,7 @@ void cmdline(int argc, char **argv)
                        break;
                case 'e':
                        /* --enable specified counter */
-                       bic_enabled |= bic_lookup(optarg, SHOW_LIST);
+                       bic_enabled = bic_enabled | bic_lookup(optarg, SHOW_LIST);
                        break;
                case 'd':
                        debug++;
@@ -5465,7 +5558,6 @@ void cmdline(int argc, char **argv)
 int main(int argc, char **argv)
 {
        outf = stderr;
-
        cmdline(argc, argv);
 
        if (!quiet)
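
get_apic_id() above reads the initial APIC id from CPUID leaf 1 (EBX bits 31:24) and, where leaf 0xb exists, the x2APIC id from its EDX output, once per run. A standalone x86-only sketch using the compiler's <cpuid.h> helpers (__cpuid_count selects the ECX sub-leaf that turbostat zeroes by hand):

    #include <stdio.h>
    #include <cpuid.h>      /* GCC/clang builtin CPUID helpers, x86 only */

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx, max_level;

        __cpuid(0, max_level, ebx, ecx, edx);   /* EAX: highest basic leaf */

        __cpuid(1, eax, ebx, ecx, edx);
        printf("initial APIC id: %u\n", (ebx >> 24) & 0xff); /* EBX[31:24] */

        if (max_level >= 0xb) {
            /* leaf 0xb needs an ECX sub-leaf; sub-leaf 0 is enough here */
            __cpuid_count(0xb, 0, eax, ebx, ecx, edx);
            printf("x2APIC id: %u\n", edx);
        }
        return 0;
    }
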
index 7a6214e9ae58d4432668394bf4762a0e2cb5c669..478bf1bcbbf5fb68797c46717dd8b157444328f6 100644 (file)
@@ -61,6 +61,7 @@ $(OUTPUT)/test_dev_cgroup: cgroup_helpers.c
 $(OUTPUT)/test_sock: cgroup_helpers.c
 $(OUTPUT)/test_sock_addr: cgroup_helpers.c
 $(OUTPUT)/test_sockmap: cgroup_helpers.c
+$(OUTPUT)/test_tcpbpf_user: cgroup_helpers.c
 $(OUTPUT)/test_progs: trace_helpers.c
 $(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c
 
index c87b4e052ce961e8a9f2a639a7d1bbcae9a828eb..cf16948aad4adb1e32d33d21f0742ddf724e524e 100644 (file)
@@ -118,7 +118,7 @@ static int join_cgroup_from_top(char *cgroup_path)
  *
  * On success, it returns 0, otherwise on failure it returns 1.
  */
-int join_cgroup(char *path)
+int join_cgroup(const char *path)
 {
        char cgroup_path[PATH_MAX + 1];
 
@@ -158,7 +158,7 @@ void cleanup_cgroup_environment(void)
  * On success, it returns the file descriptor. On failure it returns 0.
  * If there is a failure, it prints the error to stderr.
  */
-int create_and_get_cgroup(char *path)
+int create_and_get_cgroup(const char *path)
 {
        char cgroup_path[PATH_MAX + 1];
        int fd;
@@ -186,7 +186,7 @@ int create_and_get_cgroup(char *path)
  * which is an invalid cgroup id.
  * If there is a failure, it prints the error to stderr.
  */
-unsigned long long get_cgroup_id(char *path)
+unsigned long long get_cgroup_id(const char *path)
 {
        int dirfd, err, flags, mount_id, fhsize;
        union {
index 20a4a5dcd469019907341562c799b42a3bd95ddf..d64bb895709078435295e7e8b1d981ff90a16e0b 100644 (file)
@@ -9,10 +9,10 @@
        __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
 
 
-int create_and_get_cgroup(char *path);
-int join_cgroup(char *path);
+int create_and_get_cgroup(const char *path);
+int join_cgroup(const char *path);
 int setup_cgroup_environment(void);
 void cleanup_cgroup_environment(void);
-unsigned long long get_cgroup_id(char *path);
+unsigned long long get_cgroup_id(const char *path);
 
 #endif
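
get_cgroup_id(), whose prototype is constified above, derives a 64-bit cgroup id from the file handle of a cgroupfs directory. A trimmed sketch of the underlying name_to_handle_at() technique (Linux-specific; the path is an assumption and error handling is minimal):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static unsigned long long get_cgroup_id(const char *path)
    {
        unsigned long long id = 0;
        struct file_handle *fhp;
        int mount_id;

        /* On cgroup v2, the file handle is exactly the 64-bit cgroup id. */
        fhp = malloc(sizeof(*fhp) + sizeof(id));
        if (!fhp)
            return 0;
        fhp->handle_bytes = sizeof(id);
        if (name_to_handle_at(AT_FDCWD, path, fhp, &mount_id, 0) == 0)
            memcpy(&id, fhp->f_handle, sizeof(id));
        free(fhp);
        return id;
    }

    int main(void)
    {
        /* Path is an assumption: any cgroup v2 directory works. */
        printf("cgroup id: %llu\n", get_cgroup_id("/sys/fs/cgroup"));
        return 0;
    }
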
index 1eefe211a4a88a3dfbac5be585932384061b9edd..b4994a94968bfd9d12965fd630cba7e99458a30a 100644 (file)
@@ -6,4 +6,15 @@ CONFIG_TEST_BPF=m
 CONFIG_CGROUP_BPF=y
 CONFIG_NETDEVSIM=m
 CONFIG_NET_CLS_ACT=y
+CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_IPIP=y
+CONFIG_IPV6=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_IPV6_GRE=y
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_HMAC=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_VXLAN=y
+CONFIG_GENEVE=y
index 35669ccd4d23b26c7505e8829bcf3876e3bcb3e1..9df0d2ac45f8453b9529c4ea90fb19dba3f86480 100755 (executable)
@@ -1,6 +1,15 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ "$(id -u)" != "0" ]; then
+       echo $msg please run this as root >&2
+       exit $ksft_skip
+fi
+
 SRC_TREE=../../../../
 
 test_run()
index ce2e15e4f9760e205ed8e91ab5260a23172ab2e5..677686198df34d799e67c0eb15ab25f4b68eba4c 100755 (executable)
@@ -1,6 +1,15 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ $UID != 0 ]; then
+       echo $msg please run this as root >&2
+       exit $ksft_skip
+fi
+
 GREEN='\033[0;92m'
 RED='\033[0;31m'
 NC='\033[0m' # No Color
index 1c77994b5e713dfe8aae357dd083c4713080f62a..270fa8f49573207bc973cce2302b53341a6fec5b 100755 (executable)
 # An UDP datagram is sent from fb00::1 to fb00::6. The test succeeds if this
 # datagram can be read on NS6 when binding to fb00::6.
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ $UID != 0 ]; then
+       echo $msg please run this as root >&2
+       exit $ksft_skip
+fi
+
 TMP_FILE="/tmp/selftest_lwt_seg6local.txt"
 
 cleanup()
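
All three scripts above adopt the kselftest convention of exiting with code 4 to report a skip when not run as root, so the harness can distinguish "skipped" from "failed". The same convention in a C selftest might look like:

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    #define KSFT_SKIP 4     /* kselftest framework: exit code for a skip */

    int main(void)
    {
        if (geteuid() != 0) {
            fprintf(stderr, "skip all tests: please run this as root\n");
            exit(KSFT_SKIP);
        }
        /* ... privileged test body would go here ... */
        printf("ok\n");
        return 0;
    }
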
index e78aad0a68bb9963368a5236377144c8e61cb230..b746227eaff2e73f9b2fef7388e1213d71e2f0cb 100755 (executable)
@@ -163,6 +163,10 @@ def bpftool(args, JSON=True, ns="", fail=True):
 
 def bpftool_prog_list(expected=None, ns=""):
     _, progs = bpftool("prog show", JSON=True, ns=ns, fail=True)
+    # Remove the base progs
+    for p in base_progs:
+        if p in progs:
+            progs.remove(p)
     if expected is not None:
         if len(progs) != expected:
             fail(True, "%d BPF programs loaded, expected %d" %
@@ -171,6 +175,10 @@ def bpftool_prog_list(expected=None, ns=""):
 
 def bpftool_map_list(expected=None, ns=""):
     _, maps = bpftool("map show", JSON=True, ns=ns, fail=True)
+    # Remove the base maps
+    for m in base_maps:
+        if m in maps:
+            maps.remove(m)
     if expected is not None:
         if len(maps) != expected:
             fail(True, "%d BPF maps loaded, expected %d" %
@@ -331,6 +339,11 @@ class NetdevSim:
         self.dfs = DebugfsDir(self.dfs_dir)
         return self.dfs
 
+    def dfs_read(self, f):
+        path = os.path.join(self.dfs_dir, f)
+        _, data = cmd('cat %s' % (path))
+        return data.strip()
+
     def dfs_num_bound_progs(self):
         path = os.path.join(self.dfs_dir, "bpf_bound_progs")
         _, progs = cmd('ls %s' % (path))
@@ -539,11 +552,11 @@ def check_extack(output, reference, args):
     if skip_extack:
         return
     lines = output.split("\n")
-    comp = len(lines) >= 2 and lines[1] == reference
+    comp = len(lines) >= 2 and lines[1] == 'Error: ' + reference
     fail(not comp, "Missing or incorrect netlink extack message")
 
 def check_extack_nsim(output, reference, args):
-    check_extack(output, "Error: netdevsim: " + reference, args)
+    check_extack(output, "netdevsim: " + reference, args)
 
 def check_no_extack(res, needle):
     fail((res[1] + res[2]).count(needle) or (res[1] + res[2]).count("Warning:"),
@@ -585,8 +598,8 @@ skip(os.getuid() != 0, "test must be run as root")
 # Check tools
 ret, progs = bpftool("prog", fail=False)
 skip(ret != 0, "bpftool not installed")
-# Check no BPF programs are loaded
-skip(len(progs) != 0, "BPF programs already loaded on the system")
+base_progs = progs
+_, base_maps = bpftool("map")
 
 # Check netdevsim
 ret, out = cmd("modprobe netdevsim", fail=False)
@@ -646,7 +659,7 @@ try:
     ret, _, err = sim.cls_bpf_add_filter(obj, skip_sw=True,
                                          fail=False, include_stderr=True)
     fail(ret == 0, "TC filter loaded without enabling TC offloads")
-    check_extack(err, "Error: TC offload is disabled on net device.", args)
+    check_extack(err, "TC offload is disabled on net device.", args)
     sim.wait_for_flush()
 
     sim.set_ethtool_tc_offloads(True)
@@ -686,7 +699,7 @@ try:
                                          skip_sw=True,
                                          fail=False, include_stderr=True)
     fail(ret == 0, "Offloaded a filter to chain other than 0")
-    check_extack(err, "Error: Driver supports only offload of chain 0.", args)
+    check_extack(err, "Driver supports only offload of chain 0.", args)
     sim.tc_flush_filters()
 
     start_test("Test TC replace...")
@@ -806,24 +819,20 @@ try:
          "Device parameters reported for non-offloaded program")
 
     start_test("Test XDP prog replace with bad flags...")
-    ret, _, err = sim.set_xdp(obj, "offload", force=True,
+    ret, _, err = sim.set_xdp(obj, "generic", force=True,
                               fail=False, include_stderr=True)
     fail(ret == 0, "Replaced XDP program with a program in different mode")
-    check_extack_nsim(err, "program loaded with different flags.", args)
+    fail(err.count("File exists") != 1, "Replaced driver XDP with generic")
     ret, _, err = sim.set_xdp(obj, "", force=True,
                               fail=False, include_stderr=True)
     fail(ret == 0, "Replaced XDP program with a program in different mode")
-    check_extack_nsim(err, "program loaded with different flags.", args)
+    check_extack(err, "program loaded with different flags.", args)
 
     start_test("Test XDP prog remove with bad flags...")
-    ret, _, err = sim.unset_xdp("offload", force=True,
-                                fail=False, include_stderr=True)
-    fail(ret == 0, "Removed program with a bad mode mode")
-    check_extack_nsim(err, "program loaded with different flags.", args)
     ret, _, err = sim.unset_xdp("", force=True,
                                 fail=False, include_stderr=True)
-    fail(ret == 0, "Removed program with a bad mode mode")
-    check_extack_nsim(err, "program loaded with different flags.", args)
+    fail(ret == 0, "Removed program with a bad mode")
+    check_extack(err, "program loaded with different flags.", args)
 
     start_test("Test MTU restrictions...")
     ret, _ = sim.set_mtu(9000, fail=False)
@@ -883,6 +892,60 @@ try:
     rm(pin_file)
     bpftool_prog_list_wait(expected=0)
 
+    start_test("Test multi-attachment XDP - attach...")
+    sim.set_xdp(obj, "offload")
+    xdp = sim.ip_link_show(xdp=True)["xdp"]
+    offloaded = sim.dfs_read("bpf_offloaded_id")
+    fail("prog" not in xdp, "Base program not reported in single program mode")
+    fail(len(xdp["attached"]) != 1,
+         "Wrong attached program count with one program")
+
+    sim.set_xdp(obj, "")
+    two_xdps = sim.ip_link_show(xdp=True)["xdp"]
+    offloaded2 = sim.dfs_read("bpf_offloaded_id")
+
+    fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs")
+    fail("prog" in two_xdps, "Base program reported in multi program mode")
+    fail(xdp["attached"][0] not in two_xdps["attached"],
+         "Offload program not reported after driver activated")
+    fail(len(two_xdps["attached"]) != 2,
+         "Wrong attached program count with two programs")
+    fail(two_xdps["attached"][0]["prog"]["id"] ==
+         two_xdps["attached"][1]["prog"]["id"],
+         "offloaded and drv programs have the same id")
+    fail(offloaded != offloaded2,
+         "offload ID changed after loading driver program")
+
+    start_test("Test multi-attachment XDP - replace...")
+    ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True)
+    fail(err.count("busy") != 1, "Replaced one of programs without -force")
+
+    start_test("Test multi-attachment XDP - detach...")
+    ret, _, err = sim.unset_xdp("drv", force=True,
+                                fail=False, include_stderr=True)
+    fail(ret == 0, "Removed program with a bad mode")
+    check_extack(err, "program loaded with different flags.", args)
+
+    sim.unset_xdp("offload")
+    xdp = sim.ip_link_show(xdp=True)["xdp"]
+    offloaded = sim.dfs_read("bpf_offloaded_id")
+
+    fail(xdp["mode"] != 1, "Bad mode reported after multiple programs")
+    fail("prog" not in xdp,
+         "Base program not reported after multi program mode")
+    fail(xdp["attached"][0] not in two_xdps["attached"],
+         "Offload program not reported after driver activated")
+    fail(len(xdp["attached"]) != 1,
+         "Wrong attached program count with remaining programs")
+    fail(offloaded != "0", "offload ID reported with only driver program left")
+
+    start_test("Test multi-attachment XDP - device remove...")
+    sim.set_xdp(obj, "offload")
+    sim.remove()
+
+    sim = NetdevSim()
+    sim.set_ethtool_tc_offloads(True)
+
     start_test("Test mixing of TC and XDP...")
     sim.tc_add_ingress()
     sim.set_xdp(obj, "offload")
index a5e76b9219b9d86ab83d92d9f4964fea5d30c915..2e45c92d11111784ff0e5d5b520af48ad986d911 100644 (file)
@@ -998,8 +998,9 @@ int init_pktinfo(int domain, struct cmsghdr *cmsg)
        return 0;
 }
 
-static int sendmsg_to_server(const struct sockaddr_storage *addr,
-                            socklen_t addr_len, int set_cmsg, int *syscall_err)
+static int sendmsg_to_server(int type, const struct sockaddr_storage *addr,
+                            socklen_t addr_len, int set_cmsg, int flags,
+                            int *syscall_err)
 {
        union {
                char buf[CMSG_SPACE(sizeof(struct in6_pktinfo))];
@@ -1022,7 +1023,7 @@ static int sendmsg_to_server(const struct sockaddr_storage *addr,
                goto err;
        }
 
-       fd = socket(domain, SOCK_DGRAM, 0);
+       fd = socket(domain, type, 0);
        if (fd == -1) {
                log_err("Failed to create client socket");
                goto err;
@@ -1052,7 +1053,7 @@ static int sendmsg_to_server(const struct sockaddr_storage *addr,
                }
        }
 
-       if (sendmsg(fd, &hdr, 0) != sizeof(data)) {
+       if (sendmsg(fd, &hdr, flags) != sizeof(data)) {
                log_err("Fail to send message to server");
                *syscall_err = errno;
                goto err;
@@ -1066,6 +1067,15 @@ static int sendmsg_to_server(const struct sockaddr_storage *addr,
        return fd;
 }
 
+static int fastconnect_to_server(const struct sockaddr_storage *addr,
+                                socklen_t addr_len)
+{
+       int sendmsg_err;
+
+       return sendmsg_to_server(SOCK_STREAM, addr, addr_len, /*set_cmsg*/0,
+                                MSG_FASTOPEN, &sendmsg_err);
+}
+
 static int recvmsg_from_client(int sockfd, struct sockaddr_storage *src_addr)
 {
        struct timeval tv;
@@ -1185,6 +1195,20 @@ static int run_connect_test_case(const struct sock_addr_test *test)
        if (cmp_local_ip(clientfd, &expected_src_addr))
                goto err;
 
+       if (test->type == SOCK_STREAM) {
+               /* Test TCP Fast Open scenario */
+               clientfd = fastconnect_to_server(&requested_addr, addr_len);
+               if (clientfd == -1)
+                       goto err;
+
+               /* Make sure src and dst addrs were overridden properly */
+               if (cmp_peer_addr(clientfd, &expected_addr))
+                       goto err;
+
+               if (cmp_local_ip(clientfd, &expected_src_addr))
+                       goto err;
+       }
+
        goto out;
 err:
        err = -1;
@@ -1222,8 +1246,9 @@ static int run_sendmsg_test_case(const struct sock_addr_test *test)
                if (clientfd >= 0)
                        close(clientfd);
 
-               clientfd = sendmsg_to_server(&requested_addr, addr_len,
-                                            set_cmsg, &err);
+               clientfd = sendmsg_to_server(test->type, &requested_addr,
+                                            addr_len, set_cmsg, /*flags*/0,
+                                            &err);
                if (err)
                        goto out;
                else if (clientfd == -1)
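
fastconnect_to_server() above drives TCP Fast Open by passing MSG_FASTOPEN to sendmsg() on a never-connected SOCK_STREAM socket: the destination travels in msg_name and the payload can ride in the SYN (or is sent after the handshake as a fallback). A stripped-down client sketch (the server address is an assumption, and the host may need net.ipv4.tcp_fastopen enabled):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
        struct sockaddr_in srv = { .sin_family = AF_INET,
                                   .sin_port = htons(8080) };
        char data[] = "hello";
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr hdr = { 0 };
        int fd;

        inet_pton(AF_INET, "127.0.0.1", &srv.sin_addr);

        fd = socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0)
            return 1;

        /* No connect(): MSG_FASTOPEN takes the destination from
         * msg_name and performs the implicit connect itself. */
        hdr.msg_name = &srv;
        hdr.msg_namelen = sizeof(srv);
        hdr.msg_iov = &iov;
        hdr.msg_iovlen = 1;

        if (sendmsg(fd, &hdr, MSG_FASTOPEN) != sizeof(data))
            perror("sendmsg");

        close(fd);
        return 0;
    }
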
index 05c8cb71724ae8c1d8d7c3e3453bce9a83092b96..9e78df207919366fbdd048f7645568ac701ad5f5 100644 (file)
@@ -1413,18 +1413,12 @@ static int test_suite(void)
 
 int main(int argc, char **argv)
 {
-       struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
        int iov_count = 1, length = 1024, rate = 1;
        struct sockmap_options options = {0};
        int opt, longindex, err, cg_fd = 0;
        char *bpf_file = BPF_SOCKMAP_FILENAME;
        int test = PING_PONG;
 
-       if (setrlimit(RLIMIT_MEMLOCK, &r)) {
-               perror("setrlimit(RLIMIT_MEMLOCK)");
-               return 1;
-       }
-
        if (argc < 2)
                return test_suite();
 
index 2fe43289943c7fcf2fc5715c5abb4b4d10d06b1f..7bcfa62070056e0a07d6d8c03e25ccfa37e2bb0b 100644 (file)
@@ -12,5 +12,6 @@ struct tcpbpf_globals {
        __u32 good_cb_test_rv;
        __u64 bytes_received;
        __u64 bytes_acked;
+       __u32 num_listen;
 };
 #endif
index 3e645ee41ed5fcc033266645b8e893c143fb79fc..4b7fd540cea9dd89d506bfe693be0414e79b6d90 100644 (file)
@@ -96,15 +96,22 @@ int bpf_testcb(struct bpf_sock_ops *skops)
                        if (!gp)
                                break;
                        g = *gp;
-                       g.total_retrans = skops->total_retrans;
-                       g.data_segs_in = skops->data_segs_in;
-                       g.data_segs_out = skops->data_segs_out;
-                       g.bytes_received = skops->bytes_received;
-                       g.bytes_acked = skops->bytes_acked;
+                       if (skops->args[0] == BPF_TCP_LISTEN) {
+                               g.num_listen++;
+                       } else {
+                               g.total_retrans = skops->total_retrans;
+                               g.data_segs_in = skops->data_segs_in;
+                               g.data_segs_out = skops->data_segs_out;
+                               g.bytes_received = skops->bytes_received;
+                               g.bytes_acked = skops->bytes_acked;
+                       }
                        bpf_map_update_elem(&global_map, &key, &g,
                                            BPF_ANY);
                }
                break;
+       case BPF_SOCK_OPS_TCP_LISTEN_CB:
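+               /* Subscribe to state-change callbacks; the state handler
+                * above bumps num_listen when it sees BPF_TCP_LISTEN.
+                */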
+               bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_STATE_CB_FLAG);
+               break;
        default:
                rv = -1;
        }
index 84ab5163c8281211f606167cfc23ed152efa0fcb..a275c29713760b79a2e74700d14fd78931947976 100644 (file)
@@ -1,27 +1,59 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <inttypes.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <stdio.h>
 #include <unistd.h>
 #include <errno.h>
-#include <signal.h>
 #include <string.h>
-#include <assert.h>
-#include <linux/perf_event.h>
-#include <linux/ptrace.h>
 #include <linux/bpf.h>
-#include <sys/ioctl.h>
-#include <sys/time.h>
 #include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
 #include <bpf/bpf.h>
 #include <bpf/libbpf.h>
-#include "bpf_util.h"
+
 #include "bpf_rlimit.h"
-#include <linux/perf_event.h>
+#include "bpf_util.h"
+#include "cgroup_helpers.h"
+
 #include "test_tcpbpf.h"
 
+#define EXPECT_EQ(expected, actual, fmt)                       \
+       do {                                                    \
+               if ((expected) != (actual)) {                   \
+                       printf("  Value of: " #actual "\n"      \
+                              "    Actual: %" fmt "\n"         \
+                              "  Expected: %" fmt "\n",        \
+                              (actual), (expected));           \
+                       goto err;                               \
+               }                                               \
+       } while (0)
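+/* The fmt argument is pasted after "%", so "#" PRIx32 composes to "%#x"
+ * and a bare PRIu32 to "%u".
+ */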
+
+int verify_result(const struct tcpbpf_globals *result)
+{
+       __u32 expected_events;
+
+       expected_events = ((1 << BPF_SOCK_OPS_TIMEOUT_INIT) |
+                          (1 << BPF_SOCK_OPS_RWND_INIT) |
+                          (1 << BPF_SOCK_OPS_TCP_CONNECT_CB) |
+                          (1 << BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB) |
+                          (1 << BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB) |
+                          (1 << BPF_SOCK_OPS_NEEDS_ECN) |
+                          (1 << BPF_SOCK_OPS_STATE_CB) |
+                          (1 << BPF_SOCK_OPS_TCP_LISTEN_CB));
+
+       EXPECT_EQ(expected_events, result->event_map, "#" PRIx32);
+       EXPECT_EQ(501ULL, result->bytes_received, "llu");
+       EXPECT_EQ(1002ULL, result->bytes_acked, "llu");
+       EXPECT_EQ(1, result->data_segs_in, PRIu32);
+       EXPECT_EQ(1, result->data_segs_out, PRIu32);
+       EXPECT_EQ(0x80, result->bad_cb_test_rv, PRIu32);
+       EXPECT_EQ(0, result->good_cb_test_rv, PRIu32);
+       EXPECT_EQ(1, result->num_listen, PRIu32);
+
+       return 0;
+err:
+       return -1;
+}
+
 static int bpf_find_map(const char *test, struct bpf_object *obj,
                        const char *name)
 {
@@ -35,42 +67,28 @@ static int bpf_find_map(const char *test, struct bpf_object *obj,
        return bpf_map__fd(map);
 }
 
-#define SYSTEM(CMD)                                            \
-       do {                                                    \
-               if (system(CMD)) {                              \
-                       printf("system(%s) FAILS!\n", CMD);     \
-               }                                               \
-       } while (0)
-
 int main(int argc, char **argv)
 {
        const char *file = "test_tcpbpf_kern.o";
        struct tcpbpf_globals g = {0};
-       int cg_fd, prog_fd, map_fd;
-       bool debug_flag = false;
+       const char *cg_path = "/foo";
        int error = EXIT_FAILURE;
        struct bpf_object *obj;
-       char cmd[100], *dir;
-       struct stat buffer;
+       int prog_fd, map_fd;
+       int cg_fd = -1;
        __u32 key = 0;
-       int pid;
        int rv;
 
-       if (argc > 1 && strcmp(argv[1], "-d") == 0)
-               debug_flag = true;
+       if (setup_cgroup_environment())
+               goto err;
 
-       dir = "/tmp/cgroupv2/foo";
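+       /* create_and_get_cgroup() returns the cgroup fd on success and 0
+        * on failure, hence the !cg_fd check below.
+        */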
+       cg_fd = create_and_get_cgroup(cg_path);
+       if (!cg_fd)
+               goto err;
 
-       if (stat(dir, &buffer) != 0) {
-               SYSTEM("mkdir -p /tmp/cgroupv2");
-               SYSTEM("mount -t cgroup2 none /tmp/cgroupv2");
-               SYSTEM("mkdir -p /tmp/cgroupv2/foo");
-       }
-       pid = (int) getpid();
-       sprintf(cmd, "echo %d >> /tmp/cgroupv2/foo/cgroup.procs", pid);
-       SYSTEM(cmd);
+       if (join_cgroup(cg_path))
+               goto err;
 
-       cg_fd = open(dir, O_DIRECTORY, O_RDONLY);
        if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
                printf("FAILED: load_bpf_file failed for: %s\n", file);
                goto err;
@@ -83,7 +101,10 @@ int main(int argc, char **argv)
                goto err;
        }
 
-       SYSTEM("./tcp_server.py");
+       if (system("./tcp_server.py")) {
+               printf("FAILED: TCP server\n");
+               goto err;
+       }
 
        map_fd = bpf_find_map(__func__, obj, "global_map");
        if (map_fd < 0)
@@ -95,34 +116,16 @@ int main(int argc, char **argv)
                goto err;
        }
 
-       if (g.bytes_received != 501 || g.bytes_acked != 1002 ||
-           g.data_segs_in != 1 || g.data_segs_out != 1 ||
-           (g.event_map ^ 0x47e) != 0 || g.bad_cb_test_rv != 0x80 ||
-               g.good_cb_test_rv != 0) {
+       if (verify_result(&g)) {
                printf("FAILED: Wrong stats\n");
-               if (debug_flag) {
-                       printf("\n");
-                       printf("bytes_received: %d (expecting 501)\n",
-                              (int)g.bytes_received);
-                       printf("bytes_acked:    %d (expecting 1002)\n",
-                              (int)g.bytes_acked);
-                       printf("data_segs_in:   %d (expecting 1)\n",
-                              g.data_segs_in);
-                       printf("data_segs_out:  %d (expecting 1)\n",
-                              g.data_segs_out);
-                       printf("event_map:      0x%x (at least 0x47e)\n",
-                              g.event_map);
-                       printf("bad_cb_test_rv: 0x%x (expecting 0x80)\n",
-                              g.bad_cb_test_rv);
-                       printf("good_cb_test_rv:0x%x (expecting 0)\n",
-                              g.good_cb_test_rv);
-               }
                goto err;
        }
+
        printf("PASSED!\n");
        error = 0;
 err:
        bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS);
+       close(cg_fd);
+       cleanup_cgroup_environment();
        return error;
-
 }
index aeb2901f21f4737558efbecec73974b17c610a38..546aee3e9fb457ae166c0fda8bc0c3b484f1a19b 100755 (executable)
@@ -608,28 +608,26 @@ setup_xfrm_tunnel()
 test_xfrm_tunnel()
 {
        config_device
-        #tcpdump -nei veth1 ip &
-       output=$(mktemp)
-       cat /sys/kernel/debug/tracing/trace_pipe | tee $output &
-        setup_xfrm_tunnel
+       > /sys/kernel/debug/tracing/trace
+       setup_xfrm_tunnel
        tc qdisc add dev veth1 clsact
        tc filter add dev veth1 proto ip ingress bpf da obj test_tunnel_kern.o \
                sec xfrm_get_state
        ip netns exec at_ns0 ping $PING_ARG 10.1.1.200
        sleep 1
-       grep "reqid 1" $output
+       grep "reqid 1" /sys/kernel/debug/tracing/trace
        check_err $?
-       grep "spi 0x1" $output
+       grep "spi 0x1" /sys/kernel/debug/tracing/trace
        check_err $?
-       grep "remote ip 0xac100164" $output
+       grep "remote ip 0xac100164" /sys/kernel/debug/tracing/trace
        check_err $?
        cleanup
 
        if [ $ret -ne 0 ]; then
-                echo -e ${RED}"FAIL: xfrm tunnel"${NC}
-                return 1
-        fi
-        echo -e ${GREEN}"PASS: xfrm tunnel"${NC}
+               echo -e ${RED}"FAIL: xfrm tunnel"${NC}
+               return 1
+       fi
+       echo -e ${GREEN}"PASS: xfrm tunnel"${NC}
 }
 
 attach_bpf()
@@ -657,6 +655,10 @@ cleanup()
        ip link del ip6geneve11 2> /dev/null
        ip link del erspan11 2> /dev/null
        ip link del ip6erspan11 2> /dev/null
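+       # Also flush the xfrm states and policies installed by
+       # setup_xfrm_tunnel, so a rerun starts from a clean slate.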
+       ip xfrm policy delete dir out src 10.1.1.200/32 dst 10.1.1.100/32 2> /dev/null
+       ip xfrm policy delete dir in src 10.1.1.100/32 dst 10.1.1.200/32 2> /dev/null
+       ip xfrm state delete src 172.16.1.100 dst 172.16.1.200 proto esp spi 0x1 2> /dev/null
+       ip xfrm state delete src 172.16.1.200 dst 172.16.1.100 proto esp spi 0x2 2> /dev/null
 }
 
 cleanup_exit()
@@ -668,7 +670,7 @@ cleanup_exit()
 
 check()
 {
-       ip link help $1 2>&1 | grep -q "^Usage:"
+       ip link help 2>&1 | grep -q "\s$1\s"
        if [ $? -ne 0 ];then
                echo "SKIP $1: iproute2 not support"
        cleanup
index 3868dcb634201a1016ef5dc0bcba7450cb9eff57..cabe2a3a3b30076f3bcd282758e43774bde542ec 100644 (file)
@@ -88,7 +88,7 @@ static int page_size;
 static int page_cnt = 8;
 static struct perf_event_mmap_page *header;
 
-int perf_event_mmap(int fd)
+int perf_event_mmap_header(int fd, struct perf_event_mmap_page **header)
 {
        void *base;
        int mmap_size;
@@ -102,10 +102,15 @@ int perf_event_mmap(int fd)
                return -1;
        }
 
-       header = base;
+       *header = base;
        return 0;
 }
 
+int perf_event_mmap(int fd)
+{
+       return perf_event_mmap_header(fd, &header);
+}
+
 static int perf_event_poll(int fd)
 {
        struct pollfd pfd = { .fd = fd, .events = POLLIN };
@@ -163,3 +168,42 @@ int perf_event_poller(int fd, perf_event_print_fn output_fn)
 
        return ret;
 }
+
+int perf_event_poller_multi(int *fds, struct perf_event_mmap_page **headers,
+                           int num_fds, perf_event_print_fn output_fn)
+{
+       enum bpf_perf_event_ret ret;
+       struct pollfd *pfds;
+       void *buf = NULL;
+       size_t len = 0;
+       int i;
+
+       pfds = calloc(num_fds, sizeof(*pfds));
+       if (!pfds)
+               return LIBBPF_PERF_EVENT_ERROR;
+
+       for (i = 0; i < num_fds; i++) {
+               pfds[i].fd = fds[i];
+               pfds[i].events = POLLIN;
+       }
+
+       for (;;) {
+               poll(pfds, num_fds, 1000);
+               for (i = 0; i < num_fds; i++) {
+                       if (!pfds[i].revents)
+                               continue;
+
+                       ret = bpf_perf_event_read_simple(headers[i],
+                                                        page_cnt * page_size,
+                                                        page_size, &buf, &len,
+                                                        bpf_perf_event_print,
+                                                        output_fn);
+                       if (ret != LIBBPF_PERF_EVENT_CONT)
+                               goto out;
+               }
+       }
+out:
+       free(buf);
+       free(pfds);
+
+       return ret;
+}
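+
+/* A hypothetical caller pairs perf_event_mmap_header() with the new
+ * multi-fd poller roughly as follows (fds[] assumed to be perf event fds
+ * opened elsewhere, one per CPU; print_fn consumes one record):
+ *
+ *	struct perf_event_mmap_page **headers;
+ *	int i;
+ *
+ *	headers = calloc(num_fds, sizeof(*headers));
+ *	for (i = 0; i < num_fds; i++)
+ *		if (perf_event_mmap_header(fds[i], &headers[i]))
+ *			return LIBBPF_PERF_EVENT_ERROR;
+ *	return perf_event_poller_multi(fds, headers, num_fds, print_fn);
+ */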
index 3b4bcf7f5084ff55901e508216e24f6853f2214d..18924f23db1b99f30fbf198830868d98d7eb42b4 100644 (file)
@@ -3,6 +3,7 @@
 #define __TRACE_HELPER_H
 
 #include <libbpf.h>
+#include <linux/perf_event.h>
 
 struct ksym {
        long addr;
@@ -16,6 +17,9 @@ long ksym_get_addr(const char *name);
 typedef enum bpf_perf_event_ret (*perf_event_print_fn)(void *data, int size);
 
 int perf_event_mmap(int fd);
+int perf_event_mmap_header(int fd, struct perf_event_mmap_page **header);
 /* return LIBBPF_PERF_EVENT_DONE or LIBBPF_PERF_EVENT_ERROR */
 int perf_event_poller(int fd, perf_event_print_fn output_fn);
+int perf_event_poller_multi(int *fds, struct perf_event_mmap_page **headers,
+                           int num_fds, perf_event_print_fn output_fn);
 #endif
diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh
new file mode 100755 (executable)
index 0000000..76f1ab4
--- /dev/null
@@ -0,0 +1,217 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test uses standard topology for testing gretap. See
+# ../../../net/forwarding/mirror_gre_topo_lib.sh for more details.
+#
+# Test various aspects of offloading gretap mirrors that are specific to
+# mlxsw.
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+NUM_NETIFS=6
+source $lib_dir/lib.sh
+source $lib_dir/mirror_lib.sh
+source $lib_dir/mirror_gre_lib.sh
+source $lib_dir/mirror_gre_topo_lib.sh
+
+setup_keyful()
+{
+       tunnel_create gt6-key ip6gretap 2001:db8:3::1 2001:db8:3::2 \
+                     ttl 100 tos inherit allow-localremote \
+                     key 1234
+
+       tunnel_create h3-gt6-key ip6gretap 2001:db8:3::2 2001:db8:3::1 \
+                     key 1234
+       ip link set h3-gt6-key vrf v$h3
+       matchall_sink_create h3-gt6-key
+
+       ip address add dev $swp3 2001:db8:3::1/64
+       ip address add dev $h3 2001:db8:3::2/64
+}
+
+cleanup_keyful()
+{
+       ip address del dev $h3 2001:db8:3::2/64
+       ip address del dev $swp3 2001:db8:3::1/64
+
+       tunnel_destroy h3-gt6-key
+       tunnel_destroy gt6-key
+}
+
+setup_soft()
+{
+       # Set up a topology for testing underlay routes that point at an
+       # unsupported soft device.
+
+       tunnel_create gt6-soft ip6gretap 2001:db8:4::1 2001:db8:4::2 \
+                     ttl 100 tos inherit allow-localremote
+
+       tunnel_create h3-gt6-soft ip6gretap 2001:db8:4::2 2001:db8:4::1
+       ip link set h3-gt6-soft vrf v$h3
+       matchall_sink_create h3-gt6-soft
+
+       ip link add name v1 type veth peer name v2
+       ip link set dev v1 up
+       ip address add dev v1 2001:db8:4::1/64
+
+       ip link set dev v2 vrf v$h3
+       ip link set dev v2 up
+       ip address add dev v2 2001:db8:4::2/64
+}
+
+cleanup_soft()
+{
+       ip link del dev v1
+
+       tunnel_destroy h3-gt6-soft
+       tunnel_destroy gt6-soft
+}
+
+setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       swp1=${NETIFS[p2]}
+
+       swp2=${NETIFS[p3]}
+       h2=${NETIFS[p4]}
+
+       swp3=${NETIFS[p5]}
+       h3=${NETIFS[p6]}
+
+       vrf_prepare
+       mirror_gre_topo_create
+
+       ip address add dev $swp3 2001:db8:2::1/64
+       ip address add dev $h3 2001:db8:2::2/64
+
+       ip address add dev $swp3 192.0.2.129/28
+       ip address add dev $h3 192.0.2.130/28
+
+       setup_keyful
+       setup_soft
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       cleanup_soft
+       cleanup_keyful
+
+       ip address del dev $h3 2001:db8:2::2/64
+       ip address del dev $swp3 2001:db8:2::1/64
+
+       ip address del dev $h3 192.0.2.130/28
+       ip address del dev $swp3 192.0.2.129/28
+
+       mirror_gre_topo_destroy
+       vrf_cleanup
+}
+
+test_span_gre_ttl_inherit()
+{
+       local tundev=$1; shift
+       local type=$1; shift
+       local what=$1; shift
+
+       RET=0
+
+       ip link set dev $tundev type $type ttl inherit
+       mirror_install $swp1 ingress $tundev "matchall $tcflags"
+       fail_test_span_gre_dir $tundev ingress
+
+       ip link set dev $tundev type $type ttl 100
+
+       quick_test_span_gre_dir $tundev ingress
+       mirror_uninstall $swp1 ingress
+
+       log_test "$what: no offload on TTL of inherit ($tcflags)"
+}
+
+test_span_gre_tos_fixed()
+{
+       local tundev=$1; shift
+       local type=$1; shift
+       local what=$1; shift
+
+       RET=0
+
+       ip link set dev $tundev type $type tos 0x10
+       mirror_install $swp1 ingress $tundev "matchall $tcflags"
+       fail_test_span_gre_dir $tundev ingress
+
+       ip link set dev $tundev type $type tos inherit
+       quick_test_span_gre_dir $tundev ingress
+       mirror_uninstall $swp1 ingress
+
+       log_test "$what: no offload on a fixed TOS ($tcflags)"
+}
+
+test_span_failable()
+{
+       local should_fail=$1; shift
+       local tundev=$1; shift
+       local what=$1; shift
+
+       RET=0
+
+       mirror_install $swp1 ingress $tundev "matchall $tcflags"
+       if ((should_fail)); then
+           fail_test_span_gre_dir  $tundev ingress
+       else
+           quick_test_span_gre_dir $tundev ingress
+       fi
+       mirror_uninstall $swp1 ingress
+
+       log_test "$what: should_fail=$should_fail ($tcflags)"
+}
+
+test_failable()
+{
+       local should_fail=$1; shift
+
+       test_span_failable $should_fail gt6-key "mirror to keyful gretap"
+       test_span_failable $should_fail gt6-soft "mirror to gretap w/ soft underlay"
+}
+
+test_sw()
+{
+       slow_path_trap_install $swp1 ingress
+       slow_path_trap_install $swp1 egress
+
+       test_failable 0
+
+       slow_path_trap_uninstall $swp1 egress
+       slow_path_trap_uninstall $swp1 ingress
+}
+
+test_hw()
+{
+       test_failable 1
+
+       test_span_gre_tos_fixed gt4 gretap "mirror to gretap"
+       test_span_gre_tos_fixed gt6 ip6gretap "mirror to ip6gretap"
+
+       test_span_gre_ttl_inherit gt4 gretap "mirror to gretap"
+       test_span_gre_ttl_inherit gt6 ip6gretap "mirror to ip6gretap"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+if ! tc_offload_check; then
+    check_err 1 "Could not test offloaded functionality"
+    log_test "mlxsw-specific tests for mirror to gretap"
+    exit
+fi
+
+tcflags="skip_hw"
+test_sw
+
+tcflags="skip_sw"
+test_hw
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
new file mode 100644 (file)
index 0000000..6f3a70d
--- /dev/null
@@ -0,0 +1,197 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# Test offloading a number of mirrors-to-gretap. The test creates a number of
+# tunnels, adds one flower mirror per tunnel (each matching a different host
+# IP), generates traffic at each of those host IPs, and checks that the
+# traffic was mirrored through the corresponding tunnel.
+#
+#   +--------------------------+                   +--------------------------+
+#   | H1                       |                   |                       H2 |
+#   |     + $h1                |                   |                $h2 +     |
+#   |     | 2001:db8:1:X::1/64 |                   | 2001:db8:1:X::2/64 |     |
+#   +-----|--------------------+                   +--------------------|-----+
+#         |                                                             |
+#   +-----|-------------------------------------------------------------|-----+
+#   | SW  o--> mirrors                                                  |     |
+#   | +---|-------------------------------------------------------------|---+ |
+#   | |   + $swp1                    BR                           $swp2 +   | |
+#   | +---------------------------------------------------------------------+ |
+#   |                                                                         |
+#   |     + $swp3                          + gt6-<X> (ip6gretap)              |
+#   |     | 2001:db8:2:X::1/64             : loc=2001:db8:2:X::1              |
+#   |     |                                : rem=2001:db8:2:X::2              |
+#   |     |                                : ttl=100                          |
+#   |     |                                : tos=inherit                      |
+#   |     |                                :                                  |
+#   +-----|--------------------------------:----------------------------------+
+#         |                                :
+#   +-----|--------------------------------:----------------------------------+
+#   | H3  + $h3                            + h3-gt6-<X> (ip6gretap)           |
+#   |       2001:db8:2:X::2/64               loc=2001:db8:2:X::2              |
+#   |                                        rem=2001:db8:2:X::1              |
+#   |                                        ttl=100                          |
+#   |                                        tos=inherit                      |
+#   |                                                                         |
+#   +-------------------------------------------------------------------------+
+
+source ../../../../net/forwarding/mirror_lib.sh
+
+MIRROR_NUM_NETIFS=6
+
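+# Compose a 2001:db8::/32 documentation prefix from (net, num); e.g.
+# "mirror_gre_ipv6_addr 2 10" prints 2001:db8:2:a, and callers append
+# suffixes like "::1/64".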
+mirror_gre_ipv6_addr()
+{
+       local net=$1; shift
+       local num=$1; shift
+
+       printf "2001:db8:%x:%x" $net $num
+}
+
+mirror_gre_tunnels_create()
+{
+       local count=$1; shift
+       local should_fail=$1; shift
+
+       MIRROR_GRE_BATCH_FILE="$(mktemp)"
+       for ((i=0; i < count; ++i)); do
+               local match_dip=$(mirror_gre_ipv6_addr 1 $i)::2
+               local htun=h3-gt6-$i
+               local tun=gt6-$i
+
+               ((mirror_gre_tunnels++))
+
+               ip address add dev $h1 $(mirror_gre_ipv6_addr 1 $i)::1/64
+               ip address add dev $h2 $(mirror_gre_ipv6_addr 1 $i)::2/64
+
+               ip address add dev $swp3 $(mirror_gre_ipv6_addr 2 $i)::1/64
+               ip address add dev $h3 $(mirror_gre_ipv6_addr 2 $i)::2/64
+
+               tunnel_create $tun ip6gretap \
+                             $(mirror_gre_ipv6_addr 2 $i)::1 \
+                             $(mirror_gre_ipv6_addr 2 $i)::2 \
+                             ttl 100 tos inherit allow-localremote
+
+               tunnel_create $htun ip6gretap \
+                             $(mirror_gre_ipv6_addr 2 $i)::2 \
+                             $(mirror_gre_ipv6_addr 2 $i)::1
+               ip link set $htun vrf v$h3
+               matchall_sink_create $htun
+
+               cat >> $MIRROR_GRE_BATCH_FILE <<-EOF
+                       filter add dev $swp1 ingress pref 1000 \
+                               protocol ipv6 \
+                               flower $tcflags dst_ip $match_dip \
+                               action mirred egress mirror dev $tun
+               EOF
+       done
+
+       tc -b $MIRROR_GRE_BATCH_FILE
+       check_err_fail $should_fail $? "Mirror rule insertion"
+}
+
+mirror_gre_tunnels_destroy()
+{
+       local count=$1; shift
+
+       for ((i=0; i < count; ++i)); do
+               local htun=h3-gt6-$i
+               local tun=gt6-$i
+
+               ip address del dev $h3 $(mirror_gre_ipv6_addr 2 $i)::2/64
+               ip address del dev $swp3 $(mirror_gre_ipv6_addr 2 $i)::1/64
+
+               ip address del dev $h2 $(mirror_gre_ipv6_addr 1 $i)::2/64
+               ip address del dev $h1 $(mirror_gre_ipv6_addr 1 $i)::1/64
+
+               tunnel_destroy $htun
+               tunnel_destroy $tun
+       done
+}
+
+__mirror_gre_test()
+{
+       local count=$1; shift
+       local should_fail=$1; shift
+
+       mirror_gre_tunnels_create $count $should_fail
+       if ((should_fail)); then
+           return
+       fi
+
+       sleep 5
+
+       for ((i = 0; i < count; ++i)); do
+               local dip=$(mirror_gre_ipv6_addr 1 $i)::2
+               local htun=h3-gt6-$i
+
+               icmp6_capture_install $htun
+               mirror_test v$h1 "" $dip $htun 100 10
+               icmp6_capture_uninstall $htun
+       done
+}
+
+mirror_gre_test()
+{
+       local count=$1; shift
+       local should_fail=$1; shift
+
+       if ! tc_offload_check $TC_FLOWER_NUM_NETIFS; then
+               check_err 1 "Could not test offloaded functionality"
+               return
+       fi
+
+       tcflags="skip_sw"
+       __mirror_gre_test $count $should_fail
+}
+
+mirror_gre_setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       swp1=${NETIFS[p2]}
+
+       swp2=${NETIFS[p3]}
+       h2=${NETIFS[p4]}
+
+       swp3=${NETIFS[p5]}
+       h3=${NETIFS[p6]}
+
+       mirror_gre_tunnels=0
+
+       vrf_prepare
+
+       simple_if_init $h1
+       simple_if_init $h2
+       simple_if_init $h3
+
+       ip link add name br1 type bridge vlan_filtering 1
+       ip link set dev br1 up
+
+       ip link set dev $swp1 master br1
+       ip link set dev $swp1 up
+       tc qdisc add dev $swp1 clsact
+
+       ip link set dev $swp2 master br1
+       ip link set dev $swp2 up
+
+       ip link set dev $swp3 up
+}
+
+mirror_gre_cleanup()
+{
+       mirror_gre_tunnels_destroy $mirror_gre_tunnels
+
+       ip link set dev $swp3 down
+
+       ip link set dev $swp2 down
+
+       tc qdisc del dev $swp1 clsact
+       ip link set dev $swp1 down
+
+       ip link del dev br1
+
+       simple_if_fini $h3
+       simple_if_fini $h2
+       simple_if_fini $h1
+
+       vrf_cleanup
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh
new file mode 100644 (file)
index 0000000..d231649
--- /dev/null
@@ -0,0 +1,167 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ROUTER_NUM_NETIFS=4
+
+router_h1_create()
+{
+       simple_if_init $h1 192.0.1.1/24
+       ip route add 193.0.0.0/8 via 192.0.1.2 dev $h1
+}
+
+router_h1_destroy()
+{
+       ip route del 193.0.0.0/8 via 192.0.1.2 dev $h1
+       simple_if_fini $h1 192.0.1.1/24
+}
+
+router_h2_create()
+{
+       simple_if_init $h2 192.0.2.1/24
+       tc qdisc add dev $h2 handle ffff: ingress
+}
+
+router_h2_destroy()
+{
+       tc qdisc del dev $h2 handle ffff: ingress
+       simple_if_fini $h2 192.0.2.1/24
+}
+
+router_create()
+{
+       ip link set dev $rp1 up
+       ip link set dev $rp2 up
+
+       ip address add 192.0.1.2/24 dev $rp1
+       ip address add 192.0.2.2/24 dev $rp2
+}
+
+router_destroy()
+{
+       ip address del 192.0.2.2/24 dev $rp2
+       ip address del 192.0.1.2/24 dev $rp1
+
+       ip link set dev $rp2 down
+       ip link set dev $rp1 down
+}
+
+router_setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       rp1=${NETIFS[p2]}
+
+       rp2=${NETIFS[p3]}
+       h2=${NETIFS[p4]}
+
+       h1mac=$(mac_get $h1)
+       rp1mac=$(mac_get $rp1)
+
+       vrf_prepare
+
+       router_h1_create
+       router_h2_create
+
+       router_create
+}
+
+router_offload_validate()
+{
+       local route_count=$1
+       local offloaded_count
+
+       offloaded_count=$(ip route | grep -o 'offload' | wc -l)
+       [[ $offloaded_count -ge $route_count ]]
+}
+
+router_routes_create()
+{
+       local route_count=$1
+       local count=0
+
+       ROUTE_FILE="$(mktemp)"
+
+       for i in {0..255}
+       do
+               for j in {0..255}
+               do
+                       for k in {0..255}
+                       do
+                               if [[ $count -eq $route_count ]]; then
+                                       break 3
+                               fi
+
+                               echo route add 193.${i}.${j}.${k}/32 via \
+                                      192.0.2.1 dev $rp2  >> $ROUTE_FILE
+                               ((count++))
+                       done
+               done
+       done
+
+       ip -b $ROUTE_FILE &> /dev/null
+}
+
+router_routes_destroy()
+{
+       if [[ -v ROUTE_FILE ]]; then
+               rm -f $ROUTE_FILE
+       fi
+}
+
+router_test()
+{
+       local route_count=$1
+       local should_fail=$2
+       local count=0
+
+       RET=0
+
+       router_routes_create $route_count
+
+       router_offload_validate $route_count
+       check_err_fail $should_fail $? "Offload of $route_count routes"
+       if [[ $RET -ne 0 ]] || [[ $should_fail -eq 1 ]]; then
+               return
+       fi
+
+       tc filter add dev $h2 ingress protocol ip pref 1 flower \
+               skip_sw dst_ip 193.0.0.0/8 action drop
+
+       for i in {0..255}
+       do
+               for j in {0..255}
+               do
+                       for k in {0..255}
+                       do
+                               if [[ $count -eq $route_count ]]; then
+                                       break 3
+                               fi
+
+                               $MZ $h1 -c 1 -p 64 -a $h1mac -b $rp1mac \
+                                       -A 192.0.1.1 -B 193.${i}.${j}.${k} \
+                                       -t ip -q
+                               ((count++))
+                       done
+               done
+       done
+
+       tc_check_packets "dev $h2 ingress" 1 $route_count
+       check_err $? "Offload mismatch"
+
+       tc filter del dev $h2 ingress protocol ip pref 1 flower \
+               skip_sw dst_ip 193.0.0.0/8 action drop
+
+       router_routes_destroy
+}
+
+router_cleanup()
+{
+       pre_cleanup
+
+       router_routes_destroy
+       router_destroy
+
+       router_h2_destroy
+       router_h1_destroy
+
+       vrf_cleanup
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh
new file mode 100644 (file)
index 0000000..73035e2
--- /dev/null
@@ -0,0 +1,119 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source "../../../../net/forwarding/devlink_lib.sh"
+
+if [ "$DEVLINK_VIDDID" != "15b3:cb84" ]; then
+       echo "SKIP: test is tailored for Mellanox Spectrum"
+       exit 1
+fi
+
+# Needed for returning to default
+declare -A KVD_DEFAULTS
+
+KVD_CHILDREN="linear hash_single hash_double"
+KVDL_CHILDREN="singles chunks large_chunks"
+
+devlink_sp_resource_minimize()
+{
+       local size
+       local i
+
+       for i in $KVD_CHILDREN; do
+               size=$(devlink_resource_get kvd "$i" | jq '.["size_min"]')
+               devlink_resource_size_set "$size" kvd "$i"
+       done
+
+       for i in $KVDL_CHILDREN; do
+               size=$(devlink_resource_get kvd linear "$i" | \
+                      jq '.["size_min"]')
+               devlink_resource_size_set "$size" kvd linear "$i"
+       done
+}
+
+devlink_sp_size_kvd_to_default()
+{
+       local need_reload=0
+       local i
+
+       for i in $KVD_CHILDREN; do
+               local size=$(echo "${KVD_DEFAULTS[kvd_$i]}" | jq '.["size"]')
+               current_size=$(devlink_resource_size_get kvd "$i")
+
+               if [ "$size" -ne "$current_size" ]; then
+                       devlink_resource_size_set "$size" kvd "$i"
+                       need_reload=1
+               fi
+       done
+
+       for i in $KVDL_CHILDREN; do
+               local size=$(echo "${KVD_DEFAULTS[kvd_linear_$i]}" | \
+                            jq '.["size"]')
+               current_size=$(devlink_resource_size_get kvd linear "$i")
+
+               if [ "$size" -ne "$current_size" ]; then
+                       devlink_resource_size_set "$size" kvd linear "$i"
+                       need_reload=1
+               fi
+       done
+
+       if [ "$need_reload" -ne "0" ]; then
+               devlink_reload
+       fi
+}
+
+devlink_sp_read_kvd_defaults()
+{
+       local key
+       local i
+
+       KVD_DEFAULTS[kvd]=$(devlink_resource_get "kvd")
+       for i in $KVD_CHILDREN; do
+               key=kvd_$i
+               KVD_DEFAULTS[$key]=$(devlink_resource_get kvd "$i")
+       done
+
+       for i in $KVDL_CHILDREN; do
+               key=kvd_linear_$i
+               KVD_DEFAULTS[$key]=$(devlink_resource_get kvd linear "$i")
+       done
+}
+
+KVD_PROFILES="default scale ipv4_max"
+
+devlink_sp_resource_kvd_profile_set()
+{
+       local profile=$1
+
+       case "$profile" in
+       scale)
+               devlink_resource_size_set 64000 kvd linear
+               devlink_resource_size_set 15616 kvd linear singles
+               devlink_resource_size_set 32000 kvd linear chunks
+               devlink_resource_size_set 16384 kvd linear large_chunks
+               devlink_resource_size_set 128000 kvd hash_single
+               devlink_resource_size_set 48000 kvd hash_double
+               devlink_reload
+               ;;
+       ipv4_max)
+               devlink_resource_size_set 64000 kvd linear
+               devlink_resource_size_set 15616 kvd linear singles
+               devlink_resource_size_set 32000 kvd linear chunks
+               devlink_resource_size_set 16384 kvd linear large_chunks
+               devlink_resource_size_set 144000 kvd hash_single
+               devlink_resource_size_set 32768 kvd hash_double
+               devlink_reload
+               ;;
+       default)
+               devlink_resource_size_set 98304 kvd linear
+               devlink_resource_size_set 16384 kvd linear singles
+               devlink_resource_size_set 49152 kvd linear chunks
+               devlink_resource_size_set 32768 kvd linear large_chunks
+               devlink_resource_size_set 87040 kvd hash_single
+               devlink_resource_size_set 60416 kvd hash_double
+               devlink_reload
+               ;;
+       *)
+               check_err 1 "Unknown profile $profile"
+       esac
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh
new file mode 100755 (executable)
index 0000000..b1fe960
--- /dev/null
@@ -0,0 +1,117 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+NUM_NETIFS=1
+source devlink_lib_spectrum.sh
+
+setup_prepare()
+{
+       devlink_sp_read_kvd_defaults
+}
+
+cleanup()
+{
+       pre_cleanup
+       devlink_sp_size_kvd_to_default
+}
+
+trap cleanup EXIT
+
+setup_prepare
+
+profiles_test()
+{
+       local i
+
+       log_info "Running profile tests"
+
+       for i in $KVD_PROFILES; do
+               RET=0
+               devlink_sp_resource_kvd_profile_set $i
+               log_test "'$i' profile"
+       done
+
+       # Default is explicitly tested at end to ensure it's actually applied
+       RET=0
+       devlink_sp_resource_kvd_profile_set "default"
+       log_test "'default' profile"
+}
+
+resources_min_test()
+{
+       local size
+       local i
+       local j
+
+       log_info "Running KVD-minimum tests"
+
+       for i in $KVD_CHILDREN; do
+               RET=0
+               size=$(devlink_resource_get kvd "$i" | jq '.["size_min"]')
+               devlink_resource_size_set "$size" kvd "$i"
+
+               # In case of linear, need to minimize sub-resources as well
+               if [[ "$i" == "linear" ]]; then
+                       for j in $KVDL_CHILDREN; do
+                               devlink_resource_size_set 0 kvd linear "$j"
+                       done
+               fi
+
+               devlink_reload
+               devlink_sp_size_kvd_to_default
+               log_test "'$i' minimize [$size]"
+       done
+}
+
+resources_max_test()
+{
+       local min_size
+       local size
+       local i
+       local j
+
+       log_info "Running KVD-maximum tests"
+       for i in $KVD_CHILDREN; do
+               RET=0
+               devlink_sp_resource_minimize
+
+               # Calculate the maximum possible size for the given partition
+               size=$(devlink_resource_size_get kvd)
+               for j in $KVD_CHILDREN; do
+                       if [ "$i" != "$j" ]; then
+                               min_size=$(devlink_resource_get kvd "$j" | \
+                                          jq '.["size_min"]')
+                               size=$((size - min_size))
+                       fi
+               done
+
+               # Test almost maximum size
+               devlink_resource_size_set "$((size - 128))" kvd "$i"
+               devlink_reload
+               log_test "'$i' almost maximize [$((size - 128))]"
+
+               # Test above maximum size
+               devlink resource set "$DEVLINK_DEV" \
+                       path "kvd/$i" size $((size + 128)) &> /dev/null
+               check_fail $? "Set kvd/$i to size $((size + 128)) should fail"
+               log_test "'$i' Overflow rejection [$((size + 128))]"
+
+               # Test maximum size
+               if [ "$i" == "hash_single" ] || [ "$i" == "hash_double" ]; then
+                       echo "SKIP: Observed problem with exact max $i"
+                       continue
+               fi
+
+               devlink_resource_size_set "$size" kvd "$i"
+               devlink_reload
+               log_test "'$i' maximize [$size]"
+
+               devlink_sp_size_kvd_to_default
+       done
+}
+
+profiles_test
+resources_min_test
+resources_max_test
+
+exit "$RET"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/mirror_gre_scale.sh
new file mode 100644 (file)
index 0000000..8d2186c
--- /dev/null
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+source ../mirror_gre_scale.sh
+
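+# Number of gretap mirrors the device is expected to offload: three
+# should succeed, and one more than that should be rejected.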
+mirror_gre_get_target()
+{
+       local should_fail=$1; shift
+
+       if ((! should_fail)); then
+               echo 3
+       else
+               echo 4
+       fi
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
new file mode 100755 (executable)
index 0000000..a0a80e1
--- /dev/null
@@ -0,0 +1,55 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+NUM_NETIFS=6
+source ../../../../net/forwarding/lib.sh
+source ../../../../net/forwarding/tc_common.sh
+source devlink_lib_spectrum.sh
+
+current_test=""
+
+cleanup()
+{
+       pre_cleanup
+       if [ -n "$current_test" ]; then
+               ${current_test}_cleanup
+       fi
+       devlink_sp_size_kvd_to_default
+}
+
+devlink_sp_read_kvd_defaults
+trap cleanup EXIT
+
+ALL_TESTS="router tc_flower mirror_gre"
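+# For each test and each KVD profile, run twice: once at the expected
+# capacity (should_fail=0) and once just past it (should_fail=1).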
+for current_test in ${TESTS:-$ALL_TESTS}; do
+       source ${current_test}_scale.sh
+
+       num_netifs_var=${current_test^^}_NUM_NETIFS
+       num_netifs=${!num_netifs_var:-$NUM_NETIFS}
+
+       for profile in $KVD_PROFILES; do
+               RET=0
+               devlink_sp_resource_kvd_profile_set $profile
+               if [[ $RET -gt 0 ]]; then
+                       log_test "'$current_test' [$profile] setting"
+                       continue
+               fi
+
+               for should_fail in 0 1; do
+                       RET=0
+                       target=$(${current_test}_get_target "$should_fail")
+                       ${current_test}_setup_prepare
+                       setup_wait $num_netifs
+                       ${current_test}_test "$target" "$should_fail"
+                       ${current_test}_cleanup
+                       if [[ "$should_fail" -eq 0 ]]; then
+                               log_test "'$current_test' [$profile] $target"
+                       else
+                               log_test "'$current_test' [$profile] overflow $target"
+                       fi
+               done
+       done
+done
+current_test=""
+
+exit "$RET"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/router_scale.sh
new file mode 100644 (file)
index 0000000..21c4697
--- /dev/null
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+source ../router_scale.sh
+
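+# Aim at 85% of the hash_single partition for the passing run; for the
+# failing run, request one route more than the partition size.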
+router_get_target()
+{
+       local should_fail=$1
+       local target
+
+       target=$(devlink_resource_size_get kvd hash_single)
+
+       if [[ $should_fail -eq 0 ]]; then
+               target=$((target * 85 / 100))
+       else
+               target=$((target + 1))
+       fi
+
+       echo $target
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_flower_scale.sh
new file mode 100644 (file)
index 0000000..f9bfd89
--- /dev/null
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+source ../tc_flower_scale.sh
+
+tc_flower_get_target()
+{
+       local should_fail=$1; shift
+
+       # 6144 (6x1024) is the theoretical maximum.
+       # One bank of 512 rules is taken by the 18-byte MC router rule.
+       # One rule is the ACL catch-all.
+       # 6144 - 512 - 1 = 5631
+       local target=5631
+
+       if ((! should_fail)); then
+               echo $target
+       else
+               echo $((target + 1))
+       fi
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
new file mode 100644 (file)
index 0000000..a6d733d
--- /dev/null
@@ -0,0 +1,134 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for resource limit of offloaded flower rules. The test adds a given
+# number of flower matches for different IPv6 addresses, then generates
+# traffic, and ensures each rule was hit exactly once. This file contains
+# functions to set up a testing topology and run the test, and is meant to
+# be sourced from a test script that calls the testing routine with a given
+# number of rules.
+
+TC_FLOWER_NUM_NETIFS=2
+
+tc_flower_h1_create()
+{
+       simple_if_init $h1
+       tc qdisc add dev $h1 clsact
+}
+
+tc_flower_h1_destroy()
+{
+       tc qdisc del dev $h1 clsact
+       simple_if_fini $h1
+}
+
+tc_flower_h2_create()
+{
+       simple_if_init $h2
+       tc qdisc add dev $h2 clsact
+}
+
+tc_flower_h2_destroy()
+{
+       tc qdisc del dev $h2 clsact
+       simple_if_fini $h2
+}
+
+tc_flower_setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       h2=${NETIFS[p2]}
+
+       vrf_prepare
+
+       tc_flower_h1_create
+       tc_flower_h2_create
+}
+
+tc_flower_cleanup()
+{
+       pre_cleanup
+
+       tc_flower_h2_destroy
+       tc_flower_h1_destroy
+
+       vrf_cleanup
+
+       if [[ -v TC_FLOWER_BATCH_FILE ]]; then
+               rm -f $TC_FLOWER_BATCH_FILE
+       fi
+}
+
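+# One distinct destination address per rule, e.g. "tc_flower_addr 255"
+# prints 2001:db8:1::ff; counts up to the 65536 cap fit in the suffix.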
+tc_flower_addr()
+{
+       local num=$1; shift
+
+       printf "2001:db8:1::%x" $num
+}
+
+tc_flower_rules_create()
+{
+       local count=$1; shift
+       local should_fail=$1; shift
+
+       TC_FLOWER_BATCH_FILE="$(mktemp)"
+
+       for ((i = 0; i < count; ++i)); do
+               cat >> $TC_FLOWER_BATCH_FILE <<-EOF
+                       filter add dev $h2 ingress \
+                               prot ipv6 \
+                               pref 1000 \
+                               flower $tcflags dst_ip $(tc_flower_addr $i) \
+                               action drop
+               EOF
+       done
+
+       tc -b $TC_FLOWER_BATCH_FILE
+       check_err_fail $should_fail $? "Rule insertion"
+}
+
+__tc_flower_test()
+{
+       local count=$1; shift
+       local should_fail=$1; shift
+       local last=$((count - 1))
+
+       tc_flower_rules_create $count $should_fail
+
+       for ((i = 0; i < count; ++i)); do
+               $MZ $h1 -q -c 1 -t ip -p 20 -b bc -6 \
+                       -A 2001:db8:2::1 \
+                       -B $(tc_flower_addr $i)
+       done
+
+       MISMATCHES=$(
+               tc -j -s filter show dev $h2 ingress |
+               jq -r '[ .[] | select(.kind == "flower") | .options |
+                        values as $rule | .actions[].stats.packets |
+                        select(. != 1) | "\(.) on \($rule.keys.dst_ip)" ] |
+                      join(", ")'
+       )
+
+       test -z "$MISMATCHES"
+       check_err $? "Expected to capture 1 packet for each IP, but got $MISMATCHES"
+}
+
+tc_flower_test()
+{
+       local count=$1; shift
+       local should_fail=$1; shift
+
+       # We use the lower 16 bits of the IPv6 address for the match, and
+       # there are only 16 bits of rule priority space anyway.
+       if ((count > 65536)); then
+               check_err 1 "Invalid count of $count. At most 65536 rules supported"
+               return
+       fi
+
+       if ! tc_offload_check $TC_FLOWER_NUM_NETIFS; then
+               check_err 1 "Could not test offloaded functionality"
+               return
+       fi
+
+       tcflags="skip_sw"
+       __tc_flower_test $count $should_fail
+}
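+
+# A device-specific wrapper is expected to source this file, define
+# tc_flower_get_target(), and then drive the test roughly as:
+#
+#	tc_flower_setup_prepare
+#	setup_wait $TC_FLOWER_NUM_NETIFS
+#	tc_flower_test $(tc_flower_get_target 0) 0
+#	tc_flower_cleanup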
index 128e548aa377d600f16fa0b9cdb4fbdf9c540cdc..1a0ac3a29ec5f8c9f0052e47e074f40089c634c5 100644 (file)
@@ -12,3 +12,4 @@ tcp_mmap
 udpgso
 udpgso_bench_rx
 udpgso_bench_tx
+tcp_inq
index 663e11e85727416791c1cf4a3ac8ae7a4c64a646..9cca68e440a0b7107c10b1859e6fc8fb2ed2fe4f 100644 (file)
@@ -13,7 +13,7 @@ TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
 TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd
 TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx
 TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
-TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict
+TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
 
 include ../lib.mk
 
index 7ba089b33e8b8248ec08d8421a582be66c9f7e87..cd3a2f1545b54c23dab9b534bce9528c57b6c2ec 100644 (file)
@@ -12,3 +12,5 @@ CONFIG_NET_IPVTI=y
 CONFIG_INET6_XFRM_MODE_TUNNEL=y
 CONFIG_IPV6_VTI=y
 CONFIG_DUMMY=y
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=y
old mode 100644 (file)
new mode 100755 (executable)
index 4a0964c42860cba4cd8f7a097c376c065cfaf5eb..b8a2af8fcfb796c729ba183c4af729d53e9c482f 100644 (file)
@@ -46,6 +46,8 @@ Guidelines for Writing Tests
 
 o Where possible, reuse an existing topology for different tests instead
   of recreating the same topology.
+o Tests that use anything but the most trivial topologies should include
+  ASCII art showing the topology.
 o Where possible, IPv6 and IPv4 addresses shall conform to RFC 3849 and
   RFC 5737, respectively.
 o Where possible, tests shall be written so that they can be reused by
diff --git a/tools/testing/selftests/net/forwarding/bridge_port_isolation.sh b/tools/testing/selftests/net/forwarding/bridge_port_isolation.sh
new file mode 100755 (executable)
index 0000000..a43b464
--- /dev/null
@@ -0,0 +1,151 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="ping_ipv4 ping_ipv6 flooding"
+NUM_NETIFS=6
+CHECK_TC="yes"
+source lib.sh
+
+h1_create()
+{
+       simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h1_destroy()
+{
+       simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h2_create()
+{
+       simple_if_init $h2 192.0.2.2/24 2001:db8:1::2/64
+}
+
+h2_destroy()
+{
+       simple_if_fini $h2 192.0.2.2/24 2001:db8:1::2/64
+}
+
+h3_create()
+{
+       simple_if_init $h3 192.0.2.3/24 2001:db8:1::3/64
+}
+
+h3_destroy()
+{
+       simple_if_fini $h3 192.0.2.3/24 2001:db8:1::3/64
+}
+
+switch_create()
+{
+       ip link add dev br0 type bridge
+
+       ip link set dev $swp1 master br0
+       ip link set dev $swp2 master br0
+       ip link set dev $swp3 master br0
+
+       ip link set dev $swp1 type bridge_slave isolated on
+       check_err $? "Can't set isolation on port $swp1"
+       ip link set dev $swp2 type bridge_slave isolated on
+       check_err $? "Can't set isolation on port $swp2"
+       ip link set dev $swp3 type bridge_slave isolated off
+       check_err $? "Can't disable isolation on port $swp3"
+
+       ip link set dev br0 up
+       ip link set dev $swp1 up
+       ip link set dev $swp2 up
+       ip link set dev $swp3 up
+}
+
+switch_destroy()
+{
+       ip link set dev $swp3 down
+       ip link set dev $swp2 down
+       ip link set dev $swp1 down
+
+       ip link del dev br0
+}
+
+setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       swp1=${NETIFS[p2]}
+
+       swp2=${NETIFS[p3]}
+       h2=${NETIFS[p4]}
+
+       swp3=${NETIFS[p5]}
+       h3=${NETIFS[p6]}
+
+       vrf_prepare
+
+       h1_create
+       h2_create
+       h3_create
+
+       switch_create
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       switch_destroy
+
+       h3_destroy
+       h2_destroy
+       h1_destroy
+
+       vrf_cleanup
+}
+
+ping_ipv4()
+{
+       RET=0
+       ping_do $h1 192.0.2.2
+       check_fail $? "Ping worked when it should not have"
+
+       RET=0
+       ping_do $h3 192.0.2.2
+       check_err $? "Ping didn't work when it should have"
+
+       log_test "Isolated port ping"
+}
+
+ping_ipv6()
+{
+       RET=0
+       ping6_do $h1 2001:db8:1::2
+       check_fail $? "Ping6 worked when it should not have"
+
+       RET=0
+       ping6_do $h3 2001:db8:1::2
+       check_err $? "Ping6 didn't work when it should have"
+
+       log_test "Isolated port ping6"
+}
+
+flooding()
+{
+       local mac=de:ad:be:ef:13:37
+       local ip=192.0.2.100
+
+       RET=0
+       flood_test_do false $mac $ip $h1 $h2
+       check_err $? "Packet was flooded when it should not have been"
+
+       RET=0
+       flood_test_do true $mac $ip $h3 $h2
+       check_err $? "Packet was not flooded when it should have been"
+
+       log_test "Isolated port flooding"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
new file mode 100644 (file)
index 0000000..5ab1e5f
--- /dev/null
@@ -0,0 +1,108 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+##############################################################################
+# Source library
+
+relative_path="${BASH_SOURCE%/*}"
+if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
+       relative_path="."
+fi
+
+source "$relative_path/lib.sh"
+
+##############################################################################
+# Defines
+
+DEVLINK_DEV=$(devlink port show | grep "${NETIFS[p1]}" | \
+             grep -v "${NETIFS[p1]}[0-9]" | cut -d" " -f1 | \
+             rev | cut -d"/" -f2- | rev)
+if [ -z "$DEVLINK_DEV" ]; then
+       echo "SKIP: ${NETIFS[p1]} has no devlink device registered for it"
+       exit 1
+fi
+if [[ "$(echo $DEVLINK_DEV | grep -c pci)" -eq 0 ]]; then
+       echo "SKIP: devlink device's bus is not PCI"
+       exit 1
+fi
+
+DEVLINK_VIDDID=$(lspci -s $(echo $DEVLINK_DEV | cut -d"/" -f2) \
+                -n | cut -d" " -f3)
+
+##############################################################################
+# Sanity checks
+
+devlink -j resource show "$DEVLINK_DEV" &> /dev/null
+if [ $? -ne 0 ]; then
+       echo "SKIP: iproute2 too old, missing devlink resource support"
+       exit 1
+fi
+
+##############################################################################
+# Devlink helpers
+
+devlink_resource_names_to_path()
+{
+       local resource
+       local path=""
+
+       for resource in "${@}"; do
+               if [ "$path" == "" ]; then
+                       path="$resource"
+               else
+                       path="${path}/$resource"
+               fi
+       done
+
+       echo "$path"
+}
+
+devlink_resource_get()
+{
+       local name=$1
+       local resource_name=.[][\"$DEVLINK_DEV\"]
+
+       resource_name="$resource_name | .[] | select (.name == \"$name\")"
+
+       shift
+       for resource in "${@}"; do
+               resource_name="${resource_name} | .[\"resources\"][] | \
+                              select (.name == \"$resource\")"
+       done
+
+       devlink -j resource show "$DEVLINK_DEV" | jq "$resource_name"
+}
+
+devlink_resource_size_get()
+{
+       local size=$(devlink_resource_get "$@" | jq '.["size_new"]')
+
+       if [ "$size" == "null" ]; then
+               devlink_resource_get "$@" | jq '.["size"]'
+       else
+               echo "$size"
+       fi
+}
+
+devlink_resource_size_set()
+{
+       local new_size=$1
+       local path
+
+       shift
+       path=$(devlink_resource_names_to_path "$@")
+       devlink resource set "$DEVLINK_DEV" path "$path" size "$new_size"
+       check_err $? "Failed setting path $path to size $new_size"
+}
+
+devlink_reload()
+{
+       local still_pending
+
+       devlink dev reload "$DEVLINK_DEV" &> /dev/null
+       check_err $? "Failed reload"
+
+       still_pending=$(devlink resource show "$DEVLINK_DEV" | \
+                       grep -c "size_new")
+       check_err $still_pending "Failed reload - There are still unset sizes"
+}
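+
+# Hypothetical usage of the helpers above: read a resource's current size,
+# shrink it, and apply the change ("kvd linear" is the mlxsw-style path
+# used by the spectrum callers of this library):
+#
+#	size=$(devlink_resource_size_get kvd linear)
+#	devlink_resource_size_set $((size / 2)) kvd linear
+#	devlink_reload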
diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh
new file mode 100755 (executable)
index 0000000..982cc8c
--- /dev/null
@@ -0,0 +1,354 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test traffic distribution when a wECMP route forwards traffic to two GRE
+# tunnels.
+#
+# +-------------------------+
+# | H1                      |
+# |               $h1 +     |
+# |      192.0.2.1/28 |     |
+# |  2001:db8:1::1/64 |     |
+# +-------------------|-----+
+#                     |
+# +-------------------|------------------------+
+# | SW1               |                        |
+# |              $ol1 +                        |
+# |      192.0.2.2/28                          |
+# |  2001:db8:1::2/64                          |
+# |                                            |
+# |  + g1a (gre)          + g1b (gre)          |
+# |    loc=192.0.2.65       loc=192.0.2.81     |
+# |    rem=192.0.2.66 --.   rem=192.0.2.82 --. |
+# |    tos=inherit      |   tos=inherit      | |
+# |  .------------------'                    | |
+# |  |                    .------------------' |
+# |  v                    v                    |
+# |  + $ul1.111 (vlan)    + $ul1.222 (vlan)    |
+# |  | 192.0.2.129/28     | 192.0.2.145/28     |
+# |   \                  /                     |
+# |    \________________/                      |
+# |            |                               |
+# |            + $ul1                          |
+# +------------|-------------------------------+
+#              |
+# +------------|-------------------------------+
+# | SW2        + $ul2                          |
+# |     _______|________                       |
+# |    /                \                      |
+# |   /                  \                     |
+# |  + $ul2.111 (vlan)    + $ul2.222 (vlan)    |
+# |  ^ 192.0.2.130/28     ^ 192.0.2.146/28     |
+# |  |                    |                    |
+# |  |                    '------------------. |
+# |  '------------------.                    | |
+# |  + g2a (gre)        | + g2b (gre)        | |
+# |    loc=192.0.2.66   |   loc=192.0.2.82   | |
+# |    rem=192.0.2.65 --'   rem=192.0.2.81 --' |
+# |    tos=inherit          tos=inherit        |
+# |                                            |
+# |              $ol2 +                        |
+# |     192.0.2.17/28 |                        |
+# |  2001:db8:2::1/64 |                        |
+# +-------------------|------------------------+
+#                     |
+# +-------------------|-----+
+# | H2                |     |
+# |               $h2 +     |
+# |     192.0.2.18/28       |
+# |  2001:db8:2::2/64       |
+# +-------------------------+
+
+ALL_TESTS="
+       ping_ipv4
+       ping_ipv6
+       multipath_ipv4
+       multipath_ipv6
+       multipath_ipv6_l4
+"
+
+NUM_NETIFS=6
+source lib.sh
+
+h1_create()
+{
+       simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+       ip route add vrf v$h1 192.0.2.16/28 via 192.0.2.2
+       ip route add vrf v$h1 2001:db8:2::/64 via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+       ip route del vrf v$h1 2001:db8:2::/64 via 2001:db8:1::2
+       ip route del vrf v$h1 192.0.2.16/28 via 192.0.2.2
+       simple_if_fini $h1 192.0.2.1/28
+}
+
+sw1_create()
+{
+       simple_if_init $ol1 192.0.2.2/28 2001:db8:1::2/64
+       __simple_if_init $ul1 v$ol1
+       vlan_create $ul1 111 v$ol1 192.0.2.129/28
+       vlan_create $ul1 222 v$ol1 192.0.2.145/28
+
+       tunnel_create g1a gre 192.0.2.65 192.0.2.66 tos inherit dev v$ol1
+       __simple_if_init g1a v$ol1 192.0.2.65/32
+       ip route add vrf v$ol1 192.0.2.66/32 via 192.0.2.130
+
+       tunnel_create g1b gre 192.0.2.81 192.0.2.82 tos inherit dev v$ol1
+       __simple_if_init g1b v$ol1 192.0.2.81/32
+       ip route add vrf v$ol1 192.0.2.82/32 via 192.0.2.146
+
+       ip route add vrf v$ol1 192.0.2.16/28 \
+          nexthop dev g1a \
+          nexthop dev g1b
+       ip route add vrf v$ol1 2001:db8:2::/64 \
+          nexthop dev g1a \
+          nexthop dev g1b
+
+       tc qdisc add dev $ul1 clsact
+       tc filter add dev $ul1 egress pref 111 prot 802.1q \
+          flower vlan_id 111 action pass
+       tc filter add dev $ul1 egress pref 222 prot 802.1q \
+          flower vlan_id 222 action pass
+}
+
+sw1_destroy()
+{
+       tc qdisc del dev $ul1 clsact
+
+       ip route del vrf v$ol1 2001:db8:2::/64
+       ip route del vrf v$ol1 192.0.2.16/28
+
+       ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146
+       __simple_if_fini g1b 192.0.2.81/32
+       tunnel_destroy g1b
+
+       ip route del vrf v$ol1 192.0.2.66/32 via 192.0.2.130
+       __simple_if_fini g1a 192.0.2.65/32
+       tunnel_destroy g1a
+
+       vlan_destroy $ul1 222
+       vlan_destroy $ul1 111
+       __simple_if_fini $ul1
+       simple_if_fini $ol1 192.0.2.2/28 2001:db8:1::2/64
+}
+
+sw2_create()
+{
+       simple_if_init $ol2 192.0.2.17/28 2001:db8:2::1/64
+       __simple_if_init $ul2 v$ol2
+       vlan_create $ul2 111 v$ol2 192.0.2.130/28
+       vlan_create $ul2 222 v$ol2 192.0.2.146/28
+
+       tunnel_create g2a gre 192.0.2.66 192.0.2.65 tos inherit dev v$ol2
+       __simple_if_init g2a v$ol2 192.0.2.66/32
+       ip route add vrf v$ol2 192.0.2.65/32 via 192.0.2.129
+
+       tunnel_create g2b gre 192.0.2.82 192.0.2.81 tos inherit dev v$ol2
+       __simple_if_init g2b v$ol2 192.0.2.82/32
+       ip route add vrf v$ol2 192.0.2.81/32 via 192.0.2.145
+
+       ip route add vrf v$ol2 192.0.2.0/28 \
+          nexthop dev g2a \
+          nexthop dev g2b
+       ip route add vrf v$ol2 2001:db8:1::/64 \
+          nexthop dev g2a \
+          nexthop dev g2b
+}
+
+sw2_destroy()
+{
+       ip route del vrf v$ol2 2001:db8:1::/64
+       ip route del vrf v$ol2 192.0.2.0/28
+
+       ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145
+       __simple_if_fini g2b 192.0.2.82/32
+       tunnel_destroy g2b
+
+       ip route del vrf v$ol2 192.0.2.65/32 via 192.0.2.129
+       __simple_if_fini g2a 192.0.2.66/32
+       tunnel_destroy g2a
+
+       vlan_destroy $ul2 222
+       vlan_destroy $ul2 111
+       __simple_if_fini $ul2
+       simple_if_fini $ol2 192.0.2.17/28 2001:db8:2::1/64
+}
+
+h2_create()
+{
+       simple_if_init $h2 192.0.2.18/28 2001:db8:2::2/64
+       ip route add vrf v$h2 192.0.2.0/28 via 192.0.2.17
+       ip route add vrf v$h2 2001:db8:1::/64 via 2001:db8:2::1
+}
+
+h2_destroy()
+{
+       ip route del vrf v$h2 2001:db8:1::/64 via 2001:db8:2::1
+       ip route del vrf v$h2 192.0.2.0/28 via 192.0.2.17
+       simple_if_fini $h2 192.0.2.18/28 2001:db8:2::2/64
+}
+
+setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       ol1=${NETIFS[p2]}
+
+       ul1=${NETIFS[p3]}
+       ul2=${NETIFS[p4]}
+
+       ol2=${NETIFS[p5]}
+       h2=${NETIFS[p6]}
+
+       vrf_prepare
+       h1_create
+       sw1_create
+       sw2_create
+       h2_create
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       h2_destroy
+       sw2_destroy
+       sw1_destroy
+       h1_destroy
+       vrf_cleanup
+}
+
+multipath4_test()
+{
+       local what=$1; shift
+       local weight1=$1; shift
+       local weight2=$1; shift
+
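+	# With policy 1 the kernel hashes on the L4 5-tuple, so the range of
+	# UDP ports generated below spreads flows across both nexthops.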
+       sysctl_set net.ipv4.fib_multipath_hash_policy 1
+       ip route replace vrf v$ol1 192.0.2.16/28 \
+          nexthop dev g1a weight $weight1 \
+          nexthop dev g1b weight $weight2
+
+       local t0_111=$(tc_rule_stats_get $ul1 111 egress)
+       local t0_222=$(tc_rule_stats_get $ul1 222 egress)
+
+       ip vrf exec v$h1 \
+          $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
+              -d 1msec -t udp "sp=1024,dp=0-32768"
+
+       local t1_111=$(tc_rule_stats_get $ul1 111 egress)
+       local t1_222=$(tc_rule_stats_get $ul1 222 egress)
+
+       local d111=$((t1_111 - t0_111))
+       local d222=$((t1_222 - t0_222))
+       multipath_eval "$what" $weight1 $weight2 $d111 $d222
+
+       ip route replace vrf v$ol1 192.0.2.16/28 \
+          nexthop dev g1a \
+          nexthop dev g1b
+       sysctl_restore net.ipv4.fib_multipath_hash_policy
+}
+
+multipath6_l4_test()
+{
+       local what=$1; shift
+       local weight1=$1; shift
+       local weight2=$1; shift
+
+       sysctl_set net.ipv6.fib_multipath_hash_policy 1
+       ip route replace vrf v$ol1 2001:db8:2::/64 \
+          nexthop dev g1a weight $weight1 \
+          nexthop dev g1b weight $weight2
+
+       local t0_111=$(tc_rule_stats_get $ul1 111 egress)
+       local t0_222=$(tc_rule_stats_get $ul1 222 egress)
+
+       ip vrf exec v$h1 \
+          $MZ $h1 -6 -q -p 64 -A 2001:db8:1::1 -B 2001:db8:2::2 \
+              -d 1msec -t udp "sp=1024,dp=0-32768"
+
+       local t1_111=$(tc_rule_stats_get $ul1 111 egress)
+       local t1_222=$(tc_rule_stats_get $ul1 222 egress)
+
+       local d111=$((t1_111 - t0_111))
+       local d222=$((t1_222 - t0_222))
+       multipath_eval "$what" $weight1 $weight2 $d111 $d222
+
+       ip route replace vrf v$ol1 2001:db8:2::/64 \
+          nexthop dev g1a \
+          nexthop dev g1b
+       sysctl_restore net.ipv6.fib_multipath_hash_policy
+}
+
+multipath6_test()
+{
+       local what=$1; shift
+       local weight1=$1; shift
+       local weight2=$1; shift
+
+       ip route replace vrf v$ol1 2001:db8:2::/64 \
+          nexthop dev g1a weight $weight1 \
+          nexthop dev g1b weight $weight2
+
+       local t0_111=$(tc_rule_stats_get $ul1 111 egress)
+       local t0_222=$(tc_rule_stats_get $ul1 222 egress)
+
+	# Generate 16384 echo requests, each with a random flow label.
+       for ((i=0; i < 16384; ++i)); do
+               ip vrf exec v$h1 $PING6 2001:db8:2::2 -F 0 -c 1 -q &> /dev/null
+       done
+
+       local t1_111=$(tc_rule_stats_get $ul1 111 egress)
+       local t1_222=$(tc_rule_stats_get $ul1 222 egress)
+
+       local d111=$((t1_111 - t0_111))
+       local d222=$((t1_222 - t0_222))
+       multipath_eval "$what" $weight1 $weight2 $d111 $d222
+
+       ip route replace vrf v$ol1 2001:db8:2::/64 \
+          nexthop dev g1a \
+          nexthop dev g1b
+}
+
+ping_ipv4()
+{
+       ping_test $h1 192.0.2.18
+}
+
+ping_ipv6()
+{
+       ping6_test $h1 2001:db8:2::2
+}
+
+multipath_ipv4()
+{
+       log_info "Running IPv4 multipath tests"
+       multipath4_test "ECMP" 1 1
+       multipath4_test "Weighted MP 2:1" 2 1
+       multipath4_test "Weighted MP 11:45" 11 45
+}
+
+multipath_ipv6()
+{
+       log_info "Running IPv6 multipath tests"
+       multipath6_test "ECMP" 1 1
+       multipath6_test "Weighted MP 2:1" 2 1
+       multipath6_test "Weighted MP 11:45" 11 45
+}
+
+multipath_ipv6_l4()
+{
+       log_info "Running IPv6 L4 hash multipath tests"
+       multipath6_l4_test "ECMP" 1 1
+       multipath6_l4_test "Weighted MP 2:1" 2 1
+       multipath6_l4_test "Weighted MP 11:45" 11 45
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
index 7b18a53aa55665a2a979ee895cc03c968107f5eb..2bb9cf303c538798018565049238bb4860b0b975 100644 (file)
@@ -14,8 +14,13 @@ PAUSE_ON_CLEANUP=${PAUSE_ON_CLEANUP:=no}
 NETIF_TYPE=${NETIF_TYPE:=veth}
 NETIF_CREATE=${NETIF_CREATE:=yes}
 
-if [[ -f forwarding.config ]]; then
-       source forwarding.config
+relative_path="${BASH_SOURCE%/*}"
+if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
+       relative_path="."
+fi
+
+if [[ -f $relative_path/forwarding.config ]]; then
+       source "$relative_path/forwarding.config"
 fi
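+
+# For instance, a test living in another directory can now do (path purely
+# illustrative):
+#
+#   source ../forwarding/lib.sh
+#
+# and forwarding.config is still picked up from the directory of lib.sh.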
 
 ##############################################################################
@@ -151,6 +156,19 @@ check_fail()
        fi
 }
 
+check_err_fail()
+{
+       local should_fail=$1; shift
+       local err=$1; shift
+       local what=$1; shift
+
+       if ((should_fail)); then
+               check_fail $err "$what succeeded, but should have failed"
+       else
+               check_err $err "$what failed"
+       fi
+}
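+# Example (hypothetical): run one command in setups where it is expected to
+# either succeed or fail, depending on $should_fail:
+#
+#   bridge vlan add dev $swp1 vid 100 2>/dev/null
+#   check_err_fail $should_fail $? "VLAN addition"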
+
 log_test()
 {
        local test_name=$1
@@ -185,18 +203,27 @@ log_info()
        echo "INFO: $msg"
 }
 
+setup_wait_dev()
+{
+       local dev=$1; shift
+
+       while true; do
+               ip link show dev $dev up \
+                       | grep 'state UP' &> /dev/null
+               if [[ $? -ne 0 ]]; then
+                       sleep 1
+               else
+                       break
+               fi
+       done
+}
+
 setup_wait()
 {
-       for i in $(eval echo {1..$NUM_NETIFS}); do
-               while true; do
-                       ip link show dev ${NETIFS[p$i]} up \
-                               | grep 'state UP' &> /dev/null
-                       if [[ $? -ne 0 ]]; then
-                               sleep 1
-                       else
-                               break
-                       fi
-               done
+       local num_netifs=${1:-$NUM_NETIFS}
+
+       for ((i = 1; i <= num_netifs; ++i)); do
+               setup_wait_dev ${NETIFS[p$i]}
        done
 
        # Make sure links are ready.
@@ -287,6 +314,29 @@ __addr_add_del()
        done
 }
 
+__simple_if_init()
+{
+       local if_name=$1; shift
+       local vrf_name=$1; shift
+       local addrs=("${@}")
+
+       ip link set dev $if_name master $vrf_name
+       ip link set dev $if_name up
+
+       __addr_add_del $if_name add "${addrs[@]}"
+}
+
+__simple_if_fini()
+{
+       local if_name=$1; shift
+       local addrs=("${@}")
+
+       __addr_add_del $if_name del "${addrs[@]}"
+
+       ip link set dev $if_name down
+       ip link set dev $if_name nomaster
+}
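+# The __-prefixed variants skip VRF creation and destruction, so that several
+# interfaces (e.g. an underlay port next to its VLAN uppers) can share one
+# VRF, as in:
+#
+#   simple_if_init $ol1 192.0.2.2/28
+#   __simple_if_init $ul1 v$ol1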
+
 simple_if_init()
 {
        local if_name=$1
@@ -298,11 +348,8 @@ simple_if_init()
        array=("${@}")
 
        vrf_create $vrf_name
-       ip link set dev $if_name master $vrf_name
        ip link set dev $vrf_name up
-       ip link set dev $if_name up
-
-       __addr_add_del $if_name add "${array[@]}"
+       __simple_if_init $if_name $vrf_name "${array[@]}"
 }
 
 simple_if_fini()
@@ -315,9 +362,7 @@ simple_if_fini()
        vrf_name=v$if_name
        array=("${@}")
 
-       __addr_add_del $if_name del "${array[@]}"
-
-       ip link set dev $if_name down
+       __simple_if_fini $if_name "${array[@]}"
        vrf_destroy $vrf_name
 }
 
@@ -383,9 +428,10 @@ tc_rule_stats_get()
 {
        local dev=$1; shift
        local pref=$1; shift
+       local dir=$1; shift
 
-       tc -j -s filter show dev $dev ingress pref $pref |
-       jq '.[1].options.actions[].stats.packets'
+       tc -j -s filter show dev $dev ${dir:-ingress} pref $pref \
+           | jq '.[1].options.actions[].stats.packets'
 }
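+
+# The direction defaults to ingress; callers that count transmitted packets
+# pass it explicitly, e.g.:
+#
+#   tc_rule_stats_get $ul1 111 egress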
 
 mac_get()
@@ -437,7 +483,9 @@ forwarding_restore()
 
 tc_offload_check()
 {
-       for i in $(eval echo {1..$NUM_NETIFS}); do
+       local num_netifs=${1:-$NUM_NETIFS}
+
+       for ((i = 1; i <= num_netifs; ++i)); do
                ethtool -k ${NETIFS[p$i]} \
                        | grep "hw-tc-offload: on" &> /dev/null
                if [[ $? -ne 0 ]]; then
@@ -453,9 +501,15 @@ trap_install()
        local dev=$1; shift
        local direction=$1; shift
 
-       # For slow-path testing, we need to install a trap to get to
-       # slow path the packets that would otherwise be switched in HW.
-       tc filter add dev $dev $direction pref 1 flower skip_sw action trap
+	# Some devices may not support, or may not need, in-hardware trapping
+	# of traffic (e.g. the veth pairs that this library creates for
+	# non-existent loopbacks). Fall back to action continue in that case,
+	# so that there is a filter in place (some tests check its counters),
+	# and so that subsequent filters are still processed.
+       tc filter add dev $dev $direction pref 1 \
+               flower skip_sw action trap 2>/dev/null \
+           || tc filter add dev $dev $direction pref 1 \
+                      flower action continue
 }
 
 trap_uninstall()
@@ -463,11 +517,13 @@ trap_uninstall()
        local dev=$1; shift
        local direction=$1; shift
 
-       tc filter del dev $dev $direction pref 1 flower skip_sw
+       tc filter del dev $dev $direction pref 1 flower
 }
 
 slow_path_trap_install()
 {
+	# For slow-path testing, we need to install a trap to steer to the
+	# slow path the packets that would otherwise be switched in HW.
        if [ "${tcflags/skip_hw}" != "$tcflags" ]; then
                trap_install "$@"
        fi
@@ -557,33 +613,86 @@ tests_run()
        done
 }
 
+multipath_eval()
+{
+       local desc="$1"
+       local weight_rp12=$2
+       local weight_rp13=$3
+       local packets_rp12=$4
+       local packets_rp13=$5
+       local weights_ratio packets_ratio diff
+
+       RET=0
+
+       if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then
+               weights_ratio=$(echo "scale=2; $weight_rp12 / $weight_rp13" \
+                               | bc -l)
+       else
+               weights_ratio=$(echo "scale=2; $weight_rp13 / $weight_rp12" \
+                               | bc -l)
+       fi
+
+       if [[ "$packets_rp12" -eq "0" || "$packets_rp13" -eq "0" ]]; then
+		check_err 1 "Packet difference is 0"
+		log_test "Multipath"
+		log_info "Expected ratio $weights_ratio"
+		return
+       fi
+
+       if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then
+               packets_ratio=$(echo "scale=2; $packets_rp12 / $packets_rp13" \
+                               | bc -l)
+       else
+               packets_ratio=$(echo "scale=2; $packets_rp13 / $packets_rp12" \
+                               | bc -l)
+       fi
+
+       diff=$(echo $weights_ratio - $packets_ratio | bc -l)
+       diff=${diff#-}
+
+       test "$(echo "$diff / $weights_ratio > 0.15" | bc -l)" -eq 0
+       check_err $? "Too large discrepancy between expected and measured ratios"
+       log_test "$desc"
+       log_info "Expected ratio $weights_ratio Measured ratio $packets_ratio"
+}
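+# E.g. for weights 2 and 1 with 614 and 331 packets, the ratios are 2.00 and
+# 1.85; the relative error |2.00 - 1.85| / 2.00 = 0.075 is within the 0.15
+# tolerance, so the test passes.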
+
 ##############################################################################
 # Tests
 
-ping_test()
+ping_do()
 {
        local if_name=$1
        local dip=$2
        local vrf_name
 
-       RET=0
-
        vrf_name=$(master_name_get $if_name)
        ip vrf exec $vrf_name $PING $dip -c 10 -i 0.1 -w 2 &> /dev/null
+}
+
+ping_test()
+{
+       RET=0
+
+       ping_do $1 $2
        check_err $?
        log_test "ping"
 }
 
-ping6_test()
+ping6_do()
 {
        local if_name=$1
        local dip=$2
        local vrf_name
 
-       RET=0
-
        vrf_name=$(master_name_get $if_name)
        ip vrf exec $vrf_name $PING6 $dip -c 10 -i 0.1 -w 2 &> /dev/null
+}
+
+ping6_test()
+{
+       RET=0
+
+       ping6_do $1 $2
        check_err $?
        log_test "ping6"
 }
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh
new file mode 100755 (executable)
index 0000000..c5095da
--- /dev/null
@@ -0,0 +1,132 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for "tc action mirred egress mirror" when the underlay route points at a
+# bridge device without vlan filtering (802.1d).
+#
+# This test uses the standard topology for testing mirror-to-gretap. See
+# mirror_gre_topo_lib.sh for more details. The full topology is as follows:
+#
+#  +---------------------+                             +---------------------+
+#  | H1                  |                             |                  H2 |
+#  |     + $h1           |                             |           $h2 +     |
+#  |     | 192.0.2.1/28  |                             |  192.0.2.2/28 |     |
+#  +-----|---------------+                             +---------------|-----+
+#        |                                                             |
+#  +-----|-------------------------------------------------------------|-----+
+#  | SW  o---> mirror                                                  |     |
+#  | +---|-------------------------------------------------------------|---+ |
+#  | |   + $swp1            + br1 (802.1q bridge)                $swp2 +   | |
+#  | +---------------------------------------------------------------------+ |
+#  |                                                                         |
+#  | +---------------------------------------------------------------------+ |
+#  | |                      + br2 (802.1d bridge)                          | |
+#  | |                        192.0.2.129/28                               | |
+#  | |   + $swp3              2001:db8:2::1/64                             | |
+#  | +---|-----------------------------------------------------------------+ |
+#  |     |                                          ^                    ^   |
+#  |     |                     + gt6 (ip6gretap)    | + gt4 (gretap)     |   |
+#  |     |                     : loc=2001:db8:2::1  | : loc=192.0.2.129  |   |
+#  |     |                     : rem=2001:db8:2::2 -+ : rem=192.0.2.130 -+   |
+#  |     |                     : ttl=100              : ttl=100              |
+#  |     |                     : tos=inherit          : tos=inherit          |
+#  +-----|---------------------:----------------------:----------------------+
+#        |                     :                      :
+#  +-----|---------------------:----------------------:----------------------+
+#  | H3  + $h3                 + h3-gt6(ip6gretap)    + h3-gt4 (gretap)      |
+#  |       192.0.2.130/28        loc=2001:db8:2::2      loc=192.0.2.130      |
+#  |       2001:db8:2::2/64      rem=2001:db8:2::1      rem=192.0.2.129      |
+#  |                             ttl=100                ttl=100              |
+#  |                             tos=inherit            tos=inherit          |
+#  +-------------------------------------------------------------------------+
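+#
+# The mirror itself is installed by the mirror library; conceptually it is a
+# matchall rule on $swp1 (a sketch, not the library's exact invocation):
+#
+#   tc filter add dev $swp1 ingress pref 1000 matchall $tcflags \
+#      action mirred egress mirror dev gt4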
+
+ALL_TESTS="
+       test_gretap
+       test_ip6gretap
+"
+
+NUM_NETIFS=6
+source lib.sh
+source mirror_lib.sh
+source mirror_gre_lib.sh
+source mirror_gre_topo_lib.sh
+
+setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       swp1=${NETIFS[p2]}
+
+       swp2=${NETIFS[p3]}
+       h2=${NETIFS[p4]}
+
+       swp3=${NETIFS[p5]}
+       h3=${NETIFS[p6]}
+
+       vrf_prepare
+       mirror_gre_topo_create
+
+       ip link add name br2 type bridge vlan_filtering 0
+       ip link set dev br2 up
+
+       ip link set dev $swp3 master br2
+       ip route add 192.0.2.130/32 dev br2
+       ip -6 route add 2001:db8:2::2/128 dev br2
+
+       ip address add dev br2 192.0.2.129/28
+       ip address add dev br2 2001:db8:2::1/64
+
+       ip address add dev $h3 192.0.2.130/28
+       ip address add dev $h3 2001:db8:2::2/64
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       ip address del dev $h3 2001:db8:2::2/64
+       ip address del dev $h3 192.0.2.130/28
+       ip link del dev br2
+
+       mirror_gre_topo_destroy
+       vrf_cleanup
+}
+
+test_gretap()
+{
+       full_test_span_gre_dir gt4 ingress 8 0 "mirror to gretap"
+       full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap"
+}
+
+test_ip6gretap()
+{
+       full_test_span_gre_dir gt6 ingress 8 0 "mirror to ip6gretap"
+       full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap"
+}
+
+test_all()
+{
+       slow_path_trap_install $swp1 ingress
+       slow_path_trap_install $swp1 egress
+
+       tests_run
+
+       slow_path_trap_uninstall $swp1 egress
+       slow_path_trap_uninstall $swp1 ingress
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tcflags="skip_hw"
+test_all
+
+if ! tc_offload_check; then
+       echo "WARN: Could not test offloaded functionality"
+else
+       tcflags="skip_sw"
+       test_all
+fi
+
+exit $EXIT_STATUS
index 3bb4c2ba7b14281340b4560ce21be3a9bd79bd48..197e769c2ed16a65826c3bd67fb560725a28697b 100755 (executable)
@@ -74,12 +74,14 @@ test_vlan_match()
 
 test_gretap()
 {
-       test_vlan_match gt4 'vlan_id 555 vlan_ethtype ip' "mirror to gretap"
+       test_vlan_match gt4 'skip_hw vlan_id 555 vlan_ethtype ip' \
+                       "mirror to gretap"
 }
 
 test_ip6gretap()
 {
-       test_vlan_match gt6 'vlan_id 555 vlan_ethtype ipv6' "mirror to ip6gretap"
+	test_vlan_match gt6 'skip_hw vlan_id 555 vlan_ethtype ipv6' \
+                       "mirror to ip6gretap"
 }
 
 test_gretap_stp()
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
new file mode 100755 (executable)
index 0000000..a3402cd
--- /dev/null
@@ -0,0 +1,126 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test for "tc action mirred egress mirror" when the underlay route points at a
+# bridge device with vlan filtering (802.1q).
+#
+# This test uses the standard topology for testing mirror-to-gretap. See
+# mirror_gre_topo_lib.sh for more details. The full topology is as follows:
+#
+#  +---------------------+                               +---------------------+
+#  | H1                  |                               |                  H2 |
+#  |     + $h1           |                               |           $h2 +     |
+#  |     | 192.0.2.1/28  |                               |  192.0.2.2/28 |     |
+#  +-----|---------------+                               +---------------|-----+
+#        |                                                               |
+#  +-----|---------------------------------------------------------------|-----+
+#  | SW  o---> mirror                                                    |     |
+#  | +---|---------------------------------------------------------------|---+ |
+#  | |   + $swp1                  + br1 (802.1q bridge)            $swp2 +   | |
+#  | |                              192.0.2.129/28                           | |
+#  | |   + $swp3                    2001:db8:2::1/64                         | |
+#  | |   | vid555                   vid555[pvid,untagged]                    | |
+#  | +---|-------------------------------------------------------------------+ |
+#  |     |                                          ^                      ^   |
+#  |     |                     + gt6 (ip6gretap)    |   + gt4 (gretap)     |   |
+#  |     |                     : loc=2001:db8:2::1  |   : loc=192.0.2.129  |   |
+#  |     |                     : rem=2001:db8:2::2 -+   : rem=192.0.2.130 -+   |
+#  |     |                     : ttl=100                : ttl=100              |
+#  |     |                     : tos=inherit            : tos=inherit          |
+#  +-----|---------------------:------------------------:----------------------+
+#        |                     :                        :
+#  +-----|---------------------:------------------------:----------------------+
+#  | H3  + $h3                 + h3-gt6(ip6gretap)      + h3-gt4 (gretap)      |
+#  |     |                       loc=2001:db8:2::2        loc=192.0.2.130      |
+#  |     + $h3.555               rem=2001:db8:2::1        rem=192.0.2.129      |
+#  |       192.0.2.130/28        ttl=100                  ttl=100              |
+#  |       2001:db8:2::2/64      tos=inherit              tos=inherit          |
+#  +---------------------------------------------------------------------------+
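+#
+# Note that br1 itself is a member of vid 555 (pvid untagged, set up in
+# setup_prepare() below), so that the GRE underlay addressed on br1 leaves
+# through $swp3 tagged with vid 555.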
+
+ALL_TESTS="
+       test_gretap
+       test_ip6gretap
+"
+
+NUM_NETIFS=6
+source lib.sh
+source mirror_lib.sh
+source mirror_gre_lib.sh
+source mirror_gre_topo_lib.sh
+
+setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       swp1=${NETIFS[p2]}
+
+       swp2=${NETIFS[p3]}
+       h2=${NETIFS[p4]}
+
+       swp3=${NETIFS[p5]}
+       h3=${NETIFS[p6]}
+
+       vrf_prepare
+       mirror_gre_topo_create
+
+       ip link set dev $swp3 master br1
+       bridge vlan add dev br1 vid 555 pvid untagged self
+       ip address add dev br1 192.0.2.129/28
+       ip address add dev br1 2001:db8:2::1/64
+
+       ip -4 route add 192.0.2.130/32 dev br1
+       ip -6 route add 2001:db8:2::2/128 dev br1
+
+       vlan_create $h3 555 v$h3 192.0.2.130/28 2001:db8:2::2/64
+       bridge vlan add dev $swp3 vid 555
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       ip link set dev $swp3 nomaster
+       vlan_destroy $h3 555
+
+       mirror_gre_topo_destroy
+       vrf_cleanup
+}
+
+test_gretap()
+{
+       full_test_span_gre_dir gt4 ingress 8 0 "mirror to gretap"
+       full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap"
+}
+
+test_ip6gretap()
+{
+       full_test_span_gre_dir gt6 ingress 8 0 "mirror to ip6gretap"
+       full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap"
+}
+
+tests()
+{
+       slow_path_trap_install $swp1 ingress
+       slow_path_trap_install $swp1 egress
+
+       tests_run
+
+       slow_path_trap_uninstall $swp1 egress
+       slow_path_trap_uninstall $swp1 ingress
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tcflags="skip_hw"
+tests
+
+if ! tc_offload_check; then
+       echo "WARN: Could not test offloaded functionality"
+else
+       tcflags="skip_sw"
+       tests
+fi
+
+exit $EXIT_STATUS
index aa29d46186a837d53fa8fb76fa4b0161563417e0..135902aa8b11498bfbf99428a77084b4ee3de7fd 100755 (executable)
@@ -122,15 +122,8 @@ test_span_gre_egress_up()
 	# After setting the device up, wait for the link, then resolve the
 	# neighbor so that we can expect mirroring to work.
        ip link set dev $swp3 up
-       while true; do
-               ip neigh sh dev $swp3 $remote_ip nud reachable |
-                   grep -q ^
-               if [[ $? -ne 0 ]]; then
-                       sleep 1
-               else
-                       break
-               fi
-       done
+       setup_wait_dev $swp3
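+	# A single ping kicks off neighbor resolution for $remote_ip.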
+       ping -c 1 -I $swp3 $remote_ip &>/dev/null
 
        quick_test_span_gre_dir $tundev ingress
        mirror_uninstall $swp1 ingress
index 619b469365be171c7a0ba39fbc84693214eb9e65..fac486178ef727aa387450042050f02c65c0ff32 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
-source mirror_lib.sh
+source "$relative_path/mirror_lib.sh"
 
 quick_test_span_gre_dir_ips()
 {
@@ -62,7 +62,7 @@ full_test_span_gre_dir_vlan_ips()
                          "$backward_type" "$ip1" "$ip2"
 
        tc filter add dev $h3 ingress pref 77 prot 802.1q \
-               flower $vlan_match ip_proto 0x2f \
+               flower $vlan_match \
                action pass
        mirror_test v$h1 $ip1 $ip2 $h3 77 10
        tc filter del dev $h3 ingress pref 77
index 8fa681eb90e72dee85fa5fc9c44f7b9d0f45a622..6f9ef1820e9370c58ce096d7bd2e2e766f8d0df0 100755 (executable)
@@ -35,6 +35,8 @@ setup_prepare()
        vrf_prepare
        mirror_gre_topo_create
 
+       sysctl_set net.ipv4.conf.v$h3.rp_filter 0
+
        ip address add dev $swp3 192.0.2.161/28
        ip address add dev $h3 192.0.2.162/28
        ip address add dev gt4 192.0.2.129/32
@@ -61,6 +63,8 @@ cleanup()
        ip address del dev $h3 192.0.2.162/28
        ip address del dev $swp3 192.0.2.161/28
 
+	sysctl_restore net.ipv4.conf.v$h3.rp_filter
+
        mirror_gre_topo_destroy
        vrf_cleanup
 
index 25341956470840c9f36dd424164ffd10132235a0..39c03e2867f479560a25a622f1bdaa2f3fbbd289 100644 (file)
@@ -33,7 +33,7 @@
 #   |                                                                         |
 #   +-------------------------------------------------------------------------+
 
-source mirror_topo_lib.sh
+source "$relative_path/mirror_topo_lib.sh"
 
 mirror_gre_topo_h3_create()
 {
index 5dbc7a08f4bd5489f46e28522f4de40713c6ace7..d3e75bb6a2d860082d1ed9b806e86c5cd071b4cf 100755 (executable)
@@ -39,6 +39,12 @@ setup_prepare()
        swp3=${NETIFS[p5]}
        h3=${NETIFS[p6]}
 
+	# gt4's remote address is at $h3.555, not $h3. Thus the packets arriving
+	# directly at $h3 for test_gretap_untagged_egress() are rejected by
+	# rp_filter and the test spuriously fails.
+       sysctl_set net.ipv4.conf.all.rp_filter 0
+       sysctl_set net.ipv4.conf.$h3.rp_filter 0
+
        vrf_prepare
        mirror_gre_topo_create
 
@@ -65,6 +71,9 @@ cleanup()
 
        mirror_gre_topo_destroy
        vrf_cleanup
+
+       sysctl_restore net.ipv4.conf.$h3.rp_filter
+       sysctl_restore net.ipv4.conf.all.rp_filter
 }
 
 test_vlan_match()
@@ -79,12 +88,14 @@ test_vlan_match()
 
 test_gretap()
 {
-       test_vlan_match gt4 'vlan_id 555 vlan_ethtype ip' "mirror to gretap"
+       test_vlan_match gt4 'skip_hw vlan_id 555 vlan_ethtype ip' \
+                       "mirror to gretap"
 }
 
 test_ip6gretap()
 {
-       test_vlan_match gt6 'vlan_id 555 vlan_ethtype ipv6' "mirror to ip6gretap"
+	test_vlan_match gt6 'skip_hw vlan_id 555 vlan_ethtype ipv6' \
+                       "mirror to ip6gretap"
 }
 
 test_span_gre_forbidden_cpu()
index d36dc26c6c516364779e12db4a6532e54cf3f433..07991e1025c70e0e1f5e084fbcef94b7bbaed173 100644 (file)
@@ -105,7 +105,7 @@ do_test_span_vlan_dir_ips()
        # Install the capture as skip_hw to avoid double-counting of packets.
 	# The traffic is meant for the local box anyway, so it will be trapped
 	# to the kernel.
-       vlan_capture_install $dev "skip_hw vlan_id $vid"
+       vlan_capture_install $dev "skip_hw vlan_id $vid vlan_ethtype ip"
        mirror_test v$h1 $ip1 $ip2 $dev 100 $expect
        mirror_test v$h2 $ip2 $ip1 $dev 100 $expect
        vlan_capture_uninstall $dev
diff --git a/tools/testing/selftests/net/forwarding/router_bridge.sh b/tools/testing/selftests/net/forwarding/router_bridge.sh
new file mode 100755 (executable)
index 0000000..ebc596a
--- /dev/null
@@ -0,0 +1,113 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="
+       ping_ipv4
+       ping_ipv6
+"
+NUM_NETIFS=4
+source lib.sh
+
+h1_create()
+{
+       simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+       ip -4 route add 192.0.2.128/28 vrf v$h1 nexthop via 192.0.2.2
+       ip -6 route add 2001:db8:2::/64 vrf v$h1 nexthop via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+       ip -6 route del 2001:db8:2::/64 vrf v$h1
+       ip -4 route del 192.0.2.128/28 vrf v$h1
+       simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64
+}
+
+h2_create()
+{
+       simple_if_init $h2 192.0.2.130/28 2001:db8:2::2/64
+       ip -4 route add 192.0.2.0/28 vrf v$h2 nexthop via 192.0.2.129
+       ip -6 route add 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:2::1
+}
+
+h2_destroy()
+{
+       ip -6 route del 2001:db8:1::/64 vrf v$h2
+       ip -4 route del 192.0.2.0/28 vrf v$h2
+       simple_if_fini $h2 192.0.2.130/28 2001:db8:2::2/64
+}
+
+router_create()
+{
+       ip link add name br1 type bridge vlan_filtering 1
+       ip link set dev br1 up
+
+       ip link set dev $swp1 master br1
+       ip link set dev $swp1 up
+       __addr_add_del br1 add 192.0.2.2/28 2001:db8:1::2/64
+
+       ip link set dev $swp2 up
+       __addr_add_del $swp2 add 192.0.2.129/28 2001:db8:2::1/64
+}
+
+router_destroy()
+{
+       __addr_add_del $swp2 del 192.0.2.129/28 2001:db8:2::1/64
+       ip link set dev $swp2 down
+
+       __addr_add_del br1 del 192.0.2.2/28 2001:db8:1::2/64
+       ip link set dev $swp1 down
+       ip link set dev $swp1 nomaster
+
+       ip link del dev br1
+}
+
+setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       swp1=${NETIFS[p2]}
+
+       swp2=${NETIFS[p3]}
+       h2=${NETIFS[p4]}
+
+       vrf_prepare
+
+       h1_create
+       h2_create
+
+       router_create
+
+       forwarding_enable
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       forwarding_restore
+
+       router_destroy
+
+       h2_destroy
+       h1_destroy
+
+       vrf_cleanup
+}
+
+ping_ipv4()
+{
+       ping_test $h1 192.0.2.130
+}
+
+ping_ipv6()
+{
+       ping6_test $h1 2001:db8:2::2
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh b/tools/testing/selftests/net/forwarding/router_bridge_vlan.sh
new file mode 100755 (executable)
index 0000000..fef88eb
--- /dev/null
@@ -0,0 +1,132 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="
+       ping_ipv4
+       ping_ipv6
+       vlan
+"
+NUM_NETIFS=4
+source lib.sh
+
+h1_create()
+{
+       simple_if_init $h1
+       vlan_create $h1 555 v$h1 192.0.2.1/28 2001:db8:1::1/64
+       ip -4 route add 192.0.2.128/28 vrf v$h1 nexthop via 192.0.2.2
+       ip -6 route add 2001:db8:2::/64 vrf v$h1 nexthop via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+       ip -6 route del 2001:db8:2::/64 vrf v$h1
+       ip -4 route del 192.0.2.128/28 vrf v$h1
+       vlan_destroy $h1 555
+       simple_if_fini $h1
+}
+
+h2_create()
+{
+       simple_if_init $h2 192.0.2.130/28 2001:db8:2::2/64
+       ip -4 route add 192.0.2.0/28 vrf v$h2 nexthop via 192.0.2.129
+       ip -6 route add 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:2::1
+}
+
+h2_destroy()
+{
+       ip -6 route del 2001:db8:1::/64 vrf v$h2
+       ip -4 route del 192.0.2.0/28 vrf v$h2
+	simple_if_fini $h2 192.0.2.130/28 2001:db8:2::2/64
+}
+
+router_create()
+{
+       ip link add name br1 type bridge vlan_filtering 1
+       ip link set dev br1 up
+
+       ip link set dev $swp1 master br1
+       ip link set dev $swp1 up
+
+       bridge vlan add dev br1 vid 555 self pvid untagged
+       bridge vlan add dev $swp1 vid 555
+
+       __addr_add_del br1 add 192.0.2.2/28 2001:db8:1::2/64
+
+       ip link set dev $swp2 up
+       __addr_add_del $swp2 add 192.0.2.129/28 2001:db8:2::1/64
+}
+
+router_destroy()
+{
+       __addr_add_del $swp2 del 192.0.2.129/28 2001:db8:2::1/64
+       ip link set dev $swp2 down
+
+       __addr_add_del br1 del 192.0.2.2/28 2001:db8:1::2/64
+       ip link set dev $swp1 down
+       ip link set dev $swp1 nomaster
+
+       ip link del dev br1
+}
+
+setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       swp1=${NETIFS[p2]}
+
+       swp2=${NETIFS[p3]}
+       h2=${NETIFS[p4]}
+
+       vrf_prepare
+
+       h1_create
+       h2_create
+
+       router_create
+
+       forwarding_enable
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       forwarding_restore
+
+       router_destroy
+
+       h2_destroy
+       h1_destroy
+
+       vrf_cleanup
+}
+
+vlan()
+{
+       RET=0
+
+       bridge vlan add dev br1 vid 333 self
+       check_err $? "Can't add a non-PVID VLAN"
+       bridge vlan del dev br1 vid 333 self
+       check_err $? "Can't remove a non-PVID VLAN"
+
+       log_test "vlan"
+}
+
+ping_ipv4()
+{
+       ping_test $h1 192.0.2.130
+}
+
+ping_ipv6()
+{
+       ping6_test $h1 2001:db8:2::2
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
index 8b6d0fb6d604d4b632edb8150c62f447ba176aec..79a2099279621a7810b2718f76f2964012423fa0 100755 (executable)
@@ -159,45 +159,6 @@ router2_destroy()
        vrf_destroy "vrf-r2"
 }
 
-multipath_eval()
-{
-       local desc="$1"
-       local weight_rp12=$2
-       local weight_rp13=$3
-       local packets_rp12=$4
-       local packets_rp13=$5
-       local weights_ratio packets_ratio diff
-
-       RET=0
-
-       if [[ "$packets_rp12" -eq "0" || "$packets_rp13" -eq "0" ]]; then
-              check_err 1 "Packet difference is 0"
-              log_test "Multipath"
-              log_info "Expected ratio $weights_ratio"
-              return
-       fi
-
-       if [[ "$weight_rp12" -gt "$weight_rp13" ]]; then
-               weights_ratio=$(echo "scale=2; $weight_rp12 / $weight_rp13" \
-                      | bc -l)
-               packets_ratio=$(echo "scale=2; $packets_rp12 / $packets_rp13" \
-                      | bc -l)
-       else
-               weights_ratio=$(echo "scale=2; $weight_rp13 / $weight_rp12" | \
-                      bc -l)
-               packets_ratio=$(echo "scale=2; $packets_rp13 / $packets_rp12" | \
-                      bc -l)
-       fi
-
-       diff=$(echo $weights_ratio - $packets_ratio | bc -l)
-       diff=${diff#-}
-
-       test "$(echo "$diff / $weights_ratio > 0.15" | bc -l)" -eq 0
-       check_err $? "Too large discrepancy between expected and measured ratios"
-       log_test "$desc"
-       log_info "Expected ratio $weights_ratio Measured ratio $packets_ratio"
-}
-
 multipath4_test()
 {
        local desc="$1"
diff --git a/tools/testing/selftests/net/ip6_gre_headroom.sh b/tools/testing/selftests/net/ip6_gre_headroom.sh
new file mode 100755 (executable)
index 0000000..5b41e8b
--- /dev/null
@@ -0,0 +1,65 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test that enough headroom is reserved for the first packet passing through an
+# IPv6 GRE-like netdevice.
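+#
+# The tunnel devices are used as mirred mirror targets. If a device did not
+# reserve room for the outer IPv6/GRE headers, pushing them onto the first
+# mirrored packet could hit skb_under_panic; the test passes simply by not
+# crashing the kernel.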
+
+setup_prepare()
+{
+       ip link add h1 type veth peer name swp1
+       ip link add h3 type veth peer name swp3
+
+       ip link set dev h1 up
+       ip address add 192.0.2.1/28 dev h1
+
+       ip link add dev vh3 type vrf table 20
+       ip link set dev h3 master vh3
+       ip link set dev vh3 up
+       ip link set dev h3 up
+
+       ip link set dev swp3 up
+       ip address add dev swp3 2001:db8:2::1/64
+       ip address add dev swp3 2001:db8:2::3/64
+
+       ip link set dev swp1 up
+       tc qdisc add dev swp1 clsact
+
+       ip link add name er6 type ip6erspan \
+          local 2001:db8:2::1 remote 2001:db8:2::2 oseq okey 123
+       ip link set dev er6 up
+
+       ip link add name gt6 type ip6gretap \
+          local 2001:db8:2::3 remote 2001:db8:2::4
+       ip link set dev gt6 up
+
+       sleep 1
+}
+
+cleanup()
+{
+       ip link del dev gt6
+       ip link del dev er6
+       ip link del dev swp1
+       ip link del dev swp3
+       ip link del dev vh3
+}
+
+test_headroom()
+{
+       local type=$1; shift
+       local tundev=$1; shift
+
+       tc filter add dev swp1 ingress pref 1000 matchall skip_hw \
+               action mirred egress mirror dev $tundev
+       ping -I h1 192.0.2.2 -c 1 -w 2 &> /dev/null
+       tc filter del dev swp1 ingress pref 1000
+
+       # If it doesn't panic, it passes.
+       printf "TEST: %-60s  [PASS]\n" "$type headroom"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+test_headroom ip6gretap gt6
+test_headroom ip6erspan er6
index 0d7a44fa30af2580da73a5e68aeeb8a40c3a229a..08c341b49760f002723c5622ef2ab38436d1f5bb 100755 (executable)
@@ -525,18 +525,21 @@ kci_test_macsec()
 #-------------------------------------------------------------------
 kci_test_ipsec()
 {
-       srcip="14.0.0.52"
-       dstip="14.0.0.70"
+       ret=0
        algo="aead rfc4106(gcm(aes)) 0x3132333435363738393031323334353664636261 128"
+       srcip=192.168.123.1
+       dstip=192.168.123.2
+       spi=7
+
+       ip addr add $srcip dev $devdummy
 
        # flush to be sure there's nothing configured
        ip x s flush ; ip x p flush
        check_err $?
 
        # start the monitor in the background
-       tmpfile=`mktemp ipsectestXXX`
-       ip x m > $tmpfile &
-       mpid=$!
+       tmpfile=`mktemp /var/run/ipsectestXXX`
+       mpid=`(ip x m > $tmpfile & echo $!) 2>/dev/null`
        sleep 0.2
 
        ipsecid="proto esp src $srcip dst $dstip spi 0x07"
@@ -599,6 +602,7 @@ kci_test_ipsec()
        check_err $?
        ip x p flush
        check_err $?
+       ip addr del $srcip/32 dev $devdummy
 
        if [ $ret -ne 0 ]; then
                echo "FAIL: ipsec"
@@ -607,6 +611,119 @@ kci_test_ipsec()
        echo "PASS: ipsec"
 }
 
+#-------------------------------------------------------------------
+# Example commands
+#   ip x s add proto esp src 14.0.0.52 dst 14.0.0.70 \
+#            spi 0x07 mode transport reqid 0x07 replay-window 32 \
+#            aead 'rfc4106(gcm(aes))' 1234567890123456dcba 128 \
+#            sel src 14.0.0.52/24 dst 14.0.0.70/24 \
+#            offload dev sim1 dir out
+#   ip x p add dir out src 14.0.0.52/24 dst 14.0.0.70/24 \
+#            tmpl proto esp src 14.0.0.52 dst 14.0.0.70 \
+#            spi 0x07 mode transport reqid 0x07
+#
+#-------------------------------------------------------------------
+kci_test_ipsec_offload()
+{
+       ret=0
+       algo="aead rfc4106(gcm(aes)) 0x3132333435363738393031323334353664636261 128"
+       srcip=192.168.123.3
+       dstip=192.168.123.4
+       dev=simx1
+       sysfsd=/sys/kernel/debug/netdevsim/$dev
+       sysfsf=$sysfsd/ipsec
+
+       # setup netdevsim since dummydev doesn't have offload support
+       modprobe netdevsim
+       check_err $?
+       if [ $ret -ne 0 ]; then
+               echo "FAIL: ipsec_offload can't load netdevsim"
+               return 1
+       fi
+
+       ip link add $dev type netdevsim
+       ip addr add $srcip dev $dev
+       ip link set $dev up
+       if [ ! -d $sysfsd ] ; then
+               echo "FAIL: ipsec_offload can't create device $dev"
+               return 1
+       fi
+       if [ ! -f $sysfsf ] ; then
+               echo "FAIL: ipsec_offload netdevsim doesn't support IPsec offload"
+               return 1
+       fi
+
+       # flush to be sure there's nothing configured
+       ip x s flush ; ip x p flush
+
+       # create offloaded SAs, both in and out
+       ip x p add dir out src $srcip/24 dst $dstip/24 \
+           tmpl proto esp src $srcip dst $dstip spi 9 \
+           mode transport reqid 42
+       check_err $?
+       ip x p add dir out src $dstip/24 dst $srcip/24 \
+           tmpl proto esp src $dstip dst $srcip spi 9 \
+           mode transport reqid 42
+       check_err $?
+
+       ip x s add proto esp src $srcip dst $dstip spi 9 \
+           mode transport reqid 42 $algo sel src $srcip/24 dst $dstip/24 \
+           offload dev $dev dir out
+       check_err $?
+       ip x s add proto esp src $dstip dst $srcip spi 9 \
+           mode transport reqid 42 $algo sel src $dstip/24 dst $srcip/24 \
+           offload dev $dev dir in
+       check_err $?
+       if [ $ret -ne 0 ]; then
+               echo "FAIL: ipsec_offload can't create SA"
+               return 1
+       fi
+
+       # does offload show up in ip output
+       lines=`ip x s list | grep -c "crypto offload parameters: dev $dev dir"`
+       if [ $lines -ne 2 ] ; then
+               echo "FAIL: ipsec_offload SA offload missing from list output"
+               check_err 1
+       fi
+
+       # use ping to exercise the Tx path
+       ping -I $dev -c 3 -W 1 -i 0 $dstip >/dev/null
+
+       # does driver have correct offload info
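+	# (netdevsim appears to dump addresses, key and salt as little-endian
+	# 32-bit words, hence the reversed byte order in the expected output)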
+       diff $sysfsf - << EOF
+SA count=2 tx=3
+sa[0] tx ipaddr=0x00000000 00000000 00000000 00000000
+sa[0]    spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1
+sa[0]    key=0x34333231 38373635 32313039 36353433
+sa[1] rx ipaddr=0x00000000 00000000 00000000 037ba8c0
+sa[1]    spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1
+sa[1]    key=0x34333231 38373635 32313039 36353433
+EOF
+       if [ $? -ne 0 ] ; then
+               echo "FAIL: ipsec_offload incorrect driver data"
+               check_err 1
+       fi
+
+       # does offload get removed from driver
+       ip x s flush
+       ip x p flush
+       lines=`grep -c "SA count=0" $sysfsf`
+       if [ $lines -ne 1 ] ; then
+               echo "FAIL: ipsec_offload SA not removed from driver"
+               check_err 1
+       fi
+
+       # clean up any leftovers
+       ip link del $dev
+       rmmod netdevsim
+
+       if [ $ret -ne 0 ]; then
+               echo "FAIL: ipsec_offload"
+               return 1
+       fi
+       echo "PASS: ipsec_offload"
+}
+
 kci_test_gretap()
 {
        testns="testns"
@@ -861,6 +978,7 @@ kci_test_rtnl()
        kci_test_encap
        kci_test_macsec
        kci_test_ipsec
+       kci_test_ipsec_offload
 
        kci_del_dummy
 }
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
new file mode 100644 (file)
index 0000000..b3ebf26
--- /dev/null
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <linux/tls.h>
+#include <linux/tcp.h>
+#include <linux/socket.h>
+
+#include <sys/types.h>
+#include <sys/sendfile.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+
+#include "../kselftest_harness.h"
+
+#define TLS_PAYLOAD_MAX_LEN 16384
+#define SOL_TLS 282
+
+FIXTURE(tls)
+{
+       int fd, cfd;
+       bool notls;
+};
+
+FIXTURE_SETUP(tls)
+{
+       struct tls12_crypto_info_aes_gcm_128 tls12;
+       struct sockaddr_in addr;
+       socklen_t len;
+       int sfd, ret;
+
+       self->notls = false;
+       len = sizeof(addr);
+
+       memset(&tls12, 0, sizeof(tls12));
+       tls12.info.version = TLS_1_2_VERSION;
+       tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128;
+
+       addr.sin_family = AF_INET;
+       addr.sin_addr.s_addr = htonl(INADDR_ANY);
+       addr.sin_port = 0;
+
+       self->fd = socket(AF_INET, SOCK_STREAM, 0);
+       sfd = socket(AF_INET, SOCK_STREAM, 0);
+
+       ret = bind(sfd, &addr, sizeof(addr));
+       ASSERT_EQ(ret, 0);
+       ret = listen(sfd, 10);
+       ASSERT_EQ(ret, 0);
+
+       ret = getsockname(sfd, &addr, &len);
+       ASSERT_EQ(ret, 0);
+
+       ret = connect(self->fd, &addr, sizeof(addr));
+       ASSERT_EQ(ret, 0);
+
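+	/* The tls ULP can only be attached to an established TCP socket. */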
+       ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+       if (ret != 0) {
+               self->notls = true;
+               printf("Failure setting TCP_ULP, testing without tls\n");
+       }
+
+       if (!self->notls) {
+               ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12,
+                                sizeof(tls12));
+               ASSERT_EQ(ret, 0);
+       }
+
+       self->cfd = accept(sfd, &addr, &len);
+       ASSERT_GE(self->cfd, 0);
+
+       if (!self->notls) {
+               ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls",
+                                sizeof("tls"));
+               ASSERT_EQ(ret, 0);
+
+               ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12,
+                                sizeof(tls12));
+               ASSERT_EQ(ret, 0);
+       }
+
+       close(sfd);
+}
+
+FIXTURE_TEARDOWN(tls)
+{
+       close(self->fd);
+       close(self->cfd);
+}
+
+TEST_F(tls, sendfile)
+{
+       int filefd = open("/proc/self/exe", O_RDONLY);
+       struct stat st;
+
+       EXPECT_GE(filefd, 0);
+       fstat(filefd, &st);
+       EXPECT_GE(sendfile(self->fd, filefd, 0, st.st_size), 0);
+}
+
+TEST_F(tls, send_then_sendfile)
+{
+       int filefd = open("/proc/self/exe", O_RDONLY);
+       char const *test_str = "test_send";
+       int to_send = strlen(test_str) + 1;
+       char recv_buf[10];
+       struct stat st;
+       char *buf;
+
+       EXPECT_GE(filefd, 0);
+       fstat(filefd, &st);
+       buf = (char *)malloc(st.st_size);
+
+       EXPECT_EQ(send(self->fd, test_str, to_send, 0), to_send);
+       EXPECT_EQ(recv(self->cfd, recv_buf, to_send, 0), to_send);
+       EXPECT_EQ(memcmp(test_str, recv_buf, to_send), 0);
+
+       EXPECT_GE(sendfile(self->fd, filefd, 0, st.st_size), 0);
+       EXPECT_EQ(recv(self->cfd, buf, st.st_size, 0), st.st_size);
+}
+
+TEST_F(tls, recv_max)
+{
+       unsigned int send_len = TLS_PAYLOAD_MAX_LEN;
+       char recv_mem[TLS_PAYLOAD_MAX_LEN];
+       char buf[TLS_PAYLOAD_MAX_LEN];
+
+       EXPECT_GE(send(self->fd, buf, send_len, 0), 0);
+       EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1);
+       EXPECT_EQ(memcmp(buf, recv_mem, send_len), 0);
+}
+
+TEST_F(tls, recv_small)
+{
+       char const *test_str = "test_read";
+       int send_len = 10;
+       char buf[10];
+
+       send_len = strlen(test_str) + 1;
+       EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+       EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
+       EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+}
+
+TEST_F(tls, msg_more)
+{
+       char const *test_str = "test_read";
+       int send_len = 10;
+       char buf[10 * 2];
+
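+
+	/* MSG_MORE leaves the TLS record open: nothing reaches the peer until
+	 * the second send() without the flag closes the record, which is why
+	 * the first non-blocking recv() must fail.
+	 */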
+       EXPECT_EQ(send(self->fd, test_str, send_len, MSG_MORE), send_len);
+       EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_DONTWAIT), -1);
+       EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+       EXPECT_EQ(recv(self->cfd, buf, send_len * 2, MSG_DONTWAIT),
+                 send_len * 2);
+       EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+}
+
+TEST_F(tls, sendmsg_single)
+{
+       struct msghdr msg;
+
+       char const *test_str = "test_sendmsg";
+       size_t send_len = 13;
+       struct iovec vec;
+       char buf[13];
+
+       vec.iov_base = (char *)test_str;
+       vec.iov_len = send_len;
+       memset(&msg, 0, sizeof(struct msghdr));
+       msg.msg_iov = &vec;
+       msg.msg_iovlen = 1;
+       EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
+       EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len);
+       EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+}
+
+TEST_F(tls, sendmsg_large)
+{
+       void *mem = malloc(16384);
+       size_t send_len = 16384;
+       size_t sends = 128;
+       struct msghdr msg;
+       size_t recvs = 0;
+       size_t sent = 0;
+
+       memset(&msg, 0, sizeof(struct msghdr));
+       while (sent++ < sends) {
+               struct iovec vec = { (void *)mem, send_len };
+
+               msg.msg_iov = &vec;
+               msg.msg_iovlen = 1;
+               EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len);
+       }
+
+       while (recvs++ < sends)
+               EXPECT_NE(recv(self->fd, mem, send_len, 0), -1);
+
+       free(mem);
+}
+
+TEST_F(tls, sendmsg_multiple)
+{
+       char const *test_str = "test_sendmsg_multiple";
+       struct iovec vec[5];
+       char *test_strs[5];
+       struct msghdr msg;
+       int total_len = 0;
+       int len_cmp = 0;
+       int iov_len = 5;
+       char *buf;
+       int i;
+
+       memset(&msg, 0, sizeof(struct msghdr));
+       for (i = 0; i < iov_len; i++) {
+               test_strs[i] = (char *)malloc(strlen(test_str) + 1);
+               snprintf(test_strs[i], strlen(test_str) + 1, "%s", test_str);
+               vec[i].iov_base = (void *)test_strs[i];
+               vec[i].iov_len = strlen(test_strs[i]) + 1;
+               total_len += vec[i].iov_len;
+       }
+       msg.msg_iov = vec;
+       msg.msg_iovlen = iov_len;
+
+       EXPECT_EQ(sendmsg(self->cfd, &msg, 0), total_len);
+       buf = malloc(total_len);
+       EXPECT_NE(recv(self->fd, buf, total_len, 0), -1);
+       for (i = 0; i < iov_len; i++) {
+               EXPECT_EQ(memcmp(test_strs[i], buf + len_cmp,
+                                strlen(test_strs[i])),
+                         0);
+               len_cmp += strlen(buf + len_cmp) + 1;
+       }
+       for (i = 0; i < iov_len; i++)
+               free(test_strs[i]);
+       free(buf);
+}
+
+TEST_F(tls, sendmsg_multiple_stress)
+{
+       char const *test_str = "abcdefghijklmno";
+       struct iovec vec[1024];
+       char *test_strs[1024];
+       int iov_len = 1024;
+       int total_len = 0;
+       char buf[1 << 14];
+       struct msghdr msg;
+       int len_cmp = 0;
+       int i;
+
+       memset(&msg, 0, sizeof(struct msghdr));
+       for (i = 0; i < iov_len; i++) {
+               test_strs[i] = (char *)malloc(strlen(test_str) + 1);
+               snprintf(test_strs[i], strlen(test_str) + 1, "%s", test_str);
+               vec[i].iov_base = (void *)test_strs[i];
+               vec[i].iov_len = strlen(test_strs[i]) + 1;
+               total_len += vec[i].iov_len;
+       }
+       msg.msg_iov = vec;
+       msg.msg_iovlen = iov_len;
+
+       EXPECT_EQ(sendmsg(self->fd, &msg, 0), total_len);
+       EXPECT_NE(recv(self->cfd, buf, total_len, 0), -1);
+
+       for (i = 0; i < iov_len; i++)
+               len_cmp += strlen(buf + len_cmp) + 1;
+
+       for (i = 0; i < iov_len; i++)
+               free(test_strs[i]);
+}
+
+TEST_F(tls, splice_from_pipe)
+{
+       int send_len = TLS_PAYLOAD_MAX_LEN;
+       char mem_send[TLS_PAYLOAD_MAX_LEN];
+       char mem_recv[TLS_PAYLOAD_MAX_LEN];
+       int p[2];
+
+       ASSERT_GE(pipe(p), 0);
+       EXPECT_GE(write(p[1], mem_send, send_len), 0);
+       EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), 0);
+       EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0);
+       EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
+TEST_F(tls, splice_from_pipe2)
+{
+       int send_len = 16000;
+       char mem_send[16000];
+       char mem_recv[16000];
+       int p2[2];
+       int p[2];
+
+       ASSERT_GE(pipe(p), 0);
+       ASSERT_GE(pipe(p2), 0);
+       EXPECT_GE(write(p[1], mem_send, 8000), 0);
+       EXPECT_GE(splice(p[0], NULL, self->fd, NULL, 8000, 0), 0);
+       EXPECT_GE(write(p2[1], mem_send + 8000, 8000), 0);
+       EXPECT_GE(splice(p2[0], NULL, self->fd, NULL, 8000, 0), 0);
+       EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0);
+       EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
+TEST_F(tls, send_and_splice)
+{
+       int send_len = TLS_PAYLOAD_MAX_LEN;
+       char mem_send[TLS_PAYLOAD_MAX_LEN];
+       char mem_recv[TLS_PAYLOAD_MAX_LEN];
+       char const *test_str = "test_read";
+       int send_len2 = 10;
+       char buf[10];
+       int p[2];
+
+       ASSERT_GE(pipe(p), 0);
+       EXPECT_EQ(send(self->fd, test_str, send_len2, 0), send_len2);
+       EXPECT_NE(recv(self->cfd, buf, send_len2, 0), -1);
+       EXPECT_EQ(memcmp(test_str, buf, send_len2), 0);
+
+       EXPECT_GE(write(p[1], mem_send, send_len), send_len);
+       EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), send_len);
+
+       EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0);
+       EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
+TEST_F(tls, splice_to_pipe)
+{
+       int send_len = TLS_PAYLOAD_MAX_LEN;
+       char mem_send[TLS_PAYLOAD_MAX_LEN];
+       char mem_recv[TLS_PAYLOAD_MAX_LEN];
+       int p[2];
+
+       ASSERT_GE(pipe(p), 0);
+       EXPECT_GE(send(self->fd, mem_send, send_len, 0), 0);
+       EXPECT_GE(splice(self->cfd, NULL, p[1], NULL, send_len, 0), 0);
+       EXPECT_GE(read(p[0], mem_recv, send_len), 0);
+       EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
+TEST_F(tls, recvmsg_single)
+{
+       char const *test_str = "test_recvmsg_single";
+       int send_len = strlen(test_str) + 1;
+       char buf[20];
+       struct msghdr hdr;
+       struct iovec vec;
+
+       memset(&hdr, 0, sizeof(hdr));
+       EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+       vec.iov_base = (char *)buf;
+       vec.iov_len = send_len;
+       hdr.msg_iovlen = 1;
+       hdr.msg_iov = &vec;
+       EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1);
+       EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+}
+
+TEST_F(tls, recvmsg_single_max)
+{
+       int send_len = TLS_PAYLOAD_MAX_LEN;
+       char send_mem[TLS_PAYLOAD_MAX_LEN];
+       char recv_mem[TLS_PAYLOAD_MAX_LEN];
+       struct iovec vec;
+       struct msghdr hdr;
+
+       EXPECT_EQ(send(self->fd, send_mem, send_len, 0), send_len);
+       vec.iov_base = (char *)recv_mem;
+       vec.iov_len = TLS_PAYLOAD_MAX_LEN;
+
+       hdr.msg_iovlen = 1;
+       hdr.msg_iov = &vec;
+       EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1);
+       EXPECT_EQ(memcmp(send_mem, recv_mem, send_len), 0);
+}
+
+TEST_F(tls, recvmsg_multiple)
+{
+       unsigned int msg_iovlen = 1024;
+       unsigned int len_compared = 0;
+       struct iovec vec[1024];
+       char *iov_base[1024];
+       unsigned int iov_len = 16;
+       int send_len = 1 << 14;
+       char buf[1 << 14];
+       struct msghdr hdr;
+       int i;
+
+       EXPECT_EQ(send(self->fd, buf, send_len, 0), send_len);
+       for (i = 0; i < msg_iovlen; i++) {
+               iov_base[i] = (char *)malloc(iov_len);
+               vec[i].iov_base = iov_base[i];
+               vec[i].iov_len = iov_len;
+       }
+
+       hdr.msg_iovlen = msg_iovlen;
+       hdr.msg_iov = vec;
+       EXPECT_NE(recvmsg(self->cfd, &hdr, 0), -1);
+       for (i = 0; i < msg_iovlen; i++)
+               len_compared += iov_len;
+
+       for (i = 0; i < msg_iovlen; i++)
+               free(iov_base[i]);
+}
+
+TEST_F(tls, single_send_multiple_recv)
+{
+       unsigned int total_len = TLS_PAYLOAD_MAX_LEN * 2;
+       unsigned int send_len = TLS_PAYLOAD_MAX_LEN;
+       char send_mem[TLS_PAYLOAD_MAX_LEN * 2];
+       char recv_mem[TLS_PAYLOAD_MAX_LEN * 2];
+
+       EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0);
+       memset(recv_mem, 0, total_len);
+
+       EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1);
+       EXPECT_NE(recv(self->cfd, recv_mem + send_len, send_len, 0), -1);
+       EXPECT_EQ(memcmp(send_mem, recv_mem, total_len), 0);
+}
+
+TEST_F(tls, multiple_send_single_recv)
+{
+       unsigned int total_len = 2 * 10;
+       unsigned int send_len = 10;
+       char recv_mem[2 * 10];
+       char send_mem[10];
+
+       EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0);
+       EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0);
+       memset(recv_mem, 0, total_len);
+       EXPECT_EQ(recv(self->cfd, recv_mem, total_len, 0), total_len);
+
+       EXPECT_EQ(memcmp(send_mem, recv_mem, send_len), 0);
+       EXPECT_EQ(memcmp(send_mem, recv_mem + send_len, send_len), 0);
+}
+
+TEST_F(tls, recv_partial)
+{
+       char const *test_str = "test_read_partial";
+       char const *test_str_first = "test_read";
+       char const *test_str_second = "_partial";
+       int send_len = strlen(test_str) + 1;
+       char recv_mem[18];
+
+       memset(recv_mem, 0, sizeof(recv_mem));
+       EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+       EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first), 0), -1);
+       EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0);
+       memset(recv_mem, 0, sizeof(recv_mem));
+       EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second), 0), -1);
+       EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)),
+                 0);
+}
+
+TEST_F(tls, recv_nonblock)
+{
+       char buf[4096];
+       bool err;
+
+       EXPECT_EQ(recv(self->cfd, buf, sizeof(buf), MSG_DONTWAIT), -1);
+       err = (errno == EAGAIN || errno == EWOULDBLOCK);
+       EXPECT_EQ(err, true);
+}
+
+TEST_F(tls, recv_peek)
+{
+       char const *test_str = "test_read_peek";
+       int send_len = strlen(test_str) + 1;
+       char buf[15];
+
+       EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+       EXPECT_NE(recv(self->cfd, buf, send_len, MSG_PEEK), -1);
+       EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+       memset(buf, 0, sizeof(buf));
+       EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
+       EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+}
+
+TEST_F(tls, recv_peek_multiple)
+{
+       char const *test_str = "test_read_peek";
+       int send_len = strlen(test_str) + 1;
+       unsigned int num_peeks = 100;
+       char buf[15];
+       int i;
+
+       EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+       for (i = 0; i < num_peeks; i++) {
+               EXPECT_NE(recv(self->cfd, buf, send_len, MSG_PEEK), -1);
+               EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+               memset(buf, 0, sizeof(buf));
+       }
+       EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
+       EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+}
+
+TEST_F(tls, pollin)
+{
+       char const *test_str = "test_poll";
+       struct pollfd fd = { 0, 0, 0 };
+       char buf[10];
+       int send_len = 10;
+
+       EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+       fd.fd = self->cfd;
+       fd.events = POLLIN;
+
+       EXPECT_EQ(poll(&fd, 1, 20), 1);
+       EXPECT_EQ(fd.revents & POLLIN, 1);
+       EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len);
+       /* Test timing out */
+       EXPECT_EQ(poll(&fd, 1, 20), 0);
+}
+
+TEST_F(tls, poll_wait)
+{
+       char const *test_str = "test_poll_wait";
+       int send_len = strlen(test_str) + 1;
+       struct pollfd fd = { 0, 0, 0 };
+       char recv_mem[15];
+
+       fd.fd = self->cfd;
+       fd.events = POLLIN;
+       EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+       /* Wait indefinitely (infinite poll timeout) */
+       EXPECT_EQ(poll(&fd, 1, -1), 1);
+       EXPECT_EQ(fd.revents & POLLIN, 1);
+       EXPECT_EQ(recv(self->cfd, recv_mem, send_len, 0), send_len);
+}
+
+TEST_F(tls, blocking)
+{
+       size_t data = 100000;
+       int res = fork();
+
+       EXPECT_NE(res, -1);
+
+       if (res) {
+               /* parent */
+               size_t left = data;
+               char buf[16384];
+               int status;
+               int pid2;
+
+               while (left) {
+                       int res = send(self->fd, buf,
+                                      left > 16384 ? 16384 : left, 0);
+
+                       EXPECT_GE(res, 0);
+                       left -= res;
+               }
+
+               pid2 = wait(&status);
+               EXPECT_EQ(status, 0);
+               EXPECT_EQ(res, pid2);
+       } else {
+               /* child */
+               size_t left = data;
+               char buf[16384];
+
+               while (left) {
+                       int res = recv(self->cfd, buf,
+                                      left > 16384 ? 16384 : left, 0);
+
+                       EXPECT_GE(res, 0);
+                       left -= res;
+               }
+       }
+}
+
+TEST_F(tls, nonblocking)
+{
+       size_t data = 100000;
+       int sendbuf = 100;
+       int flags;
+       int res;
+
+       flags = fcntl(self->fd, F_GETFL, 0);
+       fcntl(self->fd, F_SETFL, flags | O_NONBLOCK);
+       fcntl(self->cfd, F_SETFL, flags | O_NONBLOCK);
+
+       /* Ensure nonblocking behavior by imposing a small send
+        * buffer.
+        */
+       EXPECT_EQ(setsockopt(self->fd, SOL_SOCKET, SO_SNDBUF,
+                            &sendbuf, sizeof(sendbuf)), 0);
+
+       res = fork();
+       EXPECT_NE(res, -1);
+
+       if (res) {
+               /* parent */
+               bool eagain = false;
+               size_t left = data;
+               char buf[16384];
+               int status;
+               int pid2;
+
+               while (left) {
+                       int res = send(self->fd, buf,
+                                      left > 16384 ? 16384 : left, 0);
+
+                       if (res == -1 && errno == EAGAIN) {
+                               eagain = true;
+                               usleep(10000);
+                               continue;
+                       }
+                       EXPECT_GE(res, 0);
+                       left -= res;
+               }
+
+               EXPECT_TRUE(eagain);
+               pid2 = wait(&status);
+
+               EXPECT_EQ(status, 0);
+               EXPECT_EQ(res, pid2);
+       } else {
+               /* child */
+               bool eagain = false;
+               size_t left = data;
+               char buf[16384];
+
+               while (left) {
+                       int res = recv(self->cfd, buf,
+                                      left > 16384 ? 16384 : left, 0);
+
+                       if (res == -1 && errno == EAGAIN) {
+                               eagain = true;
+                               usleep(10000);
+                               continue;
+                       }
+                       EXPECT_GE(res, 0);
+                       left -= res;
+               }
+               EXPECT_TRUE(eagain);
+       }
+}
+
+TEST_F(tls, control_msg)
+{
+       if (self->notls)
+               return;
+
+       char cbuf[CMSG_SPACE(sizeof(char))];
+       char const *test_str = "test_read";
+       int cmsg_len = sizeof(char);
+       char record_type = 100;
+       struct cmsghdr *cmsg;
+       struct msghdr msg;
+       int send_len = 10;
+       struct iovec vec;
+       char buf[10];
+
+       vec.iov_base = (char *)test_str;
+       vec.iov_len = 10;
+       memset(&msg, 0, sizeof(struct msghdr));
+       msg.msg_iov = &vec;
+       msg.msg_iovlen = 1;
+       msg.msg_control = cbuf;
+       msg.msg_controllen = sizeof(cbuf);
+       cmsg = CMSG_FIRSTHDR(&msg);
+       cmsg->cmsg_level = SOL_TLS;
+       /* Test sending a non-DATA record type. */
+       cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
+       cmsg->cmsg_len = CMSG_LEN(cmsg_len);
+       *CMSG_DATA(cmsg) = record_type;
+       msg.msg_controllen = cmsg->cmsg_len;
+
+       EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
+       /* Should fail because we did not provide a control-message buffer */
+       EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
+
+       vec.iov_base = buf;
+       EXPECT_EQ(recvmsg(self->cfd, &msg, 0), send_len);
+       cmsg = CMSG_FIRSTHDR(&msg);
+       EXPECT_NE(cmsg, NULL);
+       EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
+       EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
+       record_type = *((unsigned char *)CMSG_DATA(cmsg));
+       EXPECT_EQ(record_type, 100);
+       EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+}
+
+TEST_HARNESS_MAIN
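
The tests above assume the fixture has already attached kTLS to both ends of
a connected TCP pair. For reference, a minimal sketch of such a setup follows;
the helper name attach_ktls and the zeroed key material are illustrative
assumptions, not part of this patch:

        #include <linux/tls.h>
        #include <netinet/tcp.h>
        #include <sys/socket.h>
        #include <string.h>

        #ifndef SOL_TLS
        # define SOL_TLS 282    /* value from the kernel's socket.h */
        #endif

        /* Hypothetical helper: enable kTLS in one direction (TLS_TX or TLS_RX). */
        static int attach_ktls(int fd, int dir)
        {
                struct tls12_crypto_info_aes_gcm_128 crypto;

                memset(&crypto, 0, sizeof(crypto));
                crypto.info.version = TLS_1_2_VERSION;
                crypto.info.cipher_type = TLS_CIPHER_AES_GCM_128;
                /* key/iv/salt/rec_seq left zeroed purely for illustration */

                if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
                        return -1;
                return setsockopt(fd, SOL_TLS, dir, &crypto, sizeof(crypto));
        }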
index 6ccb154cb4aa4f36184811d406ed9f4317647e4f..22f8df1ad7d484418235b6dadd290baca3bf3c6c 100755 (executable)
@@ -7,13 +7,16 @@
 #
 # Released under the terms of the GPL v2.
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 . ./common_tests
 
 if [ -e $REBOOT_FLAG  ]; then
     rm $REBOOT_FLAG
 else
     prlog "pstore_crash_test has not been executed yet. we skip further tests."
-    exit 0
+    exit $ksft_skip
 fi
 
 prlog -n "Mounting pstore filesystem ... "
index 6a9f602a8718691b086b08544e41aa0a17667e18..615252331813416675184c19066730624e53a96c 100644 (file)
@@ -137,6 +137,30 @@ unsigned int yield_mod_cnt, nr_abort;
        "subic. %%" INJECT_ASM_REG ", %%" INJECT_ASM_REG ", 1\n\t" \
        "bne 222b\n\t" \
        "333:\n\t"
+
+#elif defined(__mips__)
+
+#define RSEQ_INJECT_INPUT \
+       , [loop_cnt_1]"m"(loop_cnt[1]) \
+       , [loop_cnt_2]"m"(loop_cnt[2]) \
+       , [loop_cnt_3]"m"(loop_cnt[3]) \
+       , [loop_cnt_4]"m"(loop_cnt[4]) \
+       , [loop_cnt_5]"m"(loop_cnt[5]) \
+       , [loop_cnt_6]"m"(loop_cnt[6])
+
+#define INJECT_ASM_REG "$5"
+
+#define RSEQ_INJECT_CLOBBER \
+       , INJECT_ASM_REG
+
+#define RSEQ_INJECT_ASM(n) \
+       "lw " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \
+       "beqz " INJECT_ASM_REG ", 333f\n\t" \
+       "222:\n\t" \
+       "addiu " INJECT_ASM_REG ", -1\n\t" \
+       "bnez " INJECT_ASM_REG ", 222b\n\t" \
+       "333:\n\t"
+
 #else
 #error unsupported target
 #endif
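
For reference, RSEQ_INJECT_ASM(n) above amounts to the following C, hand-coded
in assembly so the compiler cannot optimize the delay away. The sketch assumes
the loop_cnt array declared earlier in param_test.c:

        /* C shape of RSEQ_INJECT_ASM(n): spin loop_cnt[n] times at point n
         * to widen race windows during testing.
         */
        static inline void inject_delay(unsigned int n)
        {
                unsigned int tmp;       /* mirrors scratch register $5 */

                for (tmp = loop_cnt[n]; tmp; tmp--)
                        ;
        }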
index 3b055f9aeaab56bcbe91f9bc493ac2d08c527074..3cea19877227a03c4c501bffa6a687cbe32ad126 100644 (file)
@@ -57,6 +57,7 @@ do {                                                                  \
 #define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown,          \
                                abort_label, version, flags,            \
                                start_ip, post_commit_offset, abort_ip) \
+               ".balign 32\n\t"                                        \
                __rseq_str(table_label) ":\n\t"                         \
                ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
                ".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
diff --git a/tools/testing/selftests/rseq/rseq-mips.h b/tools/testing/selftests/rseq/rseq-mips.h
new file mode 100644 (file)
index 0000000..7f48ecf
--- /dev/null
@@ -0,0 +1,725 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Author: Paul Burton <paul.burton@mips.com>
+ * (C) Copyright 2018 MIPS Tech LLC
+ *
+ * Based on rseq-arm.h:
+ * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define RSEQ_SIG       0x53053053
+
+#define rseq_smp_mb()  __asm__ __volatile__ ("sync" ::: "memory")
+#define rseq_smp_rmb() rseq_smp_mb()
+#define rseq_smp_wmb() rseq_smp_mb()
+
+#define rseq_smp_load_acquire(p)                                       \
+__extension__ ({                                                       \
+       __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p);                       \
+       rseq_smp_mb();                                                  \
+       ____p1;                                                         \
+})
+
+#define rseq_smp_acquire__after_ctrl_dep()     rseq_smp_rmb()
+
+#define rseq_smp_store_release(p, v)                                   \
+do {                                                                   \
+       rseq_smp_mb();                                                  \
+       RSEQ_WRITE_ONCE(*p, v);                                         \
+} while (0)
+
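These macros map the MIPS "sync" barrier onto acquire/release semantics. A
minimal message-passing sketch built on the two macros (the globals and both
functions are illustrative, not part of this patch; intptr_t is from
<stdint.h>):

        static intptr_t payload;
        static intptr_t ready;

        static void producer(void)
        {
                payload = 42;                           /* plain store */
                rseq_smp_store_release(&ready, 1);      /* sync, then store */
        }

        static void consumer(void)
        {
                while (!rseq_smp_load_acquire(&ready))  /* load, then sync */
                        ;
                /* payload is guaranteed to be observed as 42 here */
        }
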
+#ifdef RSEQ_SKIP_FASTPATH
+#include "rseq-skip.h"
+#else /* !RSEQ_SKIP_FASTPATH */
+
+#if _MIPS_SZLONG == 64
+# define LONG                  ".dword"
+# define LONG_LA               "dla"
+# define LONG_L                        "ld"
+# define LONG_S                        "sd"
+# define LONG_ADDI             "daddiu"
+# define U32_U64_PAD(x)                x
+#elif _MIPS_SZLONG == 32
+# define LONG                  ".word"
+# define LONG_LA               "la"
+# define LONG_L                        "lw"
+# define LONG_S                        "sw"
+# define LONG_ADDI             "addiu"
+# ifdef __BIG_ENDIAN
+#  define U32_U64_PAD(x)       "0x0, " x
+# else
+#  define U32_U64_PAD(x)       x ", 0x0"
+# endif
+#else
+# error unsupported _MIPS_SZLONG
+#endif
+
+#define __RSEQ_ASM_DEFINE_TABLE(version, flags, start_ip, \
+                               post_commit_offset, abort_ip) \
+               ".pushsection __rseq_table, \"aw\"\n\t" \
+               ".balign 32\n\t" \
+               ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \
+               ".popsection\n\t"
+
+#define RSEQ_ASM_DEFINE_TABLE(start_ip, post_commit_ip, abort_ip) \
+       __RSEQ_ASM_DEFINE_TABLE(0x0, 0x0, start_ip, \
+                               (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
+               RSEQ_INJECT_ASM(1) \
+               LONG_LA " $4, " __rseq_str(cs_label) "\n\t" \
+               LONG_S  " $4, %[" __rseq_str(rseq_cs) "]\n\t" \
+               __rseq_str(label) ":\n\t"
+
+#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
+               RSEQ_INJECT_ASM(2) \
+               "lw  $4, %[" __rseq_str(current_cpu_id) "]\n\t" \
+               "bne $4, %[" __rseq_str(cpu_id) "], " __rseq_str(label) "\n\t"
+
+#define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \
+                               abort_label, version, flags, \
+                               start_ip, post_commit_offset, abort_ip) \
+               ".balign 32\n\t" \
+               __rseq_str(table_label) ":\n\t" \
+               ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \
+               ".word " __rseq_str(RSEQ_SIG) "\n\t" \
+               __rseq_str(label) ":\n\t" \
+               teardown \
+               "b %l[" __rseq_str(abort_label) "]\n\t"
+
+#define RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, abort_label, \
+                             start_ip, post_commit_ip, abort_ip) \
+       __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \
+                               abort_label, 0x0, 0x0, start_ip, \
+                               (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \
+               __rseq_str(label) ":\n\t" \
+               teardown \
+               "b %l[" __rseq_str(cmpfail_label) "]\n\t"
+
+#define rseq_workaround_gcc_asm_size_guess()   __asm__ __volatile__("")
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[error2]\n\t"
+#endif
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(5)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
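
The return convention above is shared by all of these fast paths: 0 means the
final store committed, 1 means the compared value did not match, and -1 means
the critical section was aborted (e.g. by migration or a signal) and should be
retried. A hedged usage sketch, assuming the rseq_cpu_start() helper from
rseq.h and an illustrative per-CPU counter array (CPU_SETSIZE from <sched.h>):

        static intptr_t counters[CPU_SETSIZE]; /* illustrative only */

        static void percpu_inc(void)
        {
                for (;;) {
                        int cpu = rseq_cpu_start();     /* assumed helper */
                        intptr_t old = counters[cpu];
                        int ret = rseq_cmpeqv_storev(&counters[cpu], old,
                                                     old + 1, cpu);

                        if (!ret)
                                return; /* committed on this cpu */
                        /* ret > 0: value raced; ret < 0: aborted; retry */
                }
        }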
+
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+                              off_t voffp, intptr_t *load, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "beq $4, %[expectnot], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "beq $4, %[expectnot], %l[error2]\n\t"
+#endif
+               LONG_S " $4, %[load]\n\t"
+               LONG_ADDI " $4, %[voffp]\n\t"
+               LONG_L " $4, 0($4)\n\t"
+               /* final store */
+               LONG_S " $4, %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(5)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expectnot]           "r" (expectnot),
+                 [voffp]               "Ir" (voffp),
+                 [load]                "m" (*load)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_addv(intptr_t *v, intptr_t count, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+               LONG_L " $4, %[v]\n\t"
+               LONG_ADDI " $4, %[count]\n\t"
+               /* final store */
+               LONG_S " $4, %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(4)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 [v]                   "m" (*v),
+                 [count]               "Ir" (count)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
+                                intptr_t *v2, intptr_t newv2,
+                                intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[error2]\n\t"
+#endif
+               /* try store */
+               LONG_S " %[newv2], %[v2]\n\t"
+               RSEQ_INJECT_ASM(5)
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* try store input */
+                 [v2]                  "m" (*v2),
+                 [newv2]               "r" (newv2),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
+                                        intptr_t *v2, intptr_t newv2,
+                                        intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[error2]\n\t"
+#endif
+               /* try store */
+               LONG_S " %[newv2], %[v2]\n\t"
+               RSEQ_INJECT_ASM(5)
+               "sync\n\t"      /* full sync provides store-release */
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* try store input */
+                 [v2]                  "m" (*v2),
+                 [newv2]               "r" (newv2),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+                             intptr_t *v2, intptr_t expect2,
+                             intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+               LONG_L " $4, %[v2]\n\t"
+               "bne $4, %[expect2], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[error2]\n\t"
+               LONG_L " $4, %[v2]\n\t"
+               "bne $4, %[expect2], %l[error3]\n\t"
+#endif
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* cmp2 input */
+                 [v2]                  "m" (*v2),
+                 [expect2]             "r" (expect2),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2, error3
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("1st expected value comparison failed");
+error3:
+       rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
+                                void *dst, void *src, size_t len,
+                                intptr_t newv, int cpu)
+{
+       uintptr_t rseq_scratch[3];
+
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               LONG_S " %[src], %[rseq_scratch0]\n\t"
+               LONG_S "  %[dst], %[rseq_scratch1]\n\t"
+               LONG_S " %[len], %[rseq_scratch2]\n\t"
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], 5f\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], 7f\n\t"
+#endif
+               /* try memcpy */
+               "beqz %[len], 333f\n\t" \
+               "222:\n\t" \
+               "lb   $4, 0(%[src])\n\t" \
+               "sb   $4, 0(%[dst])\n\t" \
+               LONG_ADDI " %[src], 1\n\t" \
+               LONG_ADDI " %[dst], 1\n\t" \
+               LONG_ADDI " %[len], -1\n\t" \
+               "bnez %[len], 222b\n\t" \
+               "333:\n\t" \
+               RSEQ_INJECT_ASM(5)
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               /* teardown */
+               LONG_L " %[len], %[rseq_scratch2]\n\t"
+               LONG_L " %[dst], %[rseq_scratch1]\n\t"
+               LONG_L " %[src], %[rseq_scratch0]\n\t"
+               "b 8f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4,
+                                     /* teardown */
+                                     LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                     LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                     LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                     abort, 1b, 2b, 4f)
+               RSEQ_ASM_DEFINE_CMPFAIL(5,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_DEFINE_CMPFAIL(6,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       error1)
+               RSEQ_ASM_DEFINE_CMPFAIL(7,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       error2)
+#endif
+               "8:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv),
+                 /* try memcpy input */
+                 [dst]                 "r" (dst),
+                 [src]                 "r" (src),
+                 [len]                 "r" (len),
+                 [rseq_scratch0]       "m" (rseq_scratch[0]),
+                 [rseq_scratch1]       "m" (rseq_scratch[1]),
+                 [rseq_scratch2]       "m" (rseq_scratch[2])
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_workaround_gcc_asm_size_guess();
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_workaround_gcc_asm_size_guess();
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
+                                        void *dst, void *src, size_t len,
+                                        intptr_t newv, int cpu)
+{
+       uintptr_t rseq_scratch[3];
+
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               LONG_S " %[src], %[rseq_scratch0]\n\t"
+               LONG_S " %[dst], %[rseq_scratch1]\n\t"
+               LONG_S " %[len], %[rseq_scratch2]\n\t"
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], 5f\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], 7f\n\t"
+#endif
+               /* try memcpy */
+               "beqz %[len], 333f\n\t" \
+               "222:\n\t" \
+               "lb   $4, 0(%[src])\n\t" \
+               "sb   $4, 0(%[dst])\n\t" \
+               LONG_ADDI " %[src], 1\n\t" \
+               LONG_ADDI " %[dst], 1\n\t" \
+               LONG_ADDI " %[len], -1\n\t" \
+               "bnez %[len], 222b\n\t" \
+               "333:\n\t" \
+               RSEQ_INJECT_ASM(5)
+               "sync\n\t"      /* full sync provides store-release */
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               /* teardown */
+               LONG_L " %[len], %[rseq_scratch2]\n\t"
+               LONG_L " %[dst], %[rseq_scratch1]\n\t"
+               LONG_L " %[src], %[rseq_scratch0]\n\t"
+               "b 8f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4,
+                                     /* teardown */
+                                     LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                     LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                     LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                     abort, 1b, 2b, 4f)
+               RSEQ_ASM_DEFINE_CMPFAIL(5,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_DEFINE_CMPFAIL(6,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       error1)
+               RSEQ_ASM_DEFINE_CMPFAIL(7,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       error2)
+#endif
+               "8:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv),
+                 /* try memcpy input */
+                 [dst]                 "r" (dst),
+                 [src]                 "r" (src),
+                 [len]                 "r" (len),
+                 [rseq_scratch0]       "m" (rseq_scratch[0]),
+                 [rseq_scratch1]       "m" (rseq_scratch[1]),
+                 [rseq_scratch2]       "m" (rseq_scratch[2])
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_workaround_gcc_asm_size_guess();
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_workaround_gcc_asm_size_guess();
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+#endif /* !RSEQ_SKIP_FASTPATH */
index 0a808575cbc443489a5713639f7076eacedbdde4..a4684112676c87073793e94b54a09a5bee8a9892 100644 (file)
@@ -73,6 +73,8 @@ extern __thread volatile struct rseq __rseq_abi;
 #include <rseq-arm.h>
 #elif defined(__PPC__)
 #include <rseq-ppc.h>
+#elif defined(__mips__)
+#include <rseq-mips.h>
 #else
 #error unsupported target
 #endif
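
Before any of the per-arch fast paths can run, each thread must register its
__rseq_abi with the kernel. A hedged sketch of the per-thread lifecycle, using
the registration helpers that the selftests' rseq.h/rseq.c provide (error
handling trimmed for brevity):

        #include <pthread.h>
        #include <stdlib.h>
        #include "rseq.h"

        static void *worker(void *arg)
        {
                if (rseq_register_current_thread())
                        abort();        /* kernel without rseq support */

                /* ... rseq_addv(), rseq_cmpeqv_storev(), etc. ... */

                rseq_unregister_current_thread();
                return NULL;
        }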
old mode 100644 (file)
new mode 100755 (executable)
index 2082eeffd779d586b558f45883c5dc1cc08a865b..a19531dba4dc311d00e33fcb637b91bced262bf2 100644 (file)
@@ -1,7 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e s/x86_64/x86/)
+
+ifneq ($(ARCH),sparc64)
+nothing:
+.PHONY: all clean run_tests install
+.SILENT:
+else
+
 SUBDIRS := drivers
 
 TEST_PROGS := run.sh
 
+
 .PHONY: all clean
 
 include ../lib.mk
@@ -18,10 +29,6 @@ all:
                fi \
        done
 
-override define RUN_TESTS
-       @cd $(OUTPUT); ./run.sh
-endef
-
 override define INSTALL_RULE
        mkdir -p $(INSTALL_PATH)
        install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)
@@ -33,10 +40,6 @@ override define INSTALL_RULE
        done;
 endef
 
-override define EMIT_TESTS
-       echo "./run.sh"
-endef
-
 override define CLEAN
        @for DIR in $(SUBDIRS); do              \
                BUILD_TARGET=$(OUTPUT)/$$DIR;   \
@@ -44,3 +47,4 @@ override define CLEAN
                make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
        done
 endef
+endif
index 6264f40bbdbc9dedf9fd41519823fa31771d6cd0..deb0df4155659ec1f4b13a4d74c5128673690226 100644 (file)
@@ -1,4 +1,4 @@
-
+# SPDX-License-Identifier: GPL-2.0
 INCLUDEDIR := -I.
 CFLAGS := $(CFLAGS) $(INCLUDEDIR) -Wall -O2 -g
 
index 24cff498b31aa831b388e638929f29c36db07dbd..fc9f8cde7d4223c3fd564105942d4d648acb8627 100755 (executable)
@@ -2,6 +2,19 @@
 # SPDX-License-Identifier: GPL-2.0
 # Runs static keys kernel module tests
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if ! /sbin/modprobe -q -n test_static_key_base; then
+       echo "static_key: module test_static_key_base is not found [SKIP]"
+       exit $ksft_skip
+fi
+
+if ! /sbin/modprobe -q -n test_static_keys; then
+       echo "static_key: module test_static_keys is not found [SKIP]"
+       exit $ksft_skip
+fi
+
 if /sbin/modprobe -q test_static_key_base; then
        if /sbin/modprobe -q test_static_keys; then
                echo "static_key: ok"
diff --git a/tools/testing/selftests/sync/config b/tools/testing/selftests/sync/config
new file mode 100644 (file)
index 0000000..1ab7e81
--- /dev/null
@@ -0,0 +1,4 @@
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
index ec232c3cfcaac3b8f52936eabb908a0c316183f8..584eb8ea780a49220782d08e104756199fc19934 100755 (executable)
@@ -14,6 +14,9 @@
 
 # This performs a series tests against the proc sysctl interface.
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 TEST_NAME="sysctl"
 TEST_DRIVER="test_${TEST_NAME}"
 TEST_DIR=$(dirname $0)
@@ -41,7 +44,7 @@ test_modprobe()
                echo "$0: $DIR not present" >&2
                echo "You must have the following enabled in your kernel:" >&2
                cat $TEST_DIR/config >&2
-               exit 1
+               exit $ksft_skip
        fi
 }
 
@@ -98,28 +101,30 @@ test_reqs()
        uid=$(id -u)
        if [ $uid -ne 0 ]; then
                echo $msg must be run as root >&2
-               exit 0
+               exit $ksft_skip
        fi
 
        if ! which perl 2> /dev/null > /dev/null; then
                echo "$0: You need perl installed"
-               exit 1
+               exit $ksft_skip
        fi
        if ! which getconf 2> /dev/null > /dev/null; then
                echo "$0: You need getconf installed"
-               exit 1
+               exit $ksft_skip
        fi
        if ! which diff 2> /dev/null > /dev/null; then
                echo "$0: You need diff installed"
-               exit 1
+               exit $ksft_skip
        fi
 }
 
 function load_req_mod()
 {
-       trap "test_modprobe" EXIT
-
        if [ ! -d $DIR ]; then
+               if ! modprobe -q -n $TEST_DRIVER; then
+                       echo "$0: module $TEST_DRIVER not found [SKIP]"
+                       exit $ksft_skip
+               fi
                modprobe $TEST_DRIVER
                if [ $? -ne 0 ]; then
                        exit
@@ -765,6 +770,7 @@ function parse_args()
 test_reqs
 allow_user_defaults
 check_production_sysctl_writes_strict
+test_modprobe
 load_req_mod
 
 trap "test_finish" EXIT
index 3a2f51fc7fd457368d7e3a92e1d8187431ce1c5d..a022792d392a9c93bf4c33d049ea7c817b85eded 100644 (file)
             "$TC actions flush action csum"
         ]
     },
+    {
+        "id": "b10b",
+        "name": "Add all 7 csum actions",
+        "category": [
+            "actions",
+            "csum"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action csum",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action csum icmp ip4h sctp igmp udplite udp tcp index 7",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action csum index 7",
+        "matchPattern": "action order [0-9]*: csum \\(iph, icmp, igmp, tcp, udp, udplite, sctp\\).*index 7 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action csum"
+        ]
+    },
     {
         "id": "ce92",
         "name": "Add csum udp action with cookie",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
new file mode 100644 (file)
index 0000000..10b2d89
--- /dev/null
@@ -0,0 +1,917 @@
+[
+    {
+        "id": "2b11",
+        "name": "Add tunnel_key set action with mandatory parameters",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action tunnel_key",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "dc6b",
+        "name": "Add tunnel_key set action with missing mandatory src_ip parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set dst_ip 20.20.20.2 id 100",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action tunnel_key",
+        "matchPattern": "action order [0-9]+: tunnel_key set.*dst_ip 20.20.20.2.*key_id 100",
+        "matchCount": "0",
+        "teardown": [
+           [
+               "$TC actions flush action tunnel_key",
+               0,
+               1,
+               255
+           ]
+        ]
+    },
+    {
+        "id": "7f25",
+        "name": "Add tunnel_key set action with missing mandatory dst_ip parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 id 100",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action tunnel_key",
+        "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*key_id 100",
+        "matchCount": "0",
+        "teardown": [
+           [
+               "$TC actions flush action tunnel_key",
+               0,
+               1,
+               255
+           ]
+        ]
+    },
+    {
+        "id": "ba4e",
+        "name": "Add tunnel_key set action with missing mandatory id parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action tunnel_key",
+        "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2",
+        "matchCount": "0",
+        "teardown": [
+           [
+               "$TC actions flush action tunnel_key",
+               0,
+               1,
+               255
+           ]
+        ]
+    },
+    {
+        "id": "a5e0",
+        "name": "Add tunnel_key set action with invalid src_ip parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 300.168.100.1 dst_ip 192.168.200.1 id 7 index 1",
+        "expExitCode": "1",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 300.168.100.1.*dst_ip 192.168.200.1.*key_id 7.*index 1 ref",
+        "matchCount": "0",
+        "teardown": [
+           [
+               "$TC actions flush action tunnel_key",
+               0,
+               1,
+               255
+           ]
+        ]
+    },
+    {
+        "id": "eaa8",
+        "name": "Add tunnel_key set action with invalid dst_ip parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.100.1 dst_ip 192.168.800.1 id 10 index 11",
+        "expExitCode": "1",
+        "verifyCmd": "$TC actions get action tunnel_key index 11",
+        "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 192.168.100.1.*dst_ip 192.168.800.1.*key_id 10.*index 11 ref",
+        "matchCount": "0",
+        "teardown": [
+           [
+               "$TC actions flush action tunnel_key",
+               0,
+               1,
+               255
+           ]
+        ]
+    },
+    {
+        "id": "3b09",
+        "name": "Add tunnel_key set action with invalid id parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 112233445566778899 index 1",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 112233445566778899.*index 1 ref",
+        "matchCount": "0",
+        "teardown": [
+           [
+               "$TC actions flush action tunnel_key",
+               0,
+               1,
+               255
+           ]
+        ]
+    },
+    {
+        "id": "9625",
+        "name": "Add tunnel_key set action with invalid dst_port parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 11 dst_port 998877 index 1",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 11.*dst_port 998877.*index 1 ref",
+        "matchCount": "0",
+        "teardown": [
+           [
+               "$TC actions flush action tunnel_key",
+               0,
+               1,
+               255
+           ]
+        ]
+    },
+    {
+        "id": "05af",
+        "name": "Add tunnel_key set action with optional dst_port parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.100.1 dst_ip 192.168.200.1 id 789 dst_port 4000 index 10",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action tunnel_key index 10",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 192.168.100.1.*dst_ip 192.168.200.1.*key_id 789.*dst_port 4000.*index 10 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "da80",
+        "name": "Add tunnel_key set action with index at 32-bit maximum",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 11 index 4294967295",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action tunnel_key index 4294967295",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*id 11.*index 4294967295 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "d407",
+        "name": "Add tunnel_key set action with index exceeding 32-bit maximum",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 11 index 4294967295678",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action tunnel_key index 4294967295678",
+        "matchPattern": "action order [0-9]+: tunnel_key set.*index 4294967295678 ref",
+        "matchCount": "0",
+        "teardown": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ]
+    },
+    {
+        "id": "5cba",
+        "name": "Add tunnel_key set action with id value at 32-bit maximum",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 4294967295 index 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 4294967295.*index 1",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "e84a",
+        "name": "Add tunnel_key set action with id value exceeding 32-bit maximum",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42949672955 index 1",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action tunnel_key index 4294967295",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42949672955.*index 1",
+        "matchCount": "0",
+        "teardown": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+       ]
+    },
+    {
+        "id": "9c19",
+        "name": "Add tunnel_key set action with dst_port value at 16-bit maximum",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 429 dst_port 65535 index 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 429.*dst_port 65535.*index 1",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "3bd9",
+        "name": "Add tunnel_key set action with dst_port value exceeding 16-bit maximum",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 429 dst_port 65535789 index 1",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 429.*dst_port 65535789.*index 1",
+        "matchCount": "0",
+        "teardown": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ]
+    },
+    {
+        "id": "68e2",
+        "name": "Add tunnel_key unset action",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key unset index 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*unset.*index 1 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "6192",
+        "name": "Add tunnel_key unset continue action",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key unset continue index 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*unset continue.*index 1 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "061d",
+        "name": "Add tunnel_key set continue action with cookie",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.10.1 dst_ip 192.168.20.2 id 123 continue index 1 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 192.168.10.1.*dst_ip 192.168.20.2.*key_id 123.*csum continue.*index 1.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "8acb",
+        "name": "Add tunnel_key set continue action with invalid cookie",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 192.168.10.1 dst_ip 192.168.20.2 id 123 continue index 1 cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 192.168.10.1.*dst_ip 192.168.20.2.*key_id 123.*csum continue.*index 1.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2777888",
+        "matchCount": "0",
+        "teardown": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ]
+    },
+    {
+        "id": "a07e",
+        "name": "Add tunnel_key action with no set/unset command specified",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1",
+        "matchCount": "0",
+        "teardown": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ]
+    },
+    {
+        "id": "b227",
+        "name": "Add tunnel_key action with csum option",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1 csum index 99",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action tunnel_key index 99",
+        "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*csum pipe.*index 99",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "58a7",
+        "name": "Add tunnel_key action with nocsum option",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7823 nocsum index 234",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action tunnel_key index 234",
+        "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7823.*nocsum pipe.*index 234",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "2575",
+        "name": "Add tunnel_key action with not-supported parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 foobar 999 index 4",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action tunnel_key index 4",
+        "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*foobar 999.*index 4",
+        "matchCount": "0",
+        "teardown": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ]
+    },
+    {
+        "id": "7a88",
+        "name": "Add tunnel_key action with cookie parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action tunnel_key index 4",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "4f20",
+        "name": "Add tunnel_key action with a single geneve option parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022 index 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022.*index 1",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "e33d",
+        "name": "Add tunnel_key action with multiple geneve options parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344 index 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344.*index 1",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "0778",
+        "name": "Add tunnel_key action with invalid class geneve option parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 824212:80:00880022 index 1",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 824212:80:00880022.*index 1",
+        "matchCount": "0",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "4ae8",
+        "name": "Add tunnel_key action with invalid type geneve option parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:4224:00880022 index 1",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:4224:00880022.*index 1",
+        "matchCount": "0",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "4039",
+        "name": "Add tunnel_key action with short data length geneve option parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288 index 1",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288.*index 1",
+        "matchCount": "0",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "26a6",
+        "name": "Add tunnel_key action with non-multiple of 4 data length geneve option parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288428822 index 1",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288428822.*index 1",
+        "matchCount": "0",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "f44d",
+        "name": "Add tunnel_key action with incomplete geneve options parameter",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42: index 1",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:.*index 1",
+        "matchCount": "0",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "7afc",
+        "name": "Replace tunnel_key set action with all parameters",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 csum id 1 index 1"
+        ],
+        "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 nocsum id 11 index 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*nocsum pipe.*index 1",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "364d",
+        "name": "Replace tunnel_key set action with all parameters and cookie",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 index 1 cookie aabbccddeeff112233445566778800a"
+        ],
+        "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 id 11 csum reclassify index 1 cookie a1b1c1d1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action tunnel_key index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*csum reclassify.*index 1.*cookie a1b1c1d1",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "937c",
+        "name": "Fetch all existing tunnel_key actions",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pipe index 1",
+            "$TC actions add action tunnel_key set src_ip 11.10.10.1 dst_ip 21.20.20.2 dst_port 3129 csum id 2 jump 10 index 2",
+            "$TC actions add action tunnel_key set src_ip 12.10.10.1 dst_ip 22.20.20.2 dst_port 3130 csum id 3 pass index 3",
+            "$TC actions add action tunnel_key set src_ip 13.10.10.1 dst_ip 23.20.20.2 dst_port 3131 nocsum id 4 continue index 4"
+        ],
+        "cmdUnderTest": "$TC actions list action tunnel_key",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action tunnel_key",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*dst_port 3128.*nocsum pipe.*index 1.*set.*src_ip 11.10.10.1.*dst_ip 21.20.20.2.*key_id 2.*dst_port 3129.*csum jump 10.*index 2.*set.*src_ip 12.10.10.1.*dst_ip 22.20.20.2.*key_id 3.*dst_port 3130.*csum pass.*index 3.*set.*src_ip 13.10.10.1.*dst_ip 23.20.20.2.*key_id 4.*dst_port 3131.*nocsum continue.*index 4",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
+    },
+    {
+        "id": "6783",
+        "name": "Flush all existing tunnel_key actions",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pipe index 1",
+            "$TC actions add action tunnel_key set src_ip 11.10.10.1 dst_ip 21.20.20.2 dst_port 3129 csum id 2 reclassify index 2",
+            "$TC actions add action tunnel_key set src_ip 12.10.10.1 dst_ip 22.20.20.2 dst_port 3130 csum id 3 pass index 3",
+            "$TC actions add action tunnel_key set src_ip 13.10.10.1 dst_ip 23.20.20.2 dst_port 3131 nocsum id 4 continue index 4"
+        ],
+        "cmdUnderTest": "$TC actions flush action tunnel_key",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action tunnel_key",
+        "matchPattern": "action order [0-9]+:.*",
+        "matchCount": "0",
+        "teardown": [
+           "$TC actions flush action tunnel_key"
+       ]
+    }
+]
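A note on the test format above: in each tdc case, "setup" and "teardown" entries are either a bare command that must exit 0 or a list of a command followed by its acceptable exit codes, so the recurring ["$TC actions flush action tunnel_key", 0, 1, 255] tolerates the flush failing when nothing is installed yet. A test passes when running "verifyCmd" produces exactly "matchCount" occurrences of the "matchPattern" regex. The real harness is the Python tdc tool in tools/testing/selftests/tc-testing; the C sketch below is illustrative only (it assumes tc is in PATH and reuses the verifyCmd/matchPattern of test "b227"), but it shows the same count-the-matches idea:

#include <regex.h>
#include <stdio.h>

/* Run `cmd`, capture its output, and count non-overlapping matches of
 * `pattern` (POSIX ERE; without REG_NEWLINE, `.` also crosses newlines,
 * so a pattern can span tc's multi-line action dump). */
static int count_matches(const char *cmd, const char *pattern)
{
	static char buf[65536];
	regmatch_t m;
	regex_t re;
	const char *s;
	size_t len;
	int count = 0;
	FILE *p;

	p = popen(cmd, "r");
	if (!p)
		return -1;
	len = fread(buf, 1, sizeof(buf) - 1, p);
	buf[len] = '\0';
	pclose(p);

	if (regcomp(&re, pattern, REG_EXTENDED))
		return -1;

	for (s = buf; regexec(&re, s, 1, &m, 0) == 0 && m.rm_eo > 0;
	     s += m.rm_eo)
		count++;

	regfree(&re);
	return count;
}

int main(void)
{
	/* verifyCmd and matchPattern from test "b227" above; assumes tc
	 * is in PATH and the action under test was installed first. */
	int n = count_matches("tc actions get action tunnel_key index 99",
			      "action order [0-9]+: tunnel_key.*csum pipe.*index 99");

	printf("matchCount: %d (test \"b227\" expects 1)\n", n);
	return 0;
}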
index d60506fc77f8bcba61f222db0b0df05a38e2e68b..f9b31a57439b759c1813ca94ac948a998e9dca51 100755 (executable)
@@ -2,6 +2,13 @@
 # SPDX-License-Identifier: GPL-2.0
 # Runs copy_to/from_user infrastructure using test_user_copy kernel module
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if ! /sbin/modprobe -q -n test_user_copy; then
+       echo "user: module test_user_copy is not found [SKIP]"
+       exit $ksft_skip
+fi
 if /sbin/modprobe -q test_user_copy; then
        /sbin/modprobe -q -r test_user_copy
        echo "user_copy: ok"
index 1097f04e4d80e6cff93bb9e912c4c38894cd5959..bcec71250873108efdeae50eab0874b3924204c4 100644 (file)
@@ -16,6 +16,8 @@
 #include <unistd.h>
 #include <string.h>
 
+#include "../kselftest.h"
+
 #define MAP_SIZE 1048576
 
 struct map_list {
@@ -169,7 +171,7 @@ int main(int argc, char **argv)
                printf("Either the sysctl compact_unevictable_allowed is not\n"
                       "set to 1 or couldn't read the proc file.\n"
                       "Skipping the test\n");
-               return 0;
+               return KSFT_SKIP;
        }
 
        lim.rlim_cur = RLIM_INFINITY;
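This hunk and the ones that follow all adopt the same kselftest convention: exit code 4 tells the framework a test was skipped rather than failed. Shell tests hardcode ksft_skip=4 next to a comment (and use tricks like modprobe -q -n to probe for a module without loading it), while C tests pull KSFT_SKIP from ../kselftest.h. A minimal standalone sketch of the C side, with KSFT_SKIP re-defined locally only so it builds outside the tree:

#include <stdio.h>

#define KSFT_SKIP 4	/* mirrors tools/testing/selftests/kselftest.h */

int main(void)
{
	int have_prereq = 0;	/* stand-in for a module/sysctl/syscall probe */

	if (!have_prereq) {
		printf("test: prerequisite missing, skipping\n");
		return KSFT_SKIP;	/* reported as SKIP, not FAIL */
	}

	printf("test: ok\n");
	return 0;
}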
index 4997b9222cfa5055f9c07f4f1f0a1454bae89d6e..637b6d0ac0d0bf63d88ff5f5782a65453b486a7a 100644 (file)
@@ -9,6 +9,8 @@
 #include <stdbool.h>
 #include "mlock2.h"
 
+#include "../kselftest.h"
+
 struct vm_boundaries {
        unsigned long start;
        unsigned long end;
@@ -303,7 +305,7 @@ static int test_mlock_lock()
        if (mlock2_(map, 2 * page_size, 0)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("mlock2(0)");
                goto unmap;
@@ -412,7 +414,7 @@ static int test_mlock_onfault()
        if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("mlock2(MLOCK_ONFAULT)");
                goto unmap;
@@ -425,7 +427,7 @@ static int test_mlock_onfault()
        if (munlock(map, 2 * page_size)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("munlock()");
                goto unmap;
@@ -457,7 +459,7 @@ static int test_lock_onfault_of_present()
        if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("mlock2(MLOCK_ONFAULT)");
                goto unmap;
@@ -583,7 +585,7 @@ static int test_vma_management(bool call_mlock)
        if (call_mlock && mlock2_(map, 3 * page_size, MLOCK_ONFAULT)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("mlock(ONFAULT)\n");
                goto out;
index 22d56467383029b24b52b95e6ee09cc0bb6bf835..88cbe5575f0cf9e0d8f165ecbe27002a3c5ed8a1 100755 (executable)
@@ -2,6 +2,9 @@
 # SPDX-License-Identifier: GPL-2.0
 #please run as root
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 mnt=./huge
 exitcode=0
 
@@ -36,7 +39,7 @@ if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
                echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
                if [ $? -ne 0 ]; then
                        echo "Please run this test as root"
-                       exit 1
+                       exit $ksft_skip
                fi
                while read name size unit; do
                        if [ "$name" = "HugePages_Free:" ]; then
index de2f9ec8a87fb342a7a595a13b009358d9eae000..7b8171e3128a8715a62a10e020c69ea3ca1c5321 100644 (file)
@@ -69,6 +69,8 @@
 #include <setjmp.h>
 #include <stdbool.h>
 
+#include "../kselftest.h"
+
 #ifdef __NR_userfaultfd
 
 static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
@@ -1322,7 +1324,7 @@ int main(int argc, char **argv)
 int main(void)
 {
        printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
-       return 0;
+       return KSFT_SKIP;
 }
 
 #endif /* __NR_userfaultfd */
index 246145b84a127c341fd1fdc4fb41bcf6c7d51644..4d9dc3f2fd7048212181c51f03cef4d1650e07c9 100644 (file)
@@ -610,21 +610,41 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
         */
        for (int i = 0; i < NGREG; i++) {
                greg_t req = requested_regs[i], res = resulting_regs[i];
+
                if (i == REG_TRAPNO || i == REG_IP)
                        continue;       /* don't care */
-               if (i == REG_SP) {
-                       printf("\tSP: %llx -> %llx\n", (unsigned long long)req,
-                              (unsigned long long)res);
 
+               if (i == REG_SP) {
                        /*
-                        * In many circumstances, the high 32 bits of rsp
-                        * are zeroed.  For example, we could be a real
-                        * 32-bit program, or we could hit any of a number
-                        * of poorly-documented IRET or segmented ESP
-                        * oddities.  If this happens, it's okay.
+                        * If we were using a 16-bit stack segment, then
+                        * the kernel is a bit stuck: IRET only restores
+                        * the low 16 bits of ESP/RSP if SS is 16-bit.
+                        * The kernel uses a hack to restore bits 31:16,
+                        * but that hack doesn't help with bits 63:32.
+                        * On Intel CPUs, bits 63:32 end up zeroed, and, on
+                        * AMD CPUs, they leak the high bits of the kernel
+                        * espfix64 stack pointer.  There's very little that
+                        * the kernel can do about it.
+                        *
+                        * Similarly, if we are returning to a 32-bit context,
+                        * the CPU will often lose the high 32 bits of RSP.
                         */
-                       if (res == (req & 0xFFFFFFFF))
-                               continue;  /* OK; not expected to work */
+
+                       if (res == req)
+                               continue;
+
+                       if (cs_bits != 64 && ((res ^ req) & 0xFFFFFFFF) == 0) {
+                               printf("[NOTE]\tSP: %llx -> %llx\n",
+                                      (unsigned long long)req,
+                                      (unsigned long long)res);
+                               continue;
+                       }
+
+                       printf("[FAIL]\tSP mismatch: requested 0x%llx; got 0x%llx\n",
+                              (unsigned long long)requested_regs[i],
+                              (unsigned long long)resulting_regs[i]);
+                       nerrs++;
+                       continue;
                }
 
                bool ignore_reg = false;
@@ -654,25 +674,18 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
 #endif
 
                /* Sanity check on the kernel */
-               if (i == REG_CX && requested_regs[i] != resulting_regs[i]) {
+               if (i == REG_CX && req != res) {
                        printf("[FAIL]\tCX (saved SP) mismatch: requested 0x%llx; got 0x%llx\n",
-                              (unsigned long long)requested_regs[i],
-                              (unsigned long long)resulting_regs[i]);
+                              (unsigned long long)req,
+                              (unsigned long long)res);
                        nerrs++;
                        continue;
                }
 
-               if (requested_regs[i] != resulting_regs[i] && !ignore_reg) {
-                       /*
-                        * SP is particularly interesting here.  The
-                        * usual cause of failures is that we hit the
-                        * nasty IRET case of returning to a 16-bit SS,
-                        * in which case bits 16:31 of the *kernel*
-                        * stack pointer persist in ESP.
-                        */
+               if (req != res && !ignore_reg) {
                        printf("[FAIL]\tReg %d mismatch: requested 0x%llx; got 0x%llx\n",
-                              i, (unsigned long long)requested_regs[i],
-                              (unsigned long long)resulting_regs[i]);
+                              i, (unsigned long long)req,
+                              (unsigned long long)res);
                        nerrs++;
                }
        }
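The reworked SP comparison above distinguishes three outcomes: an exact match passes; a mismatch confined to the high 32 bits is only worth a [NOTE] when returning to a non-64-bit code segment, where IRET drops those bits; anything else counts as a failure. A toy sketch of just that classification (a deliberate simplification; the real test also special-cases REG_CX and registers marked ignore_reg):

#include <stdio.h>

enum sp_result { SP_OK, SP_NOTE, SP_FAIL };

static enum sp_result classify_sp(int cs_bits, unsigned long long req,
				  unsigned long long res)
{
	if (res == req)
		return SP_OK;

	/* Low 32 bits intact: the high bits were dropped by the CPU,
	 * which is expected outside a 64-bit code segment. */
	if (cs_bits != 64 && ((res ^ req) & 0xFFFFFFFFULL) == 0)
		return SP_NOTE;

	return SP_FAIL;
}

int main(void)
{
	unsigned long long req = 0x123400001000ULL;
	unsigned long long res = req & 0xFFFFFFFFULL;	/* high bits lost */

	printf("32-bit CS: %d (1 = note only)\n", classify_sp(32, req, res));
	printf("64-bit CS: %d (2 = failure)\n", classify_sp(64, req, res));
	return 0;
}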
index 754de7da426a80a2ae386042d30a5904b44446e6..232e958ec454756501f2caa8eaf2133067fe10ac 100755 (executable)
@@ -2,6 +2,9 @@
 # SPDX-License-Identifier: GPL-2.0
 TCID="zram.sh"
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 . ./zram_lib.sh
 
 run_zram () {
@@ -24,5 +27,5 @@ elif [ -b /dev/zram0 ]; then
 else
        echo "$TCID : No zram.ko module or /dev/zram0 device file not found"
        echo "$TCID : CONFIG_ZRAM is not set"
-       exit 1
+       exit $ksft_skip
 fi
index f6a9c73e7a442e7988b0820ebc809a342981df91..9e73a4fb9b0aa9b2a2e81368badfbe278876695d 100755 (executable)
@@ -18,6 +18,9 @@ MODULE=0
 dev_makeswap=-1
 dev_mounted=-1
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 trap INT
 
 check_prereqs()
@@ -27,7 +30,7 @@ check_prereqs()
 
        if [ $uid -ne 0 ]; then
                echo $msg must be run as root >&2
-               exit 0
+               exit $ksft_skip
        fi
 }
 
index 9a45f90e2d08974c42c6e6dc242b5cfd35d5e120..369ee308b6686ca4a106581b91f8d382e45c79e8 100644 (file)
@@ -36,7 +36,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
         */
        BUG_ON((unsigned long) page & 0x03);
 #ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
 #endif
        sg->page_link = page_link | (unsigned long) page;
@@ -67,7 +66,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
 static inline struct page *sg_page(struct scatterlist *sg)
 {
 #ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
 #endif
        return (struct page *)((sg)->page_link & ~0x3);
@@ -116,9 +114,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
  **/
 static inline void sg_mark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        /*
         * Set termination bit, clear potential chain bit
         */
@@ -136,17 +131,11 @@ static inline void sg_mark_end(struct scatterlist *sg)
  **/
 static inline void sg_unmark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        sg->page_link &= ~0x02;
 }
 
 static inline struct scatterlist *sg_next(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        if (sg_is_last(sg))
                return NULL;
 
@@ -160,13 +149,6 @@ static inline struct scatterlist *sg_next(struct scatterlist *sg)
 static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
 {
        memset(sgl, 0, sizeof(*sgl) * nents);
-#ifdef CONFIG_DEBUG_SG
-       {
-               unsigned int i;
-               for (i = 0; i < nents; i++)
-                       sgl[i].sg_magic = SG_MAGIC;
-       }
-#endif
        sg_mark_end(&sgl[nents - 1]);
 }
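For context on the removed sg_magic assertions: a scatterlist entry already tags the low two bits of page_link with chain/end markers, which is why sg_assign_page() can BUG_ON a misaligned page pointer and sg_page() masks with ~0x3. A standalone toy model of that low-bit pointer tagging (names are illustrative, not the kernel's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Low-bit flags, free because the stored pointers are >= 4-byte aligned
 * (the kernel uses 0x01 for "chain entry" and 0x02 for "list end"). */
#define FLAG_CHAIN	0x01UL
#define FLAG_END	0x02UL

struct entry {
	unsigned long link;	/* tagged pointer, like sg->page_link */
};

static void entry_set_ptr(struct entry *e, void *p)
{
	assert(((uintptr_t)p & 0x03) == 0);		/* alignment precondition */
	e->link = (uintptr_t)p | (e->link & 0x03);	/* keep the flag bits */
}

static void *entry_ptr(const struct entry *e)
{
	return (void *)(e->link & ~0x03UL);		/* strip the flag bits */
}

int main(void)
{
	static int page;	/* 4-byte-aligned stand-in for a struct page */
	struct entry e = { 0 };

	entry_set_ptr(&e, &page);
	e.link |= FLAG_END;	/* like sg_mark_end() */

	printf("pointer intact: %d, end flag: %lu\n",
	       entry_ptr(&e) == (void *)&page, e.link & FLAG_END);
	return 0;
}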
 
index 72143cfaf6ec39404dad5f72a8cf08c5e5fefc7e..ea434ddc849925c6e2577a9ed6acea906ea8eafd 100644 (file)
@@ -47,7 +47,7 @@ config KVM_GENERIC_DIRTYLOG_READ_PROTECT
 
 config KVM_COMPAT
        def_bool y
-       depends on KVM && COMPAT && !S390
+       depends on KVM && COMPAT && !(S390 || ARM64)
 
 config HAVE_KVM_IRQ_BYPASS
        bool
index 8d90de213ce9b89340b7dc11927862f8344829c7..1d90d79706bd5b71d3914ecd808d2bd6c127286c 100644 (file)
@@ -297,6 +297,8 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
        phys_addr_t next;
 
        assert_spin_locked(&kvm->mmu_lock);
+       WARN_ON(size & ~PAGE_MASK);
+
        pgd = kvm->arch.pgd + stage2_pgd_index(addr);
        do {
                /*
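The new WARN_ON(size & ~PAGE_MASK) fires whenever the unmap size is not a whole number of pages, because ~PAGE_MASK keeps exactly the sub-page bits. A quick userspace check of the idiom, with PAGE_SIZE hardcoded to 4 KiB (the kernel's value is per-architecture):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long sizes[] = { 4096, 8192, 6144, 4097 };

	for (int i = 0; i < 4; i++)
		printf("size %5lu: %s\n", sizes[i],
		       (sizes[i] & ~PAGE_MASK) ?
				"sub-page remainder, would WARN" :
				"whole pages, ok");
	return 0;
}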
index ff7dc890941a8447d6e5abeae6dfe6544fac18d7..cdce653e3c47fb31b9eb0ccf73c3bebd830d8496 100644 (file)
@@ -617,11 +617,6 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
                pr_warn("GICV physical address 0x%llx not page aligned\n",
                        (unsigned long long)info->vcpu.start);
                kvm_vgic_global_state.vcpu_base = 0;
-       } else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
-               pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
-                       (unsigned long long)resource_size(&info->vcpu),
-                       PAGE_SIZE);
-               kvm_vgic_global_state.vcpu_base = 0;
        } else {
                kvm_vgic_global_state.vcpu_base = info->vcpu.start;
                kvm_vgic_global_state.can_emulate_gicv2 = true;
index ada21f47f22b5a902e81572ba94efb16a2a7bccb..8b47507faab5b645295094992c0eaa388765f025 100644 (file)
@@ -116,6 +116,11 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 #ifdef CONFIG_KVM_COMPAT
 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
                                  unsigned long arg);
+#define KVM_COMPAT(c)  .compat_ioctl   = (c)
+#else
+static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
+                               unsigned long arg) { return -EINVAL; }
+#define KVM_COMPAT(c)  .compat_ioctl   = kvm_no_compat_ioctl
 #endif
 static int hardware_enable_all(void);
 static void hardware_disable_all(void);
@@ -2396,11 +2401,9 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
 static struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-       .compat_ioctl   = kvm_vcpu_compat_ioctl,
-#endif
        .mmap           = kvm_vcpu_mmap,
        .llseek         = noop_llseek,
+       KVM_COMPAT(kvm_vcpu_compat_ioctl),
 };
 
 /*
@@ -2824,10 +2827,8 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
 
 static const struct file_operations kvm_device_fops = {
        .unlocked_ioctl = kvm_device_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-       .compat_ioctl = kvm_device_ioctl,
-#endif
        .release = kvm_device_release,
+       KVM_COMPAT(kvm_device_ioctl),
 };
 
 struct kvm_device *kvm_device_from_filp(struct file *filp)
@@ -3165,10 +3166,8 @@ static long kvm_vm_compat_ioctl(struct file *filp,
 static struct file_operations kvm_vm_fops = {
        .release        = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-       .compat_ioctl   = kvm_vm_compat_ioctl,
-#endif
        .llseek         = noop_llseek,
+       KVM_COMPAT(kvm_vm_compat_ioctl),
 };
 
 static int kvm_dev_ioctl_create_vm(unsigned long type)
@@ -3259,8 +3258,8 @@ static long kvm_dev_ioctl(struct file *filp,
 
 static struct file_operations kvm_chardev_ops = {
        .unlocked_ioctl = kvm_dev_ioctl,
-       .compat_ioctl   = kvm_dev_ioctl,
        .llseek         = noop_llseek,
+       KVM_COMPAT(kvm_dev_ioctl),
 };
 
 static struct miscdevice kvm_dev = {
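The KVM_COMPAT() macro introduced above replaces the per-fops #ifdef CONFIG_KVM_COMPAT blocks: when compat support is configured out, .compat_ioctl is wired to a stub that returns -EINVAL instead of being left NULL. A userspace sketch of the pattern with simplified signatures (the kernel's handlers also take a struct file *):

#include <errno.h>
#include <stdio.h>

struct file_operations {
	long (*unlocked_ioctl)(unsigned int ioctl, unsigned long arg);
	long (*compat_ioctl)(unsigned int ioctl, unsigned long arg);
};

static long demo_ioctl(unsigned int ioctl, unsigned long arg)
{
	(void)ioctl;
	(void)arg;
	return 0;
}

#ifdef CONFIG_KVM_COMPAT
#define KVM_COMPAT(c)	.compat_ioctl = (c)
#else
static long no_compat_ioctl(unsigned int ioctl, unsigned long arg)
{
	(void)ioctl;
	(void)arg;
	return -EINVAL;	/* always present, never a NULL handler */
}
#define KVM_COMPAT(c)	.compat_ioctl = no_compat_ioctl
#endif

static struct file_operations demo_fops = {
	.unlocked_ioctl	= demo_ioctl,
	KVM_COMPAT(demo_ioctl),
};

int main(void)
{
	/* Built without CONFIG_KVM_COMPAT, this prints -22 (-EINVAL). */
	printf("compat ioctl returns %ld\n", demo_fops.compat_ioctl(0, 0));
	return 0;
}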