asedeno.scripts.mit.edu Git - linux.git/commitdiff
Merge branch 'x86/urgent' into x86/cache, to pick up dependent fix
author: Ingo Molnar <mingo@kernel.org>
Tue, 9 Oct 2018 06:50:10 +0000 (08:50 +0200)
committer: Ingo Molnar <mingo@kernel.org>
Tue, 9 Oct 2018 06:50:10 +0000 (08:50 +0200)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
409 files changed:
Documentation/devicetree/bindings/input/gpio-keys.txt
Documentation/driver-api/fpga/fpga-mgr.rst
Documentation/fb/uvesafb.txt
Documentation/networking/ip-sysctl.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
arch/arm/boot/dts/bcm63138.dtsi
arch/arm/boot/dts/stm32mp157c.dtsi
arch/arm/boot/dts/sun8i-r40.dtsi
arch/arm/mm/ioremap.c
arch/arm/tools/syscall.tbl
arch/arm64/kvm/guest.c
arch/arm64/mm/hugetlbpage.c
arch/powerpc/include/asm/setup.h
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/tm.S
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/lib/checksum_64.S
arch/powerpc/lib/code-patching.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/pkeys.c
arch/powerpc/platforms/powernv/pci-ioda-tce.c
arch/riscv/include/asm/asm-prototypes.h [new file with mode: 0644]
arch/riscv/kernel/setup.c
arch/x86/boot/compressed/mem_encrypt.S
arch/x86/entry/vdso/Makefile
arch/x86/entry/vdso/vclock_gettime.c
arch/x86/events/amd/uncore.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/perf_event.h
arch/x86/include/asm/uv/uv.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/intel_rdt.h
arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
arch/x86/kernel/tsc.c
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
block/blk-mq-tag.c
block/blk-mq.c
block/elevator.c
drivers/base/firmware_loader/main.c
drivers/base/power/main.c
drivers/block/xen-blkfront.c
drivers/clocksource/timer-atmel-pit.c
drivers/clocksource/timer-fttmr010.c
drivers/clocksource/timer-ti-32k.c
drivers/cpufreq/qcom-cpufreq-kryo.c
drivers/crypto/caam/caamalg.c
drivers/crypto/chelsio/chcr_algo.c
drivers/crypto/chelsio/chcr_crypto.h
drivers/crypto/mxs-dcp.c
drivers/crypto/qat/qat_c3xxx/adf_drv.c
drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
drivers/crypto/qat/qat_c62x/adf_drv.c
drivers/crypto/qat/qat_c62xvf/adf_drv.c
drivers/crypto/qat/qat_dh895xcc/adf_drv.c
drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
drivers/fpga/dfl-fme-region.c
drivers/fpga/fpga-bridge.c
drivers/fpga/of-fpga-region.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/arm/malidp_hw.c
drivers/gpu/drm/arm/malidp_hw.h
drivers/gpu/drm/arm/malidp_mw.c
drivers/gpu/drm/arm/malidp_regs.h
drivers/gpu/drm/drm_client.c
drivers/gpu/drm/drm_fb_cma_helper.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_lease.c
drivers/gpu/drm/drm_panel.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/exynos/exynos_drm_iommu.h
drivers/gpu/drm/i2c/tda9950.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_gpu_error.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pci.c
drivers/hid/hid-ids.h
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/intel-ish-hid/ipc/hw-ish.h
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hv/connection.c
drivers/i2c/busses/i2c-designware-master.c
drivers/i2c/busses/i2c-isch.c
drivers/i2c/busses/i2c-qcom-geni.c
drivers/i2c/busses/i2c-scmi.c
drivers/infiniband/core/cache.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/uverbs_uapi.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/pio.c
drivers/infiniband/hw/hfi1/pio.h
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/input/keyboard/atakbd.c
drivers/input/misc/uinput.c
drivers/input/mouse/elantech.c
drivers/input/touchscreen/egalax_ts.c
drivers/iommu/amd_iommu.c
drivers/md/bcache/bcache.h
drivers/md/bcache/journal.c
drivers/md/bcache/super.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-cache-target.c
drivers/md/dm-mpath.c
drivers/md/dm-raid.c
drivers/md/dm-thin-metadata.c
drivers/media/v4l2-core/v4l2-event.c
drivers/media/v4l2-core/v4l2-fh.c
drivers/mmc/core/host.c
drivers/mmc/core/slot-gpio.c
drivers/mmc/host/renesas_sdhi_sys_dmac.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/b53/b53_common.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amd/declance.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns/hnae.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/huawei/hinic/hinic_main.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/transobj.c
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
drivers/net/ethernet/qlogic/qed/qed_vf.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/falcon/efx.c
drivers/net/hamradio/yam.c
drivers/net/ieee802154/adf7242.c
drivers/net/ieee802154/ca8210.c
drivers/net/ieee802154/mcr20a.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/phy/sfp.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/asix_common.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/lan78xx.c
drivers/net/usb/r8152.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/sr9800.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wimax/i2400m/control.c
drivers/net/wireless/broadcom/b43/dma.c
drivers/net/wireless/intel/iwlwifi/cfg/1000.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mediatek/mt76/mt76x0/main.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/hash.c
drivers/net/xen-netback/interface.c
drivers/nvme/host/multipath.c
drivers/pci/controller/dwc/pcie-designware.c
drivers/pci/controller/dwc/pcie-designware.h
drivers/pci/controller/pci-mvebu.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/pci.c
drivers/pinctrl/intel/pinctrl-cannonlake.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/pinctrl-amd.c
drivers/regulator/bd71837-regulator.c
drivers/regulator/core.c
drivers/regulator/of_regulator.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.c
drivers/s390/net/qeth_core_mpc.h
drivers/scsi/qedi/qedi_main.c
drivers/soc/fsl/qbman/qman.c
drivers/soc/fsl/qe/ucc.c
drivers/spi/spi-gpio.c
drivers/spi/spi-rspi.c
drivers/spi/spi-sh-msiof.c
drivers/spi/spi-tegra20-slink.c
drivers/thunderbolt/icm.c
drivers/thunderbolt/nhi.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/sh-sci.c
drivers/usb/class/cdc-acm.c
drivers/usb/host/xhci-mtk.c
drivers/usb/host/xhci-pci.c
drivers/usb/serial/option.c
drivers/usb/serial/usb-serial-simple.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
drivers/video/fbdev/pxa168fb.c
drivers/video/fbdev/stifb.c
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/smb2ops.c
fs/cifs/transport.c
fs/dax.c
fs/ext2/inode.c
fs/ioctl.c
fs/iomap.c
fs/nfsd/vfs.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/refcounttree.c
fs/overlayfs/copy_up.c
fs/overlayfs/file.c
fs/overlayfs/inode.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/util.c
fs/proc/base.c
fs/pstore/ram.c
fs/read_write.c
fs/xattr.c
fs/xfs/libxfs/xfs_attr.c
fs/xfs/libxfs/xfs_attr_remote.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_format.h
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/scrub/alloc.c
fs/xfs/scrub/inode.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_buf_item.h
fs/xfs/xfs_inode.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_reflink.c
fs/xfs/xfs_trace.h
fs/xfs/xfs_trans.c
fs/xfs/xfs_trans_buf.c
include/drm/drm_client.h
include/drm/drm_panel.h
include/linux/fpga/fpga-mgr.h
include/linux/fs.h
include/linux/hugetlb.h
include/linux/mfd/rohm-bd718x7.h
include/linux/mlx5/transobj.h
include/linux/mm.h
include/linux/mmzone.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/regulator/machine.h
include/linux/serial_sci.h
include/linux/spi/spi-mem.h
include/linux/virtio_net.h
include/media/v4l2-fh.h
include/net/bonding.h
include/net/cfg80211.h
include/net/inet_sock.h
include/net/netlink.h
include/trace/events/migrate.h
include/trace/events/rxrpc.h
include/uapi/asm-generic/hugetlb_encode.h
include/uapi/linux/memfd.h
include/uapi/linux/mman.h
include/uapi/linux/shm.h
ipc/shm.c
kernel/bpf/local_storage.c
kernel/bpf/verifier.c
kernel/dma/Kconfig
kernel/events/core.c
kernel/locking/test-ww_mutex.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/sched.h
lib/xz/xz_crc32.c
lib/xz/xz_private.h
mm/gup_benchmark.c
mm/huge_memory.c
mm/hugetlb.c
mm/madvise.c
mm/migrate.c
mm/page_alloc.c
mm/rmap.c
mm/vmscan.c
mm/vmstat.c
net/bluetooth/mgmt.c
net/bluetooth/smp.c
net/bluetooth/smp.h
net/bpfilter/bpfilter_kern.c
net/bridge/br_netfilter_hooks.c
net/core/ethtool.c
net/core/netpoll.c
net/core/rtnetlink.c
net/dccp/input.c
net/dccp/ipv4.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_sockglue.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/xfrm4_input.c
net/ipv4/xfrm4_mode_transport.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/xfrm6_input.c
net/ipv6/xfrm6_mode_transport.c
net/ipv6/xfrm6_output.c
net/mac80211/cfg.c
net/mac80211/iface.c
net/mac80211/mesh.h
net/mac80211/mesh_hwmp.c
net/mac80211/status.c
net/mac80211/tdls.c
net/mac80211/tx.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nft_osf.c
net/netfilter/nft_set_rbtree.c
net/netfilter/xt_socket.c
net/openvswitch/conntrack.c
net/packet/af_packet.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/conn_object.c
net/rxrpc/input.c
net/rxrpc/local_object.c
net/rxrpc/output.c
net/rxrpc/peer_event.c
net/rxrpc/peer_object.c
net/rxrpc/protocol.h
net/sched/act_ipt.c
net/sched/sch_api.c
net/sctp/outqueue.c
net/tipc/bearer.c
net/tipc/link.c
net/tipc/link.h
net/tipc/node.c
net/tipc/socket.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/scan.c
net/wireless/wext-compat.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
sound/hda/hdac_i915.c
sound/pci/hda/patch_realtek.c
tools/hv/hv_fcopy_daemon.c
tools/kvm/kvm_stat/kvm_stat
tools/testing/selftests/powerpc/alignment/Makefile
tools/testing/selftests/powerpc/benchmarks/Makefile
tools/testing/selftests/powerpc/cache_shape/Makefile
tools/testing/selftests/powerpc/copyloops/Makefile
tools/testing/selftests/powerpc/dscr/Makefile
tools/testing/selftests/powerpc/math/Makefile
tools/testing/selftests/powerpc/mm/Makefile
tools/testing/selftests/powerpc/pmu/Makefile
tools/testing/selftests/powerpc/pmu/ebb/Makefile
tools/testing/selftests/powerpc/primitives/Makefile
tools/testing/selftests/powerpc/ptrace/Makefile
tools/testing/selftests/powerpc/signal/Makefile
tools/testing/selftests/powerpc/stringloops/Makefile
tools/testing/selftests/powerpc/switch_endian/Makefile
tools/testing/selftests/powerpc/syscalls/Makefile
tools/testing/selftests/powerpc/tm/Makefile
tools/testing/selftests/powerpc/vphn/Makefile
tools/testing/selftests/rseq/param_test.c
tools/testing/selftests/x86/test_vdso.c

index 996ce84352cbf11627adc17c13d079cc17601de1..7cccc49b6beade0d60adaafc228b3ed8ecfa34e0 100644 (file)
@@ -1,4 +1,4 @@
-Device-Tree bindings for input/gpio_keys.c keyboard driver
+Device-Tree bindings for input/keyboard/gpio_keys.c keyboard driver
 
 Required properties:
        - compatible = "gpio-keys";
index 4b3825da48d9de55bac336fe131879d084b8d6fb..82b6dbbd31cdea13fb3d93d1102615a638f52a16 100644 (file)
@@ -184,6 +184,11 @@ API for implementing a new FPGA Manager driver
 API for programming an FPGA
 ---------------------------
 
+FPGA Manager flags
+
+.. kernel-doc:: include/linux/fpga/fpga-mgr.h
+   :doc: FPGA Manager flags
+
 .. kernel-doc:: include/linux/fpga/fpga-mgr.h
    :functions: fpga_image_info
 
index f6362d88763b852e0704321af6d22653b95cc4d6..aa924196c36603abcc4d72a619ebce5635050ca4 100644 (file)
@@ -15,7 +15,8 @@ than x86.  Check the v86d documentation for a list of currently supported
 arches.
 
 v86d source code can be downloaded from the following website:
-  http://dev.gentoo.org/~spock/projects/uvesafb
+
+  https://github.com/mjanusz/v86d
 
 Please refer to the v86d documentation for detailed configuration and
 installation instructions.
@@ -177,7 +178,7 @@ from the Video BIOS if you set pixclock to 0 in fb_var_screeninfo.
 
 --
  Michal Januszewski <spock@gentoo.org>
- Last updated: 2009-03-30
+ Last updated: 2017-10-10
 
  Documentation of the uvesafb options is loosely based on vesafb.txt.
 
index 8313a636dd533540172859653bcfa173c1e03864..960de8fe3f401c7ce4ceee0d5d3d61cb46102319 100644 (file)
@@ -425,7 +425,7 @@ tcp_mtu_probing - INTEGER
          1 - Disabled by default, enabled when an ICMP black hole detected
          2 - Always enabled, use initial MSS of tcp_base_mss.
 
-tcp_probe_interval - INTEGER
+tcp_probe_interval - UNSIGNED INTEGER
        Controls how often to start TCP Packetization-Layer Path MTU
        Discovery reprobe. The default is reprobing every 10 minutes as
        per RFC4821.
index 02a39617ec8285cc875312ddaee1aa73cac0b237..48a65c3a41898f9d747379ebd602bbe8e1ea29b2 100644 (file)
@@ -324,7 +324,6 @@ F:  Documentation/ABI/testing/sysfs-bus-acpi
 F:     Documentation/ABI/testing/configfs-acpi
 F:     drivers/pci/*acpi*
 F:     drivers/pci/*/*acpi*
-F:     drivers/pci/*/*/*acpi*
 F:     tools/power/acpi/
 
 ACPI APEI
@@ -1251,7 +1250,7 @@ N:        meson
 
 ARM/Annapurna Labs ALPINE ARCHITECTURE
 M:     Tsahee Zidenberg <tsahee@annapurnalabs.com>
-M:     Antoine Tenart <antoine.tenart@free-electrons.com>
+M:     Antoine Tenart <antoine.tenart@bootlin.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-alpine/
@@ -2956,7 +2955,6 @@ F:        include/linux/bcm963xx_tag.h
 
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
 M:     Rasesh Mody <rasesh.mody@cavium.com>
-M:     Harish Patil <harish.patil@cavium.com>
 M:     Dept-GELinuxNICDev@cavium.com
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -2977,6 +2975,7 @@ F:        drivers/scsi/bnx2i/
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
 M:     Ariel Elior <ariel.elior@cavium.com>
+M:     Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
 M:     everest-linux-l2@cavium.com
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -5470,7 +5469,8 @@ S:        Odd Fixes
 F:     drivers/net/ethernet/agere/
 
 ETHERNET BRIDGE
-M:     Stephen Hemminger <stephen@networkplumber.org>
+M:     Roopa Prabhu <roopa@cumulusnetworks.com>
+M:     Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
 L:     bridge@lists.linux-foundation.org (moderated for non-subscribers)
 L:     netdev@vger.kernel.org
 W:     http://www.linuxfoundation.org/en/Net:Bridge
@@ -8598,7 +8598,6 @@ F:        include/linux/spinlock*.h
 F:     arch/*/include/asm/spinlock*.h
 F:     include/linux/rwlock*.h
 F:     include/linux/mutex*.h
-F:     arch/*/include/asm/mutex*.h
 F:     include/linux/rwsem*.h
 F:     arch/*/include/asm/rwsem.h
 F:     include/linux/seqlock.h
@@ -9716,13 +9715,6 @@ Q:       http://patchwork.linuxtv.org/project/linux-media/list/
 S:     Maintained
 F:     drivers/media/dvb-frontends/mn88473*
 
-PCI DRIVER FOR MOBIVEIL PCIE IP
-M:     Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-L:     linux-pci@vger.kernel.org
-S:     Supported
-F:     Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
-F:     drivers/pci/controller/pcie-mobiveil.c
-
 MODULE SUPPORT
 M:     Jessica Yu <jeyu@kernel.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -10949,7 +10941,7 @@ M:      Willy Tarreau <willy@haproxy.com>
 M:     Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
 S:     Odd Fixes
 F:     Documentation/auxdisplay/lcd-panel-cgram.txt
-F:     drivers/misc/panel.c
+F:     drivers/auxdisplay/panel.c
 
 PARALLEL PORT SUBSYSTEM
 M:     Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -11137,6 +11129,13 @@ F:     include/uapi/linux/switchtec_ioctl.h
 F:     include/linux/switchtec.h
 F:     drivers/ntb/hw/mscc/
 
+PCI DRIVER FOR MOBIVEIL PCIE IP
+M:     Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+L:     linux-pci@vger.kernel.org
+S:     Supported
+F:     Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F:     drivers/pci/controller/pcie-mobiveil.c
+
 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
 M:     Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 M:     Jason Cooper <jason@lakedaemon.net>
@@ -11203,8 +11202,14 @@ F:     tools/pci/
 
 PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
 M:     Russell Currey <ruscur@russell.cc>
+M:     Sam Bobroff <sbobroff@linux.ibm.com>
+M:     Oliver O'Halloran <oohall@gmail.com>
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Supported
+F:     Documentation/PCI/pci-error-recovery.txt
+F:     drivers/pci/pcie/aer.c
+F:     drivers/pci/pcie/dpc.c
+F:     drivers/pci/pcie/err.c
 F:     Documentation/powerpc/eeh-pci-error-recovery.txt
 F:     arch/powerpc/kernel/eeh*.c
 F:     arch/powerpc/platforms/*/eeh*.c
@@ -11973,7 +11978,7 @@ F:      Documentation/scsi/LICENSE.qla4xxx
 F:     drivers/scsi/qla4xxx/
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:     Harish Patil <harish.patil@cavium.com>
+M:     Shahed Shaikh <Shahed.Shaikh@cavium.com>
 M:     Manish Chopra <manish.chopra@cavium.com>
 M:     Dept-GELinuxNICDev@cavium.com
 L:     netdev@vger.kernel.org
@@ -11981,7 +11986,6 @@ S:      Supported
 F:     drivers/net/ethernet/qlogic/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M:     Harish Patil <harish.patil@cavium.com>
 M:     Manish Chopra <manish.chopra@cavium.com>
 M:     Dept-GELinuxNICDev@cavium.com
 L:     netdev@vger.kernel.org
@@ -15389,7 +15393,7 @@ S:      Maintained
 UVESAFB DRIVER
 M:     Michal Januszewski <spock@gentoo.org>
 L:     linux-fbdev@vger.kernel.org
-W:     http://dev.gentoo.org/~spock/projects/uvesafb/
+W:     https://github.com/mjanusz/v86d
 S:     Maintained
 F:     Documentation/fb/uvesafb.txt
 F:     drivers/video/fbdev/uvesafb.*
index 0c90c435497921f581a04c56b5c73d51f4df7383..9b2df076885a844d22033ba5daa75f6c2eda29dc 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
index b10dccd0958f335ce3d874aa8d9eb171336a882a..3b1baa8605a77e8f724724550e5ec123df608732 100644 (file)
@@ -11,6 +11,7 @@
 #include "sama5d2-pinfunc.h"
 #include <dt-bindings/mfd/atmel-flexcom.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/at91.h>
 
 / {
        model = "Atmel SAMA5D2 PTC EK";
@@ -299,6 +300,7 @@ re_we_data {
                                                         <PIN_PA30__NWE_NANDWE>,
                                                         <PIN_PB2__NRD_NANDOE>;
                                                bias-pull-up;
+                                               atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
                                        };
 
                                        ale_cle_rdy_cs {
index 43ee992ccdcf70230cf1f50a33c3c51a6b483f2f..6df61518776f7e45ef8a290fd1920ab675ca649c 100644 (file)
@@ -106,21 +106,23 @@ gic: interrupt-controller@1e100 {
                global_timer: timer@1e200 {
                        compatible = "arm,cortex-a9-global-timer";
                        reg = <0x1e200 0x20>;
-                       interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
                        clocks = <&axi_clk>;
                };
 
                local_timer: local-timer@1e600 {
                        compatible = "arm,cortex-a9-twd-timer";
                        reg = <0x1e600 0x20>;
-                       interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+                                                 IRQ_TYPE_EDGE_RISING)>;
                        clocks = <&axi_clk>;
                };
 
                twd_watchdog: watchdog@1e620 {
                        compatible = "arm,cortex-a9-twd-wdt";
                        reg = <0x1e620 0x20>;
-                       interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
+                                                 IRQ_TYPE_LEVEL_HIGH)>;
                };
 
                armpll: armpll {
@@ -158,7 +160,7 @@ timer: timer@80 {
                serial0: serial@600 {
                        compatible = "brcm,bcm6345-uart";
                        reg = <0x600 0x1b>;
-                       interrupts = <GIC_SPI 32 0>;
+                       interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&periph_clk>;
                        clock-names = "periph";
                        status = "disabled";
@@ -167,7 +169,7 @@ serial0: serial@600 {
                serial1: serial@620 {
                        compatible = "brcm,bcm6345-uart";
                        reg = <0x620 0x1b>;
-                       interrupts = <GIC_SPI 33 0>;
+                       interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&periph_clk>;
                        clock-names = "periph";
                        status = "disabled";
@@ -180,7 +182,7 @@ nand: nand@2000 {
                        reg = <0x2000 0x600>, <0xf0 0x10>;
                        reg-names = "nand", "nand-int-base";
                        status = "disabled";
-                       interrupts = <GIC_SPI 38 0>;
+                       interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "nand";
                };
 
index 661be948ab7424759ebfdb2d1c780822d17f38b0..185541a5b69fb58127136284f86341845b963af3 100644 (file)
@@ -1078,8 +1078,8 @@ spi6: spi@5c001000 {
                        interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&rcc SPI6_K>;
                        resets = <&rcc SPI6_R>;
-                       dmas = <&mdma1 34 0x0 0x40008 0x0 0x0 0>,
-                              <&mdma1 35 0x0 0x40002 0x0 0x0 0>;
+                       dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
+                              <&mdma1 35 0x0 0x40002 0x0 0x0>;
                        dma-names = "rx", "tx";
                        status = "disabled";
                };
index ffd9f00f74a46da89d88040db9178c8ef01a37d4..5f547c161bafd23a3054b6c084599955d753314b 100644 (file)
@@ -800,8 +800,7 @@ hdmi_out: port@1 {
                };
 
                hdmi_phy: hdmi-phy@1ef0000 {
-                       compatible = "allwinner,sun8i-r40-hdmi-phy",
-                                    "allwinner,sun50i-a64-hdmi-phy";
+                       compatible = "allwinner,sun8i-r40-hdmi-phy";
                        reg = <0x01ef0000 0x10000>;
                        clocks = <&ccu CLK_BUS_HDMI1>, <&ccu CLK_HDMI_SLOW>,
                                 <&ccu 7>, <&ccu 16>;
index fc91205ff46cebd2218940214c12e6ab84355b21..5bf9443cfbaa63108f8bc11f4356774591e3e29c 100644 (file)
@@ -473,7 +473,7 @@ void pci_ioremap_set_mem_type(int mem_type)
 
 int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
 {
-       BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+       BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
 
        return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
                                  PCI_IO_VIRT_BASE + offset + SZ_64K,
index fbc74b5fa3ed26a4bc56657afb3691372aea431e..8edf93b4490fad24e4ef0c4195b6a535b4a87a94 100644 (file)
 396    common  pkey_free               sys_pkey_free
 397    common  statx                   sys_statx
 398    common  rseq                    sys_rseq
+399    common  io_pgetevents           sys_io_pgetevents
index 07256b08226c0c935d7ced6530a4a7a85ee4c276..a6c9fbaeaefcdd71d0ea70c8eeb89c55692f8b66 100644 (file)
@@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id)
        return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+       u64 off = core_reg_offset_from_id(reg->id);
+       int size;
+
+       switch (off) {
+       case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+            KVM_REG_ARM_CORE_REG(regs.regs[30]):
+       case KVM_REG_ARM_CORE_REG(regs.sp):
+       case KVM_REG_ARM_CORE_REG(regs.pc):
+       case KVM_REG_ARM_CORE_REG(regs.pstate):
+       case KVM_REG_ARM_CORE_REG(sp_el1):
+       case KVM_REG_ARM_CORE_REG(elr_el1):
+       case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+            KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+               size = sizeof(__u64);
+               break;
+
+       case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+            KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+               size = sizeof(__uint128_t);
+               break;
+
+       case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+       case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+               size = sizeof(__u32);
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       if (KVM_REG_SIZE(reg->id) == size &&
+           IS_ALIGNED(off, size / sizeof(__u32)))
+               return 0;
+
+       return -EINVAL;
+}
+
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
        /*
@@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;
 
+       if (validate_core_offset(reg))
+               return -EINVAL;
+
        if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
                return -EFAULT;
 
@@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;
 
+       if (validate_core_offset(reg))
+               return -EINVAL;
+
        if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
                return -EINVAL;
 
@@ -107,17 +152,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        }
 
        if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
-               u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
+               u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
                switch (mode) {
                case PSR_AA32_MODE_USR:
+                       if (!system_supports_32bit_el0())
+                               return -EINVAL;
+                       break;
                case PSR_AA32_MODE_FIQ:
                case PSR_AA32_MODE_IRQ:
                case PSR_AA32_MODE_SVC:
                case PSR_AA32_MODE_ABT:
                case PSR_AA32_MODE_UND:
+                       if (!vcpu_el1_is_32bit(vcpu))
+                               return -EINVAL;
+                       break;
                case PSR_MODE_EL0t:
                case PSR_MODE_EL1t:
                case PSR_MODE_EL1h:
+                       if (vcpu_el1_is_32bit(vcpu))
+                               return -EINVAL;
                        break;
                default:
                        err = -EINVAL;
index 192b3ba070755f70d41f13d3c68eaa18b2b7f17d..f58ea503ad014fda52fbab06e6edc743551a4b6c 100644 (file)
@@ -117,11 +117,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,
 
                /*
                 * If HW_AFDBM is enabled, then the HW could turn on
-                * the dirty bit for any page in the set, so check
-                * them all.  All hugetlb entries are already young.
+                * the dirty or accessed bit for any page in the set,
+                * so check them all.
                 */
                if (pte_dirty(pte))
                        orig_pte = pte_mkdirty(orig_pte);
+
+               if (pte_young(pte))
+                       orig_pte = pte_mkyoung(orig_pte);
        }
 
        if (valid) {
@@ -320,11 +323,40 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
        return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
 }
 
+/*
+ * huge_ptep_set_access_flags will update access flags (dirty, accesssed)
+ * and write permission.
+ *
+ * For a contiguous huge pte range we need to check whether or not write
+ * permission has to change only on the first pte in the set. Then for
+ * all the contiguous ptes we need to check whether or not there is a
+ * discrepancy between dirty or young.
+ */
+static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
+{
+       int i;
+
+       if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
+               return 1;
+
+       for (i = 0; i < ncontig; i++) {
+               pte_t orig_pte = huge_ptep_get(ptep + i);
+
+               if (pte_dirty(pte) != pte_dirty(orig_pte))
+                       return 1;
+
+               if (pte_young(pte) != pte_young(orig_pte))
+                       return 1;
+       }
+
+       return 0;
+}
+
 int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                               unsigned long addr, pte_t *ptep,
                               pte_t pte, int dirty)
 {
-       int ncontig, i, changed = 0;
+       int ncontig, i;
        size_t pgsize = 0;
        unsigned long pfn = pte_pfn(pte), dpfn;
        pgprot_t hugeprot;
@@ -336,19 +368,23 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
        ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
        dpfn = pgsize >> PAGE_SHIFT;
 
+       if (!__cont_access_flags_changed(ptep, pte, ncontig))
+               return 0;
+
        orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
-       if (!pte_same(orig_pte, pte))
-               changed = 1;
 
-       /* Make sure we don't lose the dirty state */
+       /* Make sure we don't lose the dirty or young state */
        if (pte_dirty(orig_pte))
                pte = pte_mkdirty(pte);
 
+       if (pte_young(orig_pte))
+               pte = pte_mkyoung(pte);
+
        hugeprot = pte_pgprot(pte);
        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
 
-       return changed;
+       return 1;
 }
 
 void huge_ptep_set_wrprotect(struct mm_struct *mm,
index 1a951b00465d739f0b7268803d9c1a25e92e947f..1fffbba8d6a5e64a5fefdb06a6ecab29f4ec66e5 100644 (file)
@@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
 
 extern unsigned int rtas_data;
 extern unsigned long long memory_limit;
+extern bool init_mem_is_free;
 extern unsigned long klimit;
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
 
index ea04dfb8c0927f71e1a89937526f3e1f2d7fd241..2d8fc8c9da7a1f210816bd9734c3d8453d8fc04e 100644 (file)
@@ -1314,9 +1314,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
 
 #ifdef CONFIG_PPC_DENORMALISATION
        mfspr   r10,SPRN_HSRR1
-       mfspr   r11,SPRN_HSRR0          /* save HSRR0 */
        andis.  r10,r10,(HSRR1_DENORM)@h /* denorm? */
-       addi    r11,r11,-4              /* HSRR0 is next instruction */
        bne+    denorm_assist
 #endif
 
@@ -1382,6 +1380,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  */
        XVCPSGNDP32(32)
 denorm_done:
+       mfspr   r11,SPRN_HSRR0
+       subi    r11,r11,4
        mtspr   SPRN_HSRR0,r11
        mtcrf   0x80,r9
        ld      r9,PACA_EXGEN+EX_R9(r13)
index 913c5725cdb2ad416d06513ed6a72240b4e9aa6d..bb6ac471a784e70918d25a450e31ecba3f352881 100644 (file)
@@ -1306,6 +1306,16 @@ void show_user_instructions(struct pt_regs *regs)
 
        pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int));
 
+       /*
+        * Make sure the NIP points at userspace, not kernel text/data or
+        * elsewhere.
+        */
+       if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) {
+               pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
+                       current->comm, current->pid);
+               return;
+       }
+
        pr_info("%s[%d]: code: ", current->comm, current->pid);
 
        for (i = 0; i < instructions_to_print; i++) {
index 6bffbc5affe76ba7847ceb74b69e16cc53ac4178..7716374786bd97c7e56390ea587e967d75c68a2e 100644 (file)
@@ -176,13 +176,27 @@ _GLOBAL(tm_reclaim)
        std     r1, PACATMSCRATCH(r13)
        ld      r1, PACAR1(r13)
 
-       /* Store the PPR in r11 and reset to decent value */
        std     r11, GPR11(r1)                  /* Temporary stash */
 
+       /*
+        * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+        * clobbered by an exception once we turn on MSR_RI below.
+        */
+       ld      r11, PACATMSCRATCH(r13)
+       std     r11, GPR1(r1)
+
+       /*
+        * Store r13 away so we can free up the scratch SPR for the SLB fault
+        * handler (needed once we start accessing the thread_struct).
+        */
+       GET_SCRATCH0(r11)
+       std     r11, GPR13(r1)
+
        /* Reset MSR RI so we can take SLB faults again */
        li      r11, MSR_RI
        mtmsrd  r11, 1
 
+       /* Store the PPR in r11 and reset to decent value */
        mfspr   r11, SPRN_PPR
        HMT_MEDIUM
 
@@ -207,11 +221,11 @@ _GLOBAL(tm_reclaim)
        SAVE_GPR(8, r7)                         /* user r8 */
        SAVE_GPR(9, r7)                         /* user r9 */
        SAVE_GPR(10, r7)                        /* user r10 */
-       ld      r3, PACATMSCRATCH(r13)          /* user r1 */
+       ld      r3, GPR1(r1)                    /* user r1 */
        ld      r4, GPR7(r1)                    /* user r7 */
        ld      r5, GPR11(r1)                   /* user r11 */
        ld      r6, GPR12(r1)                   /* user r12 */
-       GET_SCRATCH0(8)                         /* user r13 */
+       ld      r8, GPR13(r1)                   /* user r13 */
        std     r3, GPR1(r7)
        std     r4, GPR7(r7)
        std     r5, GPR11(r7)
index 933c574e1cf795d65855b60d763c62edf4d1996a..998f8d089ac7ea840341f0a741df3989d30542bc 100644 (file)
@@ -646,6 +646,16 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         */
        local_irq_disable();
        ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+       /*
+        * If the PTE disappeared temporarily due to a THP
+        * collapse, just return and let the guest try again.
+        */
+       if (!ptep) {
+               local_irq_enable();
+               if (page)
+                       put_page(page);
+               return RESUME_GUEST;
+       }
        pte = *ptep;
        local_irq_enable();
 
index 886ed94b9c13307f5fc739e899274704faa89a15..d05c8af4ac51fe4c696469664e4bf6ceb9dd7d64 100644 (file)
@@ -443,6 +443,9 @@ _GLOBAL(csum_ipv6_magic)
        addc    r0, r8, r9
        ld      r10, 0(r4)
        ld      r11, 8(r4)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+       rotldi  r5, r5, 8
+#endif
        adde    r0, r0, r10
        add     r5, r5, r7
        adde    r0, r0, r11
index 850f3b8f4da5e55346afbbe939987b0a0642aa7d..5ffee298745fe4e98a66d2410d86e6579d2b2560 100644 (file)
@@ -142,7 +142,7 @@ static inline int unmap_patch_area(unsigned long addr)
        return 0;
 }
 
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
 {
        int err;
        unsigned int *patch_addr = NULL;
@@ -182,12 +182,22 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
 }
 #else /* !CONFIG_STRICT_KERNEL_RWX */
 
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
 {
        return raw_patch_instruction(addr, instr);
 }
 
 #endif /* CONFIG_STRICT_KERNEL_RWX */
+
+int patch_instruction(unsigned int *addr, unsigned int instr)
+{
+       /* Make sure we aren't patching a freed init section */
+       if (init_mem_is_free && init_section_contains(addr, 4)) {
+               pr_debug("Skipping init section patching addr: 0x%px\n", addr);
+               return 0;
+       }
+       return do_patch_instruction(addr, instr);
+}
 NOKPROBE_SYMBOL(patch_instruction);
 
 int patch_branch(unsigned int *addr, unsigned long target, int flags)
index 5c8530d0c611898f012e2cc8f300a7112715c351..04ccb274a6205bba58357d5897105ada90f81c0f 100644 (file)
@@ -63,6 +63,7 @@
 #endif
 
 unsigned long long memory_limit;
+bool init_mem_is_free;
 
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
@@ -396,6 +397,7 @@ void free_initmem(void)
 {
        ppc_md.progress = ppc_printk_progress;
        mark_initmem_nx();
+       init_mem_is_free = true;
        free_initmem_default(POISON_FREE_INITMEM);
 }
 
index 35ac5422903a0ee5494c92b637bcc64604c7cba3..055b211b7126694e1a97643542a8013853ce1c17 100644 (file)
@@ -1204,7 +1204,9 @@ int find_and_online_cpu_nid(int cpu)
        int new_nid;
 
        /* Use associativity from first thread for all siblings */
-       vphn_get_associativity(cpu, associativity);
+       if (vphn_get_associativity(cpu, associativity))
+               return cpu_to_node(cpu);
+
        new_nid = associativity_to_nid(associativity);
        if (new_nid < 0 || !node_possible(new_nid))
                new_nid = first_online_node;
@@ -1215,9 +1217,10 @@ int find_and_online_cpu_nid(int cpu)
                 * Need to ensure that NODE_DATA is initialized for a node from
                 * available memory (see memblock_alloc_try_nid). If unable to
                 * init the node, then default to nearest node that has memory
-                * installed.
+                * installed. Skip onlining a node if the subsystems are not
+                * yet initialized.
                 */
-               if (try_online_node(new_nid))
+               if (!topology_inited || try_online_node(new_nid))
                        new_nid = first_online_node;
 #else
                /*
@@ -1452,7 +1455,8 @@ static struct timer_list topology_timer;
 
 static void reset_topology_timer(void)
 {
-       mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
+       if (vphn_enabled)
+               mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
 }
 
 #ifdef CONFIG_SMP
index 333b1f80c435435cbf703a477e4bb60f6181a058..b271b283c785e3a07589ea81c6b8e40e7def5a69 100644 (file)
@@ -45,7 +45,7 @@ static void scan_pkey_feature(void)
         * Since any pkey can be used for data or execute, we will just treat
         * all keys as equal and track them as one entity.
         */
-       pkeys_total = be32_to_cpu(vals[0]);
+       pkeys_total = vals[0];
        pkeys_devtree_defined = true;
 }
 
index 6c5db1acbe8dffaba711faf55d13b2baa6c6a965..fe9691040f54c26561949c469738f277c90069e6 100644 (file)
@@ -276,7 +276,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
        level_shift = entries_shift + 3;
        level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);
 
-       if ((level_shift - 3) * levels + page_shift >= 60)
+       if ((level_shift - 3) * levels + page_shift >= 55)
                return -EINVAL;
 
        /* Allocate TCE table */
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
new file mode 100644 (file)
index 0000000..c9fecd1
--- /dev/null
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_PROTOTYPES_H
+#define _ASM_RISCV_PROTOTYPES_H
+
+#include <linux/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_RISCV_PROTOTYPES_H */
index aee6031230306a934747c64edb4b61f6928e9e8b..b2d26d9d8489c8e8b6bba01adee0c573fd6564f8 100644 (file)
@@ -186,7 +186,7 @@ static void __init setup_bootmem(void)
        BUG_ON(mem_size == 0);
 
        set_max_mapnr(PFN_DOWN(mem_size));
-       max_low_pfn = pfn_base + PFN_DOWN(mem_size);
+       max_low_pfn = memblock_end_of_DRAM();
 
 #ifdef CONFIG_BLK_DEV_INITRD
        setup_initrd();
index eaa843a52907ffd8a166c09e98c1594627aff4ac..a480356e0ed886006749d69488c5af625828bf6d 100644 (file)
@@ -25,20 +25,6 @@ ENTRY(get_sev_encryption_bit)
        push    %ebx
        push    %ecx
        push    %edx
-       push    %edi
-
-       /*
-        * RIP-relative addressing is needed to access the encryption bit
-        * variable. Since we are running in 32-bit mode we need this call/pop
-        * sequence to get the proper relative addressing.
-        */
-       call    1f
-1:     popl    %edi
-       subl    $1b, %edi
-
-       movl    enc_bit(%edi), %eax
-       cmpl    $0, %eax
-       jge     .Lsev_exit
 
        /* Check if running under a hypervisor */
        movl    $1, %eax
@@ -69,15 +55,12 @@ ENTRY(get_sev_encryption_bit)
 
        movl    %ebx, %eax
        andl    $0x3f, %eax             /* Return the encryption bit location */
-       movl    %eax, enc_bit(%edi)
        jmp     .Lsev_exit
 
 .Lno_sev:
        xor     %eax, %eax
-       movl    %eax, enc_bit(%edi)
 
 .Lsev_exit:
-       pop     %edi
        pop     %edx
        pop     %ecx
        pop     %ebx
@@ -113,8 +96,6 @@ ENTRY(set_sev_encryption_mask)
 ENDPROC(set_sev_encryption_mask)
 
        .data
-enc_bit:
-       .int    0xffffffff
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
        .balign 8
index fa3f439f0a9200321a96efd2ba874dbda4824cff..141d415a8c8098e9bd9747c94ee84e4de843c9f8 100644 (file)
@@ -68,7 +68,13 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
        -fno-omit-frame-pointer -foptimize-sibling-calls \
-       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
+       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+  CFL += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
 
 $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
 
@@ -138,7 +144,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+  KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
+
 $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 
 $(obj)/vdso32.so.dbg: FORCE \
index f19856d95c60919c92d1679e0037d9339c4c2a65..e48ca3afa0912cc8bb03bd6dba84b0999abe1982 100644 (file)
@@ -43,8 +43,9 @@ extern u8 hvclock_page
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
        long ret;
-       asm("syscall" : "=a" (ret) :
-           "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+       asm ("syscall" : "=a" (ret), "=m" (*ts) :
+            "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+            "memory", "rcx", "r11");
        return ret;
 }
 
@@ -52,8 +53,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
        long ret;
 
-       asm("syscall" : "=a" (ret) :
-           "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+       asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
+            "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
+            "memory", "rcx", "r11");
        return ret;
 }
 
@@ -64,13 +66,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
        long ret;
 
-       asm(
+       asm (
                "mov %%ebx, %%edx \n"
-               "mov %2, %%ebx \n"
+               "mov %[clock], %%ebx \n"
                "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
-               : "=a" (ret)
-               : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+               : "=a" (ret), "=m" (*ts)
+               : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
                : "memory", "edx");
        return ret;
 }
@@ -79,13 +81,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
        long ret;
 
-       asm(
+       asm (
                "mov %%ebx, %%edx \n"
-               "mov %2, %%ebx \n"
+               "mov %[tv], %%ebx \n"
                "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
-               : "=a" (ret)
-               : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+               : "=a" (ret), "=m" (*tv), "=m" (*tz)
+               : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
                : "memory", "edx");
        return ret;
 }
index 981ba5e8241ba2ece923ef22f162ac3820c684c4..8671de126eac09e0a63358d72305ce0a5e9f4f31 100644 (file)
@@ -36,6 +36,7 @@
 
 static int num_counters_llc;
 static int num_counters_nb;
+static bool l3_mask;
 
 static HLIST_HEAD(uncore_unused_list);
 
@@ -209,6 +210,13 @@ static int amd_uncore_event_init(struct perf_event *event)
        hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
        hwc->idx = -1;
 
+       /*
+        * SliceMask and ThreadMask need to be set for certain L3 events in
+        * Family 17h. For other events, the two fields do not affect the count.
+        */
+       if (l3_mask)
+               hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
+
        if (event->cpu < 0)
                return -EINVAL;
 
@@ -525,6 +533,7 @@ static int __init amd_uncore_init(void)
                amd_llc_pmu.name          = "amd_l3";
                format_attr_event_df.show = &event_show_df;
                format_attr_event_l3.show = &event_show_l3;
+               l3_mask                   = true;
        } else {
                num_counters_nb           = NUM_COUNTERS_NB;
                num_counters_llc          = NUM_COUNTERS_L2;
@@ -532,6 +541,7 @@ static int __init amd_uncore_init(void)
                amd_llc_pmu.name          = "amd_l2";
                format_attr_event_df      = format_attr_event;
                format_attr_event_l3      = format_attr_event;
+               l3_mask                   = false;
        }
 
        amd_nb_pmu.attr_groups  = amd_uncore_attr_groups_df;
index 51d7c117e3c705f82136f422c553164f72e83c8c..c07bee31abe859c61c53c499e9aabcbe11f1f07b 100644 (file)
@@ -3061,7 +3061,7 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
 
 void bdx_uncore_cpu_init(void)
 {
-       int pkg = topology_phys_to_logical_pkg(0);
+       int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
 
        if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
@@ -3931,16 +3931,16 @@ static const struct pci_device_id skx_uncore_pci_ids[] = {
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
        },
        { /* M3UPI0 Link 0 */
-               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
-               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
        },
        { /* M3UPI0 Link 1 */
-               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
-               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
        },
        { /* M3UPI1 Link 2 */
-               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
-               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
        },
        { /* end: all zeroes */ }
 };
index b2cf84c35a6d100d9433ffd89112a1a21fc8fbb3..8bdf74902293489a031aa300a605447e83b96341 100644 (file)
 #define INTEL_ARCH_EVENT_MASK  \
        (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
 
+#define AMD64_L3_SLICE_SHIFT                           48
+#define AMD64_L3_SLICE_MASK                            \
+       ((0xFULL) << AMD64_L3_SLICE_SHIFT)
+
+#define AMD64_L3_THREAD_SHIFT                          56
+#define AMD64_L3_THREAD_MASK                           \
+       ((0xFFULL) << AMD64_L3_THREAD_SHIFT)
+
 #define X86_RAW_EVENT_MASK             \
        (ARCH_PERFMON_EVENTSEL_EVENT |  \
         ARCH_PERFMON_EVENTSEL_UMASK |  \
index a80c0673798fe760f6ce86ebcaf1c9ce9c75e7a6..e60c45fd3679bf900a8e1d53d2cb731c316d519f 100644 (file)
@@ -10,8 +10,13 @@ struct cpumask;
 struct mm_struct;
 
 #ifdef CONFIG_X86_UV
+#include <linux/efi.h>
 
 extern enum uv_system_type get_uv_system_type(void);
+static inline bool is_early_uv_system(void)
+{
+       return !((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab);
+}
 extern int is_uv_system(void);
 extern int is_uv_hubless(void);
 extern void uv_cpu_init(void);
@@ -23,6 +28,7 @@ extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 #else  /* X86_UV */
 
 static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
+static inline bool is_early_uv_system(void)    { return 0; }
 static inline int is_uv_system(void)   { return 0; }
 static inline int is_uv_hubless(void)  { return 0; }
 static inline void uv_cpu_init(void)   { }
index 22ab408177b2cd2b09c687cfda07436c4c30b3e5..eeea634bee0a73291a6f879706ef2e280f8e0d4f 100644 (file)
@@ -922,7 +922,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
        /* AMD errata T13 (order #21922) */
-       if ((c->x86 == 6)) {
+       if (c->x86 == 6) {
                /* Duron Rev A0 */
                if (c->x86_model == 3 && c->x86_stepping == 0)
                        size = 64;
index 285eb3ec4200e5377d8462eb87ecd4a943341f6b..3736f6dc95450f6f51204946d351b47e27feacf8 100644 (file)
@@ -529,14 +529,14 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
                           struct seq_file *s, void *v);
 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-                          u32 _cbm, int closid, bool exclusive);
+                          unsigned long cbm, int closid, bool exclusive);
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
-                                 u32 cbm);
+                                 unsigned long cbm);
 enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
 int rdtgroup_tasks_assigned(struct rdtgroup *r);
 int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
 int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm);
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm);
 bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d);
 int rdt_pseudo_lock_init(void);
 void rdt_pseudo_lock_release(void);
index 30e6c9f5a0ada75f9a0b7f7d85c9e05607b4368c..41aeb431e83444d0360ac83be9a31d9c838a83eb 100644 (file)
@@ -789,25 +789,27 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
 /**
  * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
  * @d: RDT domain
- * @_cbm: CBM to test
+ * @cbm: CBM to test
  *
- * @d represents a cache instance and @_cbm a capacity bitmask that is
- * considered for it. Determine if @_cbm overlaps with any existing
+ * @d represents a cache instance and @cbm a capacity bitmask that is
+ * considered for it. Determine if @cbm overlaps with any existing
  * pseudo-locked region on @d.
  *
- * Return: true if @_cbm overlaps with pseudo-locked region on @d, false
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
+ * Return: true if @cbm overlaps with pseudo-locked region on @d, false
  * otherwise.
  */
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm)
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
 {
-       unsigned long *cbm = (unsigned long *)&_cbm;
-       unsigned long *cbm_b;
        unsigned int cbm_len;
+       unsigned long cbm_b;
 
        if (d->plr) {
                cbm_len = d->plr->r->cache.cbm_len;
-               cbm_b = (unsigned long *)&d->plr->cbm;
-               if (bitmap_intersects(cbm, cbm_b, cbm_len))
+               cbm_b = d->plr->cbm;
+               if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
                        return true;
        }
        return false;
index 82a487840eb25f71924431047488f9837d6ef149..643670fb8943486471b773a43d0f57579e7cd5d8 100644 (file)
@@ -975,33 +975,34 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
  * is false then overlaps with any resource group or hardware entities
  * will be considered.
  *
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
  * Return: false if CBM does not overlap, true if it does.
  */
 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-                          u32 _cbm, int closid, bool exclusive)
+                          unsigned long cbm, int closid, bool exclusive)
 {
-       unsigned long *cbm = (unsigned long *)&_cbm;
-       unsigned long *ctrl_b;
        enum rdtgrp_mode mode;
+       unsigned long ctrl_b;
        u32 *ctrl;
        int i;
 
        /* Check for any overlap with regions used by hardware directly */
        if (!exclusive) {
-               if (bitmap_intersects(cbm,
-                                     (unsigned long *)&r->cache.shareable_bits,
-                                     r->cache.cbm_len))
+               ctrl_b = r->cache.shareable_bits;
+               if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
                        return true;
        }
 
        /* Check for overlap with other resource groups */
        ctrl = d->ctrl_val;
        for (i = 0; i < closids_supported(); i++, ctrl++) {
-               ctrl_b = (unsigned long *)ctrl;
+               ctrl_b = *ctrl;
                mode = rdtgroup_mode_by_closid(i);
                if (closid_allocated(i) && i != closid &&
                    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
-                       if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) {
+                       if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
                                if (exclusive) {
                                        if (mode == RDT_MODE_EXCLUSIVE)
                                                return true;
@@ -1138,15 +1139,18 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
  * computed by first dividing the total cache size by the CBM length to
  * determine how many bytes each bit in the bitmask represents. The result
  * is multiplied with the number of bits set in the bitmask.
+ *
+ * @cbm is unsigned long, even if only 32 bits are used to make the
+ * bitmap functions work correctly.
  */
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
-                                 struct rdt_domain *d, u32 cbm)
+                                 struct rdt_domain *d, unsigned long cbm)
 {
        struct cpu_cacheinfo *ci;
        unsigned int size = 0;
        int num_b, i;
 
-       num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len);
+       num_b = bitmap_weight(&cbm, r->cache.cbm_len);
        ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
        for (i = 0; i < ci->num_leaves; i++) {
                if (ci->info_list[i].level == r->cache_level) {
@@ -2353,6 +2357,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
        u32 used_b = 0, unused_b = 0;
        u32 closid = rdtgrp->closid;
        struct rdt_resource *r;
+       unsigned long tmp_cbm;
        enum rdtgrp_mode mode;
        struct rdt_domain *d;
        int i, ret;
@@ -2390,9 +2395,14 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
                         * modify the CBM based on system availability.
                         */
                        cbm_ensure_valid(&d->new_ctrl, r);
-                       if (bitmap_weight((unsigned long *) &d->new_ctrl,
-                                         r->cache.cbm_len) <
-                                       r->cache.min_cbm_bits) {
+                       /*
+                        * Assign the u32 CBM to an unsigned long to ensure
+                        * that bitmap_weight() does not access out-of-bound
+                        * memory.
+                        */
+                       tmp_cbm = d->new_ctrl;
+                       if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
+                           r->cache.min_cbm_bits) {
                                rdt_last_cmd_printf("no space on %s:%d\n",
                                                    r->name, d->id);
                                return -ENOSPC;
index 6490f618e09696a7a407859037a0b1635cbb6f9f..b52bd2b6cdb443ba0c89d78aaa52b02b82a10b6e 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/apic.h>
 #include <asm/intel-family.h>
 #include <asm/i8259.h>
+#include <asm/uv/uv.h>
 
 unsigned int __read_mostly cpu_khz;    /* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -1433,6 +1434,9 @@ void __init tsc_early_init(void)
 {
        if (!boot_cpu_has(X86_FEATURE_TSC))
                return;
+       /* Don't change UV TSC multi-chassis synchronization */
+       if (is_early_uv_system())
+               return;
        if (!determine_cpu_tsc_frequencies(true))
                return;
        loops_per_jiffy = get_loops_per_jiffy();
index d7e9bce6ff61c74d3d36fe6c9d4981e7136e0405..51b953ad9d4efe0e10a032228cc1418ff22167fe 100644 (file)
@@ -249,6 +249,17 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
  */
 static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
 
+/*
+ * In some cases, we need to preserve the GFN of a non-present or reserved
+ * SPTE when we usurp the upper five bits of the physical address space to
+ * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
+ * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
+ * left into the reserved bits, i.e. the GFN in the SPTE will be split into
+ * high and low parts.  This mask covers the lower bits of the GFN.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
+
+
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -357,9 +368,7 @@ static bool is_mmio_spte(u64 spte)
 
 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-       u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
-                  shadow_nonpresent_or_rsvd_mask;
-       u64 gpa = spte & ~mask;
+       u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
        gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
               & shadow_nonpresent_or_rsvd_mask;
@@ -423,6 +432,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
 static void kvm_mmu_reset_all_pte_masks(void)
 {
+       u8 low_phys_bits;
+
        shadow_user_mask = 0;
        shadow_accessed_mask = 0;
        shadow_dirty_mask = 0;
@@ -437,12 +448,17 @@ static void kvm_mmu_reset_all_pte_masks(void)
         * appropriate mask to guard against L1TF attacks. Otherwise, it is
         * assumed that the CPU is not vulnerable to L1TF.
         */
+       low_phys_bits = boot_cpu_data.x86_phys_bits;
        if (boot_cpu_data.x86_phys_bits <
-           52 - shadow_nonpresent_or_rsvd_mask_len)
+           52 - shadow_nonpresent_or_rsvd_mask_len) {
                shadow_nonpresent_or_rsvd_mask =
                        rsvd_bits(boot_cpu_data.x86_phys_bits -
                                  shadow_nonpresent_or_rsvd_mask_len,
                                  boot_cpu_data.x86_phys_bits - 1);
+               low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
+       }
+       shadow_nonpresent_or_rsvd_lower_gfn_mask =
+               GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
 }
 
 static int is_cpuid_PSE36(void)
index 06412ba46aa36eaca6cd1f111a6b8df6d795d969..612fd17be6351c48544abc36884df1c7669727da 100644 (file)
@@ -121,7 +121,6 @@ module_param_named(pml, enable_pml, bool, S_IRUGO);
 
 #define MSR_BITMAP_MODE_X2APIC         1
 #define MSR_BITMAP_MODE_X2APIC_APICV   2
-#define MSR_BITMAP_MODE_LM             4
 
 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
 
@@ -857,6 +856,7 @@ struct nested_vmx {
 
        /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
        u64 vmcs01_debugctl;
+       u64 vmcs01_guest_bndcfgs;
 
        u16 vpid02;
        u16 last_vpid;
@@ -2899,8 +2899,7 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
                vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
        }
 
-       if (is_long_mode(&vmx->vcpu))
-               wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+       wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #else
        savesegment(fs, fs_sel);
        savesegment(gs, gs_sel);
@@ -2951,8 +2950,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
        vmx->loaded_cpu_state = NULL;
 
 #ifdef CONFIG_X86_64
-       if (is_long_mode(&vmx->vcpu))
-               rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+       rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #endif
        if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
                kvm_load_ldt(host_state->ldt_sel);
@@ -2980,24 +2978,19 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
 #ifdef CONFIG_X86_64
 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
 {
-       if (is_long_mode(&vmx->vcpu)) {
-               preempt_disable();
-               if (vmx->loaded_cpu_state)
-                       rdmsrl(MSR_KERNEL_GS_BASE,
-                              vmx->msr_guest_kernel_gs_base);
-               preempt_enable();
-       }
+       preempt_disable();
+       if (vmx->loaded_cpu_state)
+               rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+       preempt_enable();
        return vmx->msr_guest_kernel_gs_base;
 }
 
 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
 {
-       if (is_long_mode(&vmx->vcpu)) {
-               preempt_disable();
-               if (vmx->loaded_cpu_state)
-                       wrmsrl(MSR_KERNEL_GS_BASE, data);
-               preempt_enable();
-       }
+       preempt_disable();
+       if (vmx->loaded_cpu_state)
+               wrmsrl(MSR_KERNEL_GS_BASE, data);
+       preempt_enable();
        vmx->msr_guest_kernel_gs_base = data;
 }
 #endif
@@ -3533,9 +3526,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
                VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
                VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
 
-       if (kvm_mpx_supported())
-               msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
-
        /* We support free control of debug control saving. */
        msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
 
@@ -3552,8 +3542,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
                VM_ENTRY_LOAD_IA32_PAT;
        msrs->entry_ctls_high |=
                (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
-       if (kvm_mpx_supported())
-               msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
 
        /* We support free control of debug control loading. */
        msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
@@ -3601,12 +3589,12 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
                msrs->secondary_ctls_high);
        msrs->secondary_ctls_low = 0;
        msrs->secondary_ctls_high &=
-               SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
                SECONDARY_EXEC_DESC |
                SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
                SECONDARY_EXEC_APIC_REGISTER_VIRT |
                SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
                SECONDARY_EXEC_WBINVD_EXITING;
+
        /*
         * We can emulate "VMCS shadowing," even if the hardware
         * doesn't support it.
@@ -3663,6 +3651,10 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
                msrs->secondary_ctls_high |=
                        SECONDARY_EXEC_UNRESTRICTED_GUEST;
 
+       if (flexpriority_enabled)
+               msrs->secondary_ctls_high |=
+                       SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+
        /* miscellaneous data */
        rdmsr(MSR_IA32_VMX_MISC,
                msrs->misc_low,
@@ -5073,19 +5065,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
        if (!msr)
                return;
 
-       /*
-        * MSR_KERNEL_GS_BASE is not intercepted when the guest is in
-        * 64-bit mode as a 64-bit kernel may frequently access the
-        * MSR.  This means we need to manually save/restore the MSR
-        * when switching between guest and host state, but only if
-        * the guest is in 64-bit mode.  Sync our cached value if the
-        * guest is transitioning to 32-bit mode and the CPU contains
-        * guest state, i.e. the cache is stale.
-        */
-#ifdef CONFIG_X86_64
-       if (!(efer & EFER_LMA))
-               (void)vmx_read_guest_kernel_gs_base(vmx);
-#endif
        vcpu->arch.efer = efer;
        if (efer & EFER_LMA) {
                vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
@@ -6078,9 +6057,6 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
                        mode |= MSR_BITMAP_MODE_X2APIC_APICV;
        }
 
-       if (is_long_mode(vcpu))
-               mode |= MSR_BITMAP_MODE_LM;
-
        return mode;
 }
 
@@ -6121,9 +6097,6 @@ static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
        if (!changed)
                return;
 
-       vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW,
-                                 !(mode & MSR_BITMAP_MODE_LM));
-
        if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
                vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
 
@@ -6189,6 +6162,11 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
        nested_mark_vmcs12_pages_dirty(vcpu);
 }
 
+static u8 vmx_get_rvi(void)
+{
+       return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
+}
+
 static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6201,7 +6179,7 @@ static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
                WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
                return false;
 
-       rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff;
+       rvi = vmx_get_rvi();
 
        vapic_page = kmap(vmx->nested.virtual_apic_page);
        vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
@@ -10245,15 +10223,16 @@ static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
        if (!lapic_in_kernel(vcpu))
                return;
 
+       if (!flexpriority_enabled &&
+           !cpu_has_vmx_virtualize_x2apic_mode())
+               return;
+
        /* Postpone execution until vmcs01 is the current VMCS. */
        if (is_guest_mode(vcpu)) {
                to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
                return;
        }
 
-       if (!cpu_need_tpr_shadow(vcpu))
-               return;
-
        sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
        sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
                              SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
@@ -10375,6 +10354,14 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
        return max_irr;
 }
 
+static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
+{
+       u8 rvi = vmx_get_rvi();
+       u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
+
+       return ((rvi & 0xf0) > (vppr & 0xf0));
+}
+
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
        if (!kvm_vcpu_apicv_active(vcpu))
@@ -11264,6 +11251,23 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
 #undef cr4_fixed1_update
 }
 
+static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       if (kvm_mpx_supported()) {
+               bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
+
+               if (mpx_enabled) {
+                       vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
+                       vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
+               } else {
+                       vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
+                       vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
+               }
+       }
+}
+
 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -11280,8 +11284,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
                to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
                        ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
 
-       if (nested_vmx_allowed(vcpu))
+       if (nested_vmx_allowed(vcpu)) {
                nested_vmx_cr_fixed1_bits_update(vcpu);
+               nested_vmx_entry_exit_ctls_update(vcpu);
+       }
 }
 
 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -12049,8 +12055,13 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
        set_cr4_guest_host_mask(vmx);
 
-       if (vmx_mpx_supported())
-               vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+       if (kvm_mpx_supported()) {
+               if (vmx->nested.nested_run_pending &&
+                       (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+                       vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+               else
+                       vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
+       }
 
        if (enable_vpid) {
                if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
@@ -12595,15 +12606,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        bool from_vmentry = !!exit_qual;
        u32 dummy_exit_qual;
-       u32 vmcs01_cpu_exec_ctrl;
+       bool evaluate_pending_interrupts;
        int r = 0;
 
-       vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+       evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
+               (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
+       if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
+               evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
 
        enter_guest_mode(vcpu);
 
        if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
                vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+       if (kvm_mpx_supported() &&
+               !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+               vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
 
        vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
        vmx_segment_cache_clear(vmx);
@@ -12643,16 +12660,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
         * to L1 or delivered directly to L2 (e.g. In case L1 don't
         * intercept EXTERNAL_INTERRUPT).
         *
-        * Usually this would be handled by L0 requesting a
-        * IRQ/NMI window by setting VMCS accordingly. However,
-        * this setting was done on VMCS01 and now VMCS02 is active
-        * instead. Thus, we force L0 to perform pending event
-        * evaluation by requesting a KVM_REQ_EVENT.
-        */
-       if (vmcs01_cpu_exec_ctrl &
-               (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) {
+        * Usually this would be handled by the processor noticing an
+        * IRQ/NMI window request, or checking RVI during evaluation of
+        * pending virtual interrupts.  However, this setting was done
+        * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
+        * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
+        */
+       if (unlikely(evaluate_pending_interrupts))
                kvm_make_request(KVM_REQ_EVENT, vcpu);
-       }
 
        /*
         * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
index edbf00ec56b34f7765551280e2872b28d1cb0740..ca717737347e670d25ede51b1c84118fd43a3d9c 100644 (file)
@@ -4698,7 +4698,7 @@ static void kvm_init_msr_list(void)
                 */
                switch (msrs_to_save[i]) {
                case MSR_IA32_BNDCFGS:
-                       if (!kvm_x86_ops->mpx_supported())
+                       if (!kvm_mpx_supported())
                                continue;
                        break;
                case MSR_TSC_AUX:
index 94e1ed667b6ea383a99f1cd76d6917af0d2a1ba6..41317c50a44628e9ef4930e9f17ad6d8297c9190 100644 (file)
@@ -322,16 +322,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 
        /*
         * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
-        * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
-        * to avoid race with it. __blk_mq_update_nr_hw_queues will users
-        * synchronize_rcu to ensure all of the users go out of the critical
-        * section below and see zeroed q_usage_counter.
+        * queue_hw_ctx after freeze the queue, so we use q_usage_counter
+        * to avoid race with it.
         */
-       rcu_read_lock();
-       if (percpu_ref_is_zero(&q->q_usage_counter)) {
-               rcu_read_unlock();
+       if (!percpu_ref_tryget(&q->q_usage_counter))
                return;
-       }
 
        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;
@@ -347,7 +342,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                        bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
        }
-       rcu_read_unlock();
+       blk_queue_exit(q);
 }
 
 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
index 85a1c1a59c72716ce2e31c280d7fd43d5c6e61e9..e3c39ea8e17b04b0787e53959cd4f68cb1a43f3d 100644 (file)
@@ -1628,7 +1628,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                BUG_ON(!rq->q);
                if (rq->mq_ctx != this_ctx) {
                        if (this_ctx) {
-                               trace_block_unplug(this_q, depth, from_schedule);
+                               trace_block_unplug(this_q, depth, !from_schedule);
                                blk_mq_sched_insert_requests(this_q, this_ctx,
                                                                &ctx_list,
                                                                from_schedule);
@@ -1648,7 +1648,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
         * on 'ctx_list'. Do those.
         */
        if (this_ctx) {
-               trace_block_unplug(this_q, depth, from_schedule);
+               trace_block_unplug(this_q, depth, !from_schedule);
                blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
                                                from_schedule);
        }
index 6a06b5d040e5dd8ffab230a3f3ae74bfa6935233..fae58b2f906fc5e0352c3f3194780abe13369784 100644 (file)
@@ -609,7 +609,7 @@ void elv_drain_elevator(struct request_queue *q)
 
        while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
                ;
-       if (q->nr_sorted && printed++ < 10) {
+       if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->type->elevator_name, q->nr_sorted);
index b3c0498ee4331f4c2a47a72fb74bce9328b4486e..8e9213b36e31371aed574de0252366f171e6104f 100644 (file)
@@ -226,8 +226,11 @@ static int alloc_lookup_fw_priv(const char *fw_name,
        }
 
        tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
-       if (tmp && !(opt_flags & FW_OPT_NOCACHE))
-               list_add(&tmp->list, &fwc->head);
+       if (tmp) {
+               INIT_LIST_HEAD(&tmp->list);
+               if (!(opt_flags & FW_OPT_NOCACHE))
+                       list_add(&tmp->list, &fwc->head);
+       }
        spin_unlock(&fwc->lock);
 
        *fw_priv = tmp;
index 3f68e2919dc5da70bb29fb1468038c76e3d61d8c..a690fd40026051453ba138d4919811b726b9789b 100644 (file)
@@ -1713,8 +1713,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
        dpm_wait_for_subordinate(dev, async);
 
-       if (async_error)
+       if (async_error) {
+               dev->power.direct_complete = false;
                goto Complete;
+       }
 
        /*
         * If a device configured to wake up the system from sleep states
@@ -1726,6 +1728,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
                pm_wakeup_event(dev, 0);
 
        if (pm_wakeup_pending()) {
+               dev->power.direct_complete = false;
                async_error = -EBUSY;
                goto Complete;
        }
index a71d817e900ddc07ff45d240f0ae290ff408b6cd..429d20131c7e228f81bcbd6dd72ed8a21290c14f 100644 (file)
@@ -2670,8 +2670,8 @@ static void purge_persistent_grants(struct blkfront_info *info)
                        list_del(&gnt_list_entry->node);
                        gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
                        rinfo->persistent_gnts_c--;
-                       __free_page(gnt_list_entry->page);
-                       kfree(gnt_list_entry);
+                       gnt_list_entry->gref = GRANT_INVALID_REF;
+                       list_add_tail(&gnt_list_entry->node, &rinfo->grants);
                }
 
                spin_unlock_irqrestore(&rinfo->ring_lock, flags);
index ec8a4376f74fb4f9da1f369a968df457064315e2..2fab18fae4fcbbeeb44cfbc4f2517258cd3dc505 100644 (file)
@@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
        data->base = of_iomap(node, 0);
        if (!data->base) {
                pr_err("Could not map PIT address\n");
-               return -ENXIO;
+               ret = -ENXIO;
+               goto exit;
        }
 
        data->mck = of_clk_get(node, 0);
        if (IS_ERR(data->mck)) {
                pr_err("Unable to get mck clk\n");
-               return PTR_ERR(data->mck);
+               ret = PTR_ERR(data->mck);
+               goto exit;
        }
 
        ret = clk_prepare_enable(data->mck);
        if (ret) {
                pr_err("Unable to enable mck\n");
-               return ret;
+               goto exit;
        }
 
        /* Get the interrupts property */
        data->irq = irq_of_parse_and_map(node, 0);
        if (!data->irq) {
                pr_err("Unable to get IRQ from DT\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto exit;
        }
 
        /*
@@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
        ret = clocksource_register_hz(&data->clksrc, pit_rate);
        if (ret) {
                pr_err("Failed to register clocksource\n");
-               return ret;
+               goto exit;
        }
 
        /* Set up irq handler */
@@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
                          "at91_tick", data);
        if (ret) {
                pr_err("Unable to setup IRQ\n");
-               return ret;
+               clocksource_unregister(&data->clksrc);
+               goto exit;
        }
 
        /* Set up and register clockevents */
@@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
        clockevents_register_device(&data->clkevt);
 
        return 0;
+
+exit:
+       kfree(data);
+       return ret;
 }
 TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
                       at91sam926x_pit_dt_init);
index c020038ebfab2242ed844a143f3ce6706985685c..cf93f6419b5142e397747be406138dacf3278a5c 100644 (file)
@@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
        cr &= ~fttmr010->t1_enable_val;
        writel(cr, fttmr010->base + TIMER_CR);
 
-       /* Setup the match register forward/backward in time */
-       cr = readl(fttmr010->base + TIMER1_COUNT);
-       if (fttmr010->count_down)
-               cr -= cycles;
-       else
-               cr += cycles;
-       writel(cr, fttmr010->base + TIMER1_MATCH1);
+       if (fttmr010->count_down) {
+               /*
+                * ASPEED Timer Controller will load TIMER1_LOAD register
+                * into TIMER1_COUNT register when the timer is re-enabled.
+                */
+               writel(cycles, fttmr010->base + TIMER1_LOAD);
+       } else {
+               /* Setup the match register forward in time */
+               cr = readl(fttmr010->base + TIMER1_COUNT);
+               writel(cr + cycles, fttmr010->base + TIMER1_MATCH1);
+       }
 
        /* Start */
        cr = readl(fttmr010->base + TIMER_CR);
index 29e2e1a78a43372ee96e64bb9b93d6b21b5288f7..6949a9113dbb417aec69b444f365746220943e16 100644 (file)
@@ -97,6 +97,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
                return -ENXIO;
        }
 
+       if (!of_machine_is_compatible("ti,am43"))
+               ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
+
        ti_32k_timer.counter = ti_32k_timer.base;
 
        /*
index a1830fa25fc5bd5eed0c627592c3a11c29e121da..2a3675c24032bc8059c4c591698d6a7b5218cf1d 100644 (file)
@@ -44,7 +44,7 @@ enum _msm8996_version {
 
 struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
 
-static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
+static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
 {
        size_t len;
        u32 *msm_id;
@@ -222,7 +222,7 @@ static int __init qcom_cpufreq_kryo_init(void)
 }
 module_init(qcom_cpufreq_kryo_init);
 
-static void __init qcom_cpufreq_kryo_exit(void)
+static void __exit qcom_cpufreq_kryo_exit(void)
 {
        platform_device_unregister(kryo_cpufreq_pdev);
        platform_driver_unregister(&qcom_cpufreq_kryo_driver);
index d67667970f7e21ae265076beecf24710e89fde67..ec40f991e6c63c4e6be98df8fc964312bc9afbf2 100644 (file)
@@ -1553,8 +1553,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
-       edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-                        desc_bytes;
+       edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+                                                 desc_bytes);
        edesc->iv_dir = DMA_TO_DEVICE;
 
        /* Make sure IV is located in a DMAable area */
@@ -1757,8 +1757,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
-       edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-                        desc_bytes;
+       edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+                                                 desc_bytes);
        edesc->iv_dir = DMA_FROM_DEVICE;
 
        /* Make sure IV is located in a DMAable area */
index 5c539af8ed6048c9687dae5f99653d57a17bc4c7..010bbf607797f26acaecd1dc542ffa2fb049bf7a 100644 (file)
@@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct dsgl_walk *walk,
        walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 }
 
-static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
+                                int pci_chan_id)
 {
        struct cpl_rx_phys_dsgl *phys_cpl;
 
@@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
        phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
        phys_cpl->rss_hdr_int.qid = htons(qid);
        phys_cpl->rss_hdr_int.hash_val = 0;
+       phys_cpl->rss_hdr_int.channel = pci_chan_id;
 }
 
 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
@@ -718,7 +720,7 @@ static inline void create_wreq(struct chcr_context *ctx,
                FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
                                !!lcb, ctx->tx_qidx);
 
-       chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
+       chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
                                                       qid);
        chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
                                     ((sizeof(chcr_req->wreq)) >> 4)));
@@ -1339,16 +1341,23 @@ static int chcr_device_init(struct chcr_context *ctx)
                                    adap->vres.ncrypto_fc);
                rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
                txq_perchan = ntxq / u_ctx->lldi.nchan;
-               rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
-               rxq_idx += id % rxq_perchan;
-               txq_idx = ctx->dev->tx_channel_id * txq_perchan;
-               txq_idx += id % txq_perchan;
                spin_lock(&ctx->dev->lock_chcr_dev);
-               ctx->rx_qidx = rxq_idx;
-               ctx->tx_qidx = txq_idx;
+               ctx->tx_chan_id = ctx->dev->tx_channel_id;
                ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
                ctx->dev->rx_channel_id = 0;
                spin_unlock(&ctx->dev->lock_chcr_dev);
+               rxq_idx = ctx->tx_chan_id * rxq_perchan;
+               rxq_idx += id % rxq_perchan;
+               txq_idx = ctx->tx_chan_id * txq_perchan;
+               txq_idx += id % txq_perchan;
+               ctx->rx_qidx = rxq_idx;
+               ctx->tx_qidx = txq_idx;
+               /* Channel Id used by SGE to forward packet to Host.
+                * Same value should be used in cpl_fw6_pld RSS_CH field
+                * by FW. Driver programs PCI channel ID to be used in fw
+                * at the time of queue allocation with value "pi->tx_chan"
+                */
+               ctx->pci_chan_id = txq_idx / txq_perchan;
        }
 out:
        return err;
@@ -2503,6 +2512,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct dsgl_walk dsgl_walk;
        unsigned int authsize = crypto_aead_authsize(tfm);
+       struct chcr_context *ctx = a_ctx(tfm);
        u32 temp;
 
        dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2512,7 +2522,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
        dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
        temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
        dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
-       dsgl_walk_end(&dsgl_walk, qid);
+       dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
@@ -2544,6 +2554,8 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
                             unsigned short qid)
 {
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+       struct chcr_context *ctx = c_ctx(tfm);
        struct dsgl_walk dsgl_walk;
 
        dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2552,7 +2564,7 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
        reqctx->dstsg = dsgl_walk.last_sg;
        reqctx->dst_ofst = dsgl_walk.last_sg_len;
 
-       dsgl_walk_end(&dsgl_walk, qid);
+       dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_hash_src_ent(struct ahash_request *req,
index 54835cb109e561ee4ce68f673eaceda38a39e35e..0d2c70c344f39bdf34751a1d64edb2efea426e3b 100644 (file)
@@ -255,6 +255,8 @@ struct chcr_context {
        struct chcr_dev *dev;
        unsigned char tx_qidx;
        unsigned char rx_qidx;
+       unsigned char tx_chan_id;
+       unsigned char pci_chan_id;
        struct __crypto_ctx crypto_ctx[0];
 };
 
index a10c418d4e5c60b7aba530f5ce7a9a0865930489..56bd28174f5251c11c8996a959fc7e96160ee6ac 100644 (file)
@@ -63,7 +63,7 @@ struct dcp {
        struct dcp_coherent_block       *coh;
 
        struct completion               completion[DCP_MAX_CHANS];
-       struct mutex                    mutex[DCP_MAX_CHANS];
+       spinlock_t                      lock[DCP_MAX_CHANS];
        struct task_struct              *thread[DCP_MAX_CHANS];
        struct crypto_queue             queue[DCP_MAX_CHANS];
 };
@@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)
 
        int ret;
 
-       do {
-               __set_current_state(TASK_INTERRUPTIBLE);
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
 
-               mutex_lock(&sdcp->mutex[chan]);
+               spin_lock(&sdcp->lock[chan]);
                backlog = crypto_get_backlog(&sdcp->queue[chan]);
                arq = crypto_dequeue_request(&sdcp->queue[chan]);
-               mutex_unlock(&sdcp->mutex[chan]);
+               spin_unlock(&sdcp->lock[chan]);
+
+               if (!backlog && !arq) {
+                       schedule();
+                       continue;
+               }
+
+               set_current_state(TASK_RUNNING);
 
                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);
@@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
                if (arq) {
                        ret = mxs_dcp_aes_block_crypt(arq);
                        arq->complete(arq, ret);
-                       continue;
                }
-
-               schedule();
-       } while (!kthread_should_stop());
+       }
 
        return 0;
 }
@@ -409,9 +413,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
        rctx->ecb = ecb;
        actx->chan = DCP_CHAN_CRYPTO;
 
-       mutex_lock(&sdcp->mutex[actx->chan]);
+       spin_lock(&sdcp->lock[actx->chan]);
        ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-       mutex_unlock(&sdcp->mutex[actx->chan]);
+       spin_unlock(&sdcp->lock[actx->chan]);
 
        wake_up_process(sdcp->thread[actx->chan]);
 
@@ -640,13 +644,20 @@ static int dcp_chan_thread_sha(void *data)
        struct ahash_request *req;
        int ret, fini;
 
-       do {
-               __set_current_state(TASK_INTERRUPTIBLE);
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
 
-               mutex_lock(&sdcp->mutex[chan]);
+               spin_lock(&sdcp->lock[chan]);
                backlog = crypto_get_backlog(&sdcp->queue[chan]);
                arq = crypto_dequeue_request(&sdcp->queue[chan]);
-               mutex_unlock(&sdcp->mutex[chan]);
+               spin_unlock(&sdcp->lock[chan]);
+
+               if (!backlog && !arq) {
+                       schedule();
+                       continue;
+               }
+
+               set_current_state(TASK_RUNNING);
 
                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);
@@ -658,12 +669,8 @@ static int dcp_chan_thread_sha(void *data)
                        ret = dcp_sha_req_to_buf(arq);
                        fini = rctx->fini;
                        arq->complete(arq, ret);
-                       if (!fini)
-                               continue;
                }
-
-               schedule();
-       } while (!kthread_should_stop());
+       }
 
        return 0;
 }
@@ -721,9 +728,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
                rctx->init = 1;
        }
 
-       mutex_lock(&sdcp->mutex[actx->chan]);
+       spin_lock(&sdcp->lock[actx->chan]);
        ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-       mutex_unlock(&sdcp->mutex[actx->chan]);
+       spin_unlock(&sdcp->lock[actx->chan]);
 
        wake_up_process(sdcp->thread[actx->chan]);
        mutex_unlock(&actx->mutex);
@@ -997,7 +1004,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, sdcp);
 
        for (i = 0; i < DCP_MAX_CHANS; i++) {
-               mutex_init(&sdcp->mutex[i]);
+               spin_lock_init(&sdcp->lock[i]);
                init_completion(&sdcp->completion[i]);
                crypto_init_queue(&sdcp->queue[i], 50);
        }
index ba197f34c252a8d9acc57d400a478797577eeb2f..763c2166ee0ec970a03366c0897b11c9eea2e5a3 100644 (file)
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_C3XXX_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index 24ec908eb26c25c0f26efbe55b15fc34f421dcf9..613c7d5644ced6d250adefdf3d49a7c4d0876938 100644 (file)
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_C3XXXIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index 59a5a0df50b61ed310b7101df1874a14a632f83d..9cb832963357ddcf23686cc3bfec34960646d611 100644 (file)
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_C62X_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index b9f3e0e4fde97dbbcc81c5614b9c348aecf6f717..278452b8ef81c8a5e5aa296a38bb8caf6402fdd3 100644 (file)
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_C62XIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index be5c5a988ca59bed894d52e0e2d734f291e8f0f1..3a9708ef4ce2147b0aa6d60aba27a56506779e3f 100644 (file)
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index 26ab17bfc6dabd0fb75d42284f3febef8e390e79..3da0f951cb590a555fea8a9d848c657888a305d9 100644 (file)
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_DH895XCCIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index 0b7e19c27c6ddc0beea61064c377d070e0a1b540..51a5ac2293a72c27e17bec80fa768986eb2751e4 100644 (file)
@@ -14,6 +14,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/fpga/fpga-mgr.h>
 #include <linux/fpga/fpga-region.h>
 
 #include "dfl-fme-pr.h"
@@ -66,9 +67,10 @@ static int fme_region_probe(struct platform_device *pdev)
 static int fme_region_remove(struct platform_device *pdev)
 {
        struct fpga_region *region = dev_get_drvdata(&pdev->dev);
+       struct fpga_manager *mgr = region->mgr;
 
        fpga_region_unregister(region);
-       fpga_mgr_put(region->mgr);
+       fpga_mgr_put(mgr);
 
        return 0;
 }
index 24b8f98b73ec69019a5421258c57123d8034cadf..c983dac97501b910e020996d53ec04609d99a265 100644 (file)
@@ -125,7 +125,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
  *
  * Given a device, get an exclusive reference to a fpga bridge.
  *
- * Return: fpga manager struct or IS_ERR() condition containing error code.
+ * Return: fpga bridge struct or IS_ERR() condition containing error code.
  */
 struct fpga_bridge *fpga_bridge_get(struct device *dev,
                                    struct fpga_image_info *info)
index 35fabb8083fb0ee5724d3e26dc9874e9b11a3008..052a1342ab7e80d9a094112dd54b6334c943e972 100644 (file)
@@ -437,9 +437,10 @@ static int of_fpga_region_probe(struct platform_device *pdev)
 static int of_fpga_region_remove(struct platform_device *pdev)
 {
        struct fpga_region *region = platform_get_drvdata(pdev);
+       struct fpga_manager *mgr = region->mgr;
 
        fpga_region_unregister(region);
-       fpga_mgr_put(region->mgr);
+       fpga_mgr_put(mgr);
 
        return 0;
 }
index e8f8a199939350a97754031781d3733849a49525..a57300c1d649a36ef6ecbd207a2afd4e92b86513 100644 (file)
@@ -571,7 +571,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
                if (ret)
                        goto out_free_descs;
                lh->descs[i] = desc;
-               count = i;
+               count = i + 1;
 
                if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
                        set_bit(FLAG_ACTIVE_LOW, &desc->flags);
index 0cc5190f4f36e4a1b192a17b274bbd4fe38c00b6..5f3f540738187c6db03a7975bced71ea4163c9e0 100644 (file)
@@ -258,6 +258,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
 {
        int i;
 
+       cancel_delayed_work_sync(&adev->vce.idle_work);
+
        if (adev->vce.vcpu_bo == NULL)
                return 0;
 
@@ -268,7 +270,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
        if (i == AMDGPU_MAX_VCE_HANDLES)
                return 0;
 
-       cancel_delayed_work_sync(&adev->vce.idle_work);
        /* TODO: suspending running encoding sessions isn't supported */
        return -EINVAL;
 }
index fd654a4406db964da6611f6c2d3d9db1fb283519..400fc74bbae27e878aebe4e6e27f6eaf22ca8e15 100644 (file)
@@ -153,11 +153,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
        unsigned size;
        void *ptr;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        if (adev->vcn.vcpu_bo == NULL)
                return 0;
 
-       cancel_delayed_work_sync(&adev->vcn.idle_work);
-
        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;
 
index ec0d62a16e538c305f631b831432df0566b051c8..4f22e745df51b4c2aad4ec842f04ac96b4070f68 100644 (file)
@@ -358,8 +358,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
                                        struct queue *q,
                                        struct qcm_process_device *qpd)
 {
-       int retval;
        struct mqd_manager *mqd_mgr;
+       int retval;
 
        mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
        if (!mqd_mgr)
@@ -387,8 +387,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
        if (!q->properties.is_active)
                return 0;
 
-       retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-                       &q->properties, q->process->mm);
+       if (WARN(q->process->mm != current->mm,
+                "should only run in user thread"))
+               retval = -EFAULT;
+       else
+               retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+                                          &q->properties, current->mm);
        if (retval)
                goto out_uninit_mqd;
 
@@ -545,9 +549,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
                retval = map_queues_cpsch(dqm);
        else if (q->properties.is_active &&
                 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
-                 q->properties.type == KFD_QUEUE_TYPE_SDMA))
-               retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-                                      &q->properties, q->process->mm);
+                 q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+               if (WARN(q->process->mm != current->mm,
+                        "should only run in user thread"))
+                       retval = -EFAULT;
+               else
+                       retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
+                                                  q->pipe, q->queue,
+                                                  &q->properties, current->mm);
+       }
 
 out_unlock:
        dqm_unlock(dqm);
@@ -653,6 +663,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
 static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                                          struct qcm_process_device *qpd)
 {
+       struct mm_struct *mm = NULL;
        struct queue *q;
        struct mqd_manager *mqd_mgr;
        struct kfd_process_device *pdd;
@@ -686,6 +697,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                kfd_flush_tlb(pdd);
        }
 
+       /* Take a safe reference to the mm_struct, which may otherwise
+        * disappear even while the kfd_process is still referenced.
+        */
+       mm = get_task_mm(pdd->process->lead_thread);
+       if (!mm) {
+               retval = -EFAULT;
+               goto out;
+       }
+
        /* activate all active queues on the qpd */
        list_for_each_entry(q, &qpd->queues_list, list) {
                if (!q->properties.is_evicted)
@@ -700,14 +720,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                q->properties.is_evicted = false;
                q->properties.is_active = true;
                retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
-                                      q->queue, &q->properties,
-                                      q->process->mm);
+                                      q->queue, &q->properties, mm);
                if (retval)
                        goto out;
                dqm->queue_count++;
        }
        qpd->evicted = 0;
 out:
+       if (mm)
+               mmput(mm);
        dqm_unlock(dqm);
        return retval;
 }
index 800f481a6995fce0129bb316e078d2dcd8ab9eeb..6903fe6c894ba053693c16d3dd23538d58026ea2 100644 (file)
@@ -641,6 +641,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
        return NULL;
 }
 
+static void emulated_link_detect(struct dc_link *link)
+{
+       struct dc_sink_init_data sink_init_data = { 0 };
+       struct display_sink_capability sink_caps = { 0 };
+       enum dc_edid_status edid_status;
+       struct dc_context *dc_ctx = link->ctx;
+       struct dc_sink *sink = NULL;
+       struct dc_sink *prev_sink = NULL;
+
+       link->type = dc_connection_none;
+       prev_sink = link->local_sink;
+
+       if (prev_sink != NULL)
+               dc_sink_retain(prev_sink);
+
+       switch (link->connector_signal) {
+       case SIGNAL_TYPE_HDMI_TYPE_A: {
+               sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+               sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+               break;
+       }
+
+       case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+               sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+               sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+               break;
+       }
+
+       case SIGNAL_TYPE_DVI_DUAL_LINK: {
+               sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+               sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+               break;
+       }
+
+       case SIGNAL_TYPE_LVDS: {
+               sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+               sink_caps.signal = SIGNAL_TYPE_LVDS;
+               break;
+       }
+
+       case SIGNAL_TYPE_EDP: {
+               sink_caps.transaction_type =
+                       DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+               sink_caps.signal = SIGNAL_TYPE_EDP;
+               break;
+       }
+
+       case SIGNAL_TYPE_DISPLAY_PORT: {
+               sink_caps.transaction_type =
+                       DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+               sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
+               break;
+       }
+
+       default:
+               DC_ERROR("Invalid connector type! signal:%d\n",
+                       link->connector_signal);
+               return;
+       }
+
+       sink_init_data.link = link;
+       sink_init_data.sink_signal = sink_caps.signal;
+
+       sink = dc_sink_create(&sink_init_data);
+       if (!sink) {
+               DC_ERROR("Failed to create sink!\n");
+               return;
+       }
+
+       link->local_sink = sink;
+
+       edid_status = dm_helpers_read_local_edid(
+                       link->ctx,
+                       link,
+                       sink);
+
+       if (edid_status != EDID_OK)
+               DC_ERROR("Failed to read EDID");
+
+}
+
 static int dm_resume(void *handle)
 {
        struct amdgpu_device *adev = handle;
@@ -654,6 +735,7 @@ static int dm_resume(void *handle)
        struct drm_plane *plane;
        struct drm_plane_state *new_plane_state;
        struct dm_plane_state *dm_new_plane_state;
+       enum dc_connection_type new_connection_type = dc_connection_none;
        int ret;
        int i;
 
@@ -684,7 +766,13 @@ static int dm_resume(void *handle)
                        continue;
 
                mutex_lock(&aconnector->hpd_lock);
-               dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+               if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+                       DRM_ERROR("KMS: Failed to detect connector\n");
+
+               if (aconnector->base.force && new_connection_type == dc_connection_none)
+                       emulated_link_detect(aconnector->dc_link);
+               else
+                       dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
 
                if (aconnector->fake_enable && aconnector->dc_link->local_sink)
                        aconnector->fake_enable = false;
@@ -922,6 +1010,7 @@ static void handle_hpd_irq(void *param)
        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
+       enum dc_connection_type new_connection_type = dc_connection_none;
 
        /* In case of failure or MST no need to update connector status or notify the OS
         * since (for MST case) MST does this in it's own context.
@@ -931,7 +1020,21 @@ static void handle_hpd_irq(void *param)
        if (aconnector->fake_enable)
                aconnector->fake_enable = false;
 
-       if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+       if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+               DRM_ERROR("KMS: Failed to detect connector\n");
+
+       if (aconnector->base.force && new_connection_type == dc_connection_none) {
+               emulated_link_detect(aconnector->dc_link);
+
+
+               drm_modeset_lock_all(dev);
+               dm_restore_drm_connector_state(dev, connector);
+               drm_modeset_unlock_all(dev);
+
+               if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+                       drm_kms_helper_hotplug_event(dev);
+
+       } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
                amdgpu_dm_update_connector_after_detect(aconnector);
 
 
@@ -1031,6 +1134,7 @@ static void handle_hpd_rx_irq(void *param)
        struct drm_device *dev = connector->dev;
        struct dc_link *dc_link = aconnector->dc_link;
        bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+       enum dc_connection_type new_connection_type = dc_connection_none;
 
        /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
         * conflict, after implement i2c helper, this mutex should be
@@ -1042,7 +1146,24 @@ static void handle_hpd_rx_irq(void *param)
        if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
                        !is_mst_root_connector) {
                /* Downstream Port status changed. */
-               if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+               if (!dc_link_detect_sink(dc_link, &new_connection_type))
+                       DRM_ERROR("KMS: Failed to detect connector\n");
+
+               if (aconnector->base.force && new_connection_type == dc_connection_none) {
+                       emulated_link_detect(dc_link);
+
+                       if (aconnector->fake_enable)
+                               aconnector->fake_enable = false;
+
+                       amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+                       drm_modeset_lock_all(dev);
+                       dm_restore_drm_connector_state(dev, connector);
+                       drm_modeset_unlock_all(dev);
+
+                       drm_kms_helper_hotplug_event(dev);
+               } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
 
                        if (aconnector->fake_enable)
                                aconnector->fake_enable = false;
@@ -1433,6 +1554,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        uint32_t link_cnt;
        int32_t total_overlay_planes, total_primary_planes;
+       enum dc_connection_type new_connection_type = dc_connection_none;
 
        link_cnt = dm->dc->caps.max_links;
        if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1499,7 +1621,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 
                link = dc_get_link_at_index(dm->dc, i);
 
-               if (dc_link_detect(link, DETECT_REASON_BOOT)) {
+               if (!dc_link_detect_sink(link, &new_connection_type))
+                       DRM_ERROR("KMS: Failed to detect connector\n");
+
+               if (aconnector->base.force && new_connection_type == dc_connection_none) {
+                       emulated_link_detect(link);
+                       amdgpu_dm_update_connector_after_detect(aconnector);
+
+               } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
                        amdgpu_dm_update_connector_after_detect(aconnector);
                        register_backlight_device(dm, link);
                }
@@ -2494,7 +2623,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        if (dm_state && dm_state->freesync_capable)
                stream->ignore_msa_timing_param = true;
 finish:
-       if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
+       if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
                dc_sink_release(sink);
 
        return stream;
@@ -4504,12 +4633,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        }
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
-       /* Signal HW programming completion */
-       drm_atomic_helper_commit_hw_done(state);
 
        if (wait_for_vblank)
                drm_atomic_helper_wait_for_flip_done(dev, state);
 
+       /*
+        * FIXME:
+        * Delay hw_done() until flip_done() is signaled. This is to block
+        * another commit from freeing the CRTC state while we're still
+        * waiting on flip_done.
+        */
+       drm_atomic_helper_commit_hw_done(state);
+
        drm_atomic_helper_cleanup_planes(dev, state);
 
        /* Finally, drop a runtime PM reference for each newly disabled CRTC,
index 37eaf72ace549d6f132b9fc5933da434fc164396..fced3c1c2ef5f6ac117a993714de8bf24da2258a 100644 (file)
@@ -195,7 +195,7 @@ static bool program_hpd_filter(
        return result;
 }
 
-static bool detect_sink(struct dc_link *link, enum dc_connection_type *type)
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
 {
        uint32_t is_hpd_high = 0;
        struct gpio *hpd_pin;
@@ -604,7 +604,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
        if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
                return false;
 
-       if (false == detect_sink(link, &new_connection_type)) {
+       if (false == dc_link_detect_sink(link, &new_connection_type)) {
                BREAK_TO_DEBUGGER();
                return false;
        }
index d43cefbc43d3b195c40ad6f6829eadeaeb96daee..1b48ab9aea897cd28422a57c11909a769cd7a6a3 100644 (file)
@@ -215,6 +215,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
 
 bool dc_link_is_dp_sink_present(struct dc_link *link);
 
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
 /*
  * DPCD access interfaces
  */
index 14384d9675a8c4cc23fc67162f9f3f9cd7a17a22..b2f308766a9e8e5cfc6c34dea6e9b4515cf7fc8d 100644 (file)
@@ -2560,7 +2560,7 @@ static void pplib_apply_display_requirements(
        dc->prev_display_config = *pp_display_cfg;
 }
 
-void dce110_set_bandwidth(
+static void dce110_set_bandwidth(
                struct dc *dc,
                struct dc_state *context,
                bool decrease_allowed)
index e4c5db75c4c656b010e16ab490286801bb94bfb4..d6db3dbd90153ba4a3f9511eb494552b143b4255 100644 (file)
@@ -68,11 +68,6 @@ void dce110_fill_display_configs(
        const struct dc_state *context,
        struct dm_pp_display_configuration *pp_display_cfg);
 
-void dce110_set_bandwidth(
-               struct dc *dc,
-               struct dc_state *context,
-               bool decrease_allowed);
-
 uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
 
 void dp_receiver_power_ctrl(struct dc_link *link, bool on);
index 5853522a618298a6bdc20df629f1c494d13468ae..eb0f5f9a973b9b2f79793023d8f1cdf21aae787c 100644 (file)
@@ -244,17 +244,6 @@ static void dce120_update_dchub(
        dh_data->dchub_info_valid = false;
 }
 
-static void dce120_set_bandwidth(
-               struct dc *dc,
-               struct dc_state *context,
-               bool decrease_allowed)
-{
-       if (context->stream_count <= 0)
-               return;
-
-       dce110_set_bandwidth(dc, context, decrease_allowed);
-}
-
 void dce120_hw_sequencer_construct(struct dc *dc)
 {
        /* All registers used by dce11.2 match those in dce11 in offset and
@@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc)
        dce110_hw_sequencer_construct(dc);
        dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
        dc->hwss.update_dchub = dce120_update_dchub;
-       dc->hwss.set_bandwidth = dce120_set_bandwidth;
 }
 
index 08b5bb219816ad38f929a23ccb69b378cba2101f..94d6dabec2dc80ee47794430d0642de6846fbfe4 100644 (file)
@@ -754,6 +754,7 @@ static int malidp_bind(struct device *dev)
        drm->irq_enabled = true;
 
        ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+       drm_crtc_vblank_reset(&malidp->crtc);
        if (ret < 0) {
                DRM_ERROR("failed to initialise vblank\n");
                goto vblank_fail;
index c94a4422e0e9100a607a8b817878c25942ce77bd..2781e462c1ed5dd7b275ad0d9b9cccc1cc258a1f 100644 (file)
@@ -384,7 +384,8 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
 
 static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
                                     dma_addr_t *addrs, s32 *pitches,
-                                    int num_planes, u16 w, u16 h, u32 fmt_id)
+                                    int num_planes, u16 w, u16 h, u32 fmt_id,
+                                    const s16 *rgb2yuv_coeffs)
 {
        u32 base = MALIDP500_SE_MEMWRITE_BASE;
        u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -416,6 +417,16 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
 
        malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
                        MALIDP500_SE_MEMWRITE_OUT_SIZE);
+
+       if (rgb2yuv_coeffs) {
+               int i;
+
+               for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+                       malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+                                       MALIDP500_SE_RGB_YUV_COEFFS + i * 4);
+               }
+       }
+
        malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
 
        return 0;
@@ -658,7 +669,8 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
 
 static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
                                     dma_addr_t *addrs, s32 *pitches,
-                                    int num_planes, u16 w, u16 h, u32 fmt_id)
+                                    int num_planes, u16 w, u16 h, u32 fmt_id,
+                                    const s16 *rgb2yuv_coeffs)
 {
        u32 base = MALIDP550_SE_MEMWRITE_BASE;
        u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -689,6 +701,15 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
        malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
                          MALIDP550_SE_CONTROL);
 
+       if (rgb2yuv_coeffs) {
+               int i;
+
+               for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+                       malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+                                       MALIDP550_SE_RGB_YUV_COEFFS + i * 4);
+               }
+       }
+
        return 0;
 }
 
index ad2e96915d44a253c8d77dc2e5d98c22c758fc5c..9fc94c08190f23184985e3beb40f4cc88dc2e430 100644 (file)
@@ -191,7 +191,8 @@ struct malidp_hw {
         * @param fmt_id - internal format ID of output buffer
         */
        int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs,
-                              s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id);
+                              s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id,
+                              const s16 *rgb2yuv_coeffs);
 
        /*
         * Disable the writing to memory of the next frame's content.
index ba6ae66387c9129063cc894ed99c5dc0854a4c07..91472e5e0c8b8a3b0c19a2fb5f860fac83ffcb5b 100644 (file)
@@ -26,6 +26,8 @@ struct malidp_mw_connector_state {
        s32 pitches[2];
        u8 format;
        u8 n_planes;
+       bool rgb2yuv_initialized;
+       const s16 *rgb2yuv_coeffs;
 };
 
 static int malidp_mw_connector_get_modes(struct drm_connector *connector)
@@ -84,7 +86,7 @@ static void malidp_mw_connector_destroy(struct drm_connector *connector)
 static struct drm_connector_state *
 malidp_mw_connector_duplicate_state(struct drm_connector *connector)
 {
-       struct malidp_mw_connector_state *mw_state;
+       struct malidp_mw_connector_state *mw_state, *mw_current_state;
 
        if (WARN_ON(!connector->state))
                return NULL;
@@ -93,7 +95,10 @@ malidp_mw_connector_duplicate_state(struct drm_connector *connector)
        if (!mw_state)
                return NULL;
 
-       /* No need to preserve any of our driver-local data */
+       mw_current_state = to_mw_state(connector->state);
+       mw_state->rgb2yuv_coeffs = mw_current_state->rgb2yuv_coeffs;
+       mw_state->rgb2yuv_initialized = mw_current_state->rgb2yuv_initialized;
+
        __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base);
 
        return &mw_state->base;
@@ -108,6 +113,13 @@ static const struct drm_connector_funcs malidp_mw_connector_funcs = {
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
+static const s16 rgb2yuv_coeffs_bt709_limited[MALIDP_COLORADJ_NUM_COEFFS] = {
+       47,  157,   16,
+       -26,  -87,  112,
+       112, -102,  -10,
+       16,  128,  128
+};
+
 static int
 malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
                               struct drm_crtc_state *crtc_state,
@@ -157,6 +169,9 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
        }
        mw_state->n_planes = n_planes;
 
+       if (fb->format->is_yuv)
+               mw_state->rgb2yuv_coeffs = rgb2yuv_coeffs_bt709_limited;
+
        return 0;
 }
 
@@ -239,10 +254,12 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
 
                drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
                conn_state->writeback_job = NULL;
-
                hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
                                           mw_state->pitches, mw_state->n_planes,
-                                          fb->width, fb->height, mw_state->format);
+                                          fb->width, fb->height, mw_state->format,
+                                          !mw_state->rgb2yuv_initialized ?
+                                          mw_state->rgb2yuv_coeffs : NULL);
+               mw_state->rgb2yuv_initialized = !!mw_state->rgb2yuv_coeffs;
        } else {
                DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n");
                hwdev->hw->disable_memwrite(hwdev);
index 3579d36b2a717aedc436cd574b8639f9ea1d7d9b..6ffe849774f2edbec087aabe640ba0f61720b6f2 100644 (file)
 #define MALIDP500_SE_BASE              0x00c00
 #define MALIDP500_SE_CONTROL           0x00c0c
 #define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c
+#define MALIDP500_SE_RGB_YUV_COEFFS    0x00C74
 #define MALIDP500_SE_MEMWRITE_BASE     0x00e00
 #define MALIDP500_DC_IRQ_BASE          0x00f00
 #define MALIDP500_CONFIG_VALID         0x00f00
 #define MALIDP550_SE_CONTROL           0x08010
 #define   MALIDP550_SE_MEMWRITE_ONESHOT        (1 << 7)
 #define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030
+#define MALIDP550_SE_RGB_YUV_COEFFS    0x08078
 #define MALIDP550_SE_MEMWRITE_BASE     0x08100
 #define MALIDP550_DC_BASE              0x0c000
 #define MALIDP550_DC_CONTROL           0x0c010
index baff50a4c2349dfec2ae8df1f14c0b5714f29020..df31c3815092b33f6fbbd6db1cebf341642e1b66 100644 (file)
@@ -63,20 +63,21 @@ static void drm_client_close(struct drm_client_dev *client)
 EXPORT_SYMBOL(drm_client_close);
 
 /**
- * drm_client_new - Create a DRM client
+ * drm_client_init - Initialise a DRM client
  * @dev: DRM device
  * @client: DRM client
  * @name: Client name
  * @funcs: DRM client functions (optional)
  *
+ * This initialises the client and opens a &drm_file. Use drm_client_add() to complete the process.
  * The caller needs to hold a reference on @dev before calling this function.
  * The client is freed when the &drm_device is unregistered. See drm_client_release().
  *
  * Returns:
  * Zero on success or negative error code on failure.
  */
-int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
-                  const char *name, const struct drm_client_funcs *funcs)
+int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
+                   const char *name, const struct drm_client_funcs *funcs)
 {
        int ret;
 
@@ -95,10 +96,6 @@ int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
        if (ret)
                goto err_put_module;
 
-       mutex_lock(&dev->clientlist_mutex);
-       list_add(&client->list, &dev->clientlist);
-       mutex_unlock(&dev->clientlist_mutex);
-
        drm_dev_get(dev);
 
        return 0;
@@ -109,13 +106,33 @@ int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
 
        return ret;
 }
-EXPORT_SYMBOL(drm_client_new);
+EXPORT_SYMBOL(drm_client_init);
+
+/**
+ * drm_client_add - Add client to the device list
+ * @client: DRM client
+ *
+ * Add the client to the &drm_device client list to activate its callbacks.
+ * @client must be initialized by a call to drm_client_init(). After
+ * drm_client_add() it is no longer permissible to call drm_client_release()
+ * directly (outside the unregister callback), instead cleanup will happen
+ * automatically on driver unload.
+ */
+void drm_client_add(struct drm_client_dev *client)
+{
+       struct drm_device *dev = client->dev;
+
+       mutex_lock(&dev->clientlist_mutex);
+       list_add(&client->list, &dev->clientlist);
+       mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_add);
 
 /**
  * drm_client_release - Release DRM client resources
  * @client: DRM client
  *
- * Releases resources by closing the &drm_file that was opened by drm_client_new().
+ * Releases resources by closing the &drm_file that was opened by drm_client_init().
  * It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
  *
  * This function should only be called from the unregister callback. An exception
index 9da36a6271d3a24380e6a1221ae027701eba6e52..9ac1f2e0f064cb72528834ed6c21a82daf37be45 100644 (file)
@@ -160,7 +160,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
 
        fb_helper = &fbdev_cma->fb_helper;
 
-       ret = drm_client_new(dev, &fb_helper->client, "fbdev", NULL);
+       ret = drm_client_init(dev, &fb_helper->client, "fbdev", NULL);
        if (ret)
                goto err_free;
 
@@ -169,6 +169,8 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
        if (ret)
                goto err_client_put;
 
+       drm_client_add(&fb_helper->client);
+
        return fbdev_cma;
 
 err_client_put:
index 16ec93b75dbfaf87fca050d84dfe8ee292226424..515a7aec57acc48505f195f462a1c46c69da7cbb 100644 (file)
@@ -3218,12 +3218,14 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
        if (!fb_helper)
                return -ENOMEM;
 
-       ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+       ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
        if (ret) {
                kfree(fb_helper);
                return ret;
        }
 
+       drm_client_add(&fb_helper->client);
+
        fb_helper->preferred_bpp = preferred_bpp;
 
        drm_fbdev_client_hotplug(&fb_helper->client);
index b54fb78a283c642e8541370482c627ea9567dc8e..b82da96ded5c85d847c9c7566236f1deaf48ffc2 100644 (file)
@@ -566,14 +566,14 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
        lessee_priv->is_master = 1;
        lessee_priv->authenticated = 1;
 
-       /* Hook up the fd */
-       fd_install(fd, lessee_file);
-
        /* Pass fd back to userspace */
        DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
        cl->fd = fd;
        cl->lessee_id = lessee->lessee_id;
 
+       /* Hook up the fd */
+       fd_install(fd, lessee_file);
+
        DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
        return 0;
 
index b902361dee6e1db300c10ce5798de7bada296b65..1d9a9d2fe0e098c4f00b59816b92d262c9620073 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/err.h>
 #include <linux/module.h>
 
-#include <drm/drm_device.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_panel.h>
 
@@ -105,13 +104,6 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
        if (panel->connector)
                return -EBUSY;
 
-       panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
-       if (!panel->link) {
-               dev_err(panel->dev, "failed to link panel to %s\n",
-                       dev_name(connector->dev->dev));
-               return -EINVAL;
-       }
-
        panel->connector = connector;
        panel->drm = connector->dev;
 
@@ -133,8 +125,6 @@ EXPORT_SYMBOL(drm_panel_attach);
  */
 int drm_panel_detach(struct drm_panel *panel)
 {
-       device_link_del(panel->link);
-
        panel->connector = NULL;
        panel->drm = NULL;
 
index adb3cb27d31e6fa6aa1f0c3102c7d8ede5599169..759278fef35ae6ee4f9889b09c0dfffef2af8ca8 100644 (file)
@@ -97,6 +97,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
 {
        int ret;
 
+       WARN_ON(*fence);
+
        *fence = drm_syncobj_fence_get(syncobj);
        if (*fence)
                return 1;
@@ -743,6 +745,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 
        if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
                for (i = 0; i < count; ++i) {
+                       if (entries[i].fence)
+                               continue;
+
                        drm_syncobj_fence_get_or_add_callback(syncobjs[i],
                                                              &entries[i].fence,
                                                              &entries[i].syncobj_cb,
index 9b2720b41571f245a1ba5ad677bb0566d95ca207..83c1f46670bfea9dcbe95e42da598c8665576b03 100644 (file)
@@ -592,8 +592,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct component_match *match = NULL;
 
-       dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-
        if (!dev->platform_data) {
                struct device_node *core_node;
 
@@ -655,13 +653,30 @@ static int __init etnaviv_init(void)
        for_each_compatible_node(np, NULL, "vivante,gc") {
                if (!of_device_is_available(np))
                        continue;
-               pdev = platform_device_register_simple("etnaviv", -1,
-                                                      NULL, 0);
-               if (IS_ERR(pdev)) {
-                       ret = PTR_ERR(pdev);
+
+               pdev = platform_device_alloc("etnaviv", -1);
+               if (!pdev) {
+                       ret = -ENOMEM;
+                       of_node_put(np);
+                       goto unregister_platform_driver;
+               }
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+               /*
+                * Apply the same DMA configuration to the virtual etnaviv
+                * device as the GPU we found. This assumes that all Vivante
+                * GPUs in the system share the same DMA constraints.
+                */
+               of_dma_configure(&pdev->dev, np, true);
+
+               ret = platform_device_add(pdev);
+               if (ret) {
+                       platform_device_put(pdev);
                        of_node_put(np);
                        goto unregister_platform_driver;
                }
+
                etnaviv_drm = pdev;
                of_node_put(np);
                break;
index 87f6b5672e1193a1df76bf3c09eb69abfd966e55..797d9ee5f15a75c5aee52966d0145b7023e94574 100644 (file)
@@ -55,37 +55,12 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
                                        unsigned long start, unsigned long size)
 {
-       struct iommu_domain *domain;
-       int ret;
-
-       domain = iommu_domain_alloc(priv->dma_dev->bus);
-       if (!domain)
-               return -ENOMEM;
-
-       ret = iommu_get_dma_cookie(domain);
-       if (ret)
-               goto free_domain;
-
-       ret = iommu_dma_init_domain(domain, start, size, NULL);
-       if (ret)
-               goto put_cookie;
-
-       priv->mapping = domain;
+       priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
        return 0;
-
-put_cookie:
-       iommu_put_dma_cookie(domain);
-free_domain:
-       iommu_domain_free(domain);
-       return ret;
 }
 
 static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
 {
-       struct iommu_domain *domain = priv->mapping;
-
-       iommu_put_dma_cookie(domain);
-       iommu_domain_free(domain);
        priv->mapping = NULL;
 }
 
@@ -94,7 +69,9 @@ static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
 {
        struct iommu_domain *domain = priv->mapping;
 
-       return iommu_attach_device(domain, dev);
+       if (dev != priv->dma_dev)
+               return iommu_attach_device(domain, dev);
+       return 0;
 }
 
 static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
@@ -102,7 +79,8 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 {
        struct iommu_domain *domain = priv->mapping;
 
-       iommu_detach_device(domain, dev);
+       if (dev != priv->dma_dev)
+               iommu_detach_device(domain, dev);
 }
 #else
 #error Unsupported architecture and IOMMU/DMA-mapping glue code
index 5d2f0d548469e1dd4808d8be5dfe8be62edfef0b..250b5e02a314a493fd0c41477237bf44f7208225 100644 (file)
@@ -191,7 +191,8 @@ static irqreturn_t tda9950_irq(int irq, void *data)
                        break;
                }
                /* TDA9950 executes all retries for us */
-               tx_status |= CEC_TX_STATUS_MAX_RETRIES;
+               if (tx_status != CEC_TX_STATUS_OK)
+                       tx_status |= CEC_TX_STATUS_MAX_RETRIES;
                cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
                                  nack_cnt, 0, err_cnt);
                break;
@@ -310,7 +311,7 @@ static void tda9950_release(struct tda9950_priv *priv)
        /* Wait up to .5s for it to signal non-busy */
        do {
                csr = tda9950_read(client, REG_CSR);
-               if (!(csr & CSR_BUSY) || --timeout)
+               if (!(csr & CSR_BUSY) || !--timeout)
                        break;
                msleep(10);
        } while (1);
index f7f2aa71d8d99f1c4fa4e4d4632adeb48ecd50f3..a262a64f562565d80642cf887788ac261991336e 100644 (file)
@@ -232,6 +232,20 @@ static bool compress_init(struct compress *c)
        return true;
 }
 
+static void *compress_next_page(struct drm_i915_error_object *dst)
+{
+       unsigned long page;
+
+       if (dst->page_count >= dst->num_pages)
+               return ERR_PTR(-ENOSPC);
+
+       page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+       if (!page)
+               return ERR_PTR(-ENOMEM);
+
+       return dst->pages[dst->page_count++] = (void *)page;
+}
+
 static int compress_page(struct compress *c,
                         void *src,
                         struct drm_i915_error_object *dst)
@@ -245,19 +259,14 @@ static int compress_page(struct compress *c,
 
        do {
                if (zstream->avail_out == 0) {
-                       unsigned long page;
-
-                       page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
-                       if (!page)
-                               return -ENOMEM;
+                       zstream->next_out = compress_next_page(dst);
+                       if (IS_ERR(zstream->next_out))
+                               return PTR_ERR(zstream->next_out);
 
-                       dst->pages[dst->page_count++] = (void *)page;
-
-                       zstream->next_out = (void *)page;
                        zstream->avail_out = PAGE_SIZE;
                }
 
-               if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
+               if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
                        return -EIO;
        } while (zstream->avail_in);
 
@@ -268,19 +277,42 @@ static int compress_page(struct compress *c,
        return 0;
 }
 
-static void compress_fini(struct compress *c,
+static int compress_flush(struct compress *c,
                          struct drm_i915_error_object *dst)
 {
        struct z_stream_s *zstream = &c->zstream;
 
-       if (dst) {
-               zlib_deflate(zstream, Z_FINISH);
-               dst->unused = zstream->avail_out;
-       }
+       do {
+               switch (zlib_deflate(zstream, Z_FINISH)) {
+               case Z_OK: /* more space requested */
+                       zstream->next_out = compress_next_page(dst);
+                       if (IS_ERR(zstream->next_out))
+                               return PTR_ERR(zstream->next_out);
+
+                       zstream->avail_out = PAGE_SIZE;
+                       break;
+
+               case Z_STREAM_END:
+                       goto end;
+
+               default: /* any error */
+                       return -EIO;
+               }
+       } while (1);
+
+end:
+       memset(zstream->next_out, 0, zstream->avail_out);
+       dst->unused = zstream->avail_out;
+       return 0;
+}
+
+static void compress_fini(struct compress *c,
+                         struct drm_i915_error_object *dst)
+{
+       struct z_stream_s *zstream = &c->zstream;
 
        zlib_deflateEnd(zstream);
        kfree(zstream->workspace);
-
        if (c->tmp)
                free_page((unsigned long)c->tmp);
 }
@@ -319,6 +351,12 @@ static int compress_page(struct compress *c,
        return 0;
 }
 
+static int compress_flush(struct compress *c,
+                         struct drm_i915_error_object *dst)
+{
+       return 0;
+}
+
 static void compress_fini(struct compress *c,
                          struct drm_i915_error_object *dst)
 {
@@ -917,6 +955,7 @@ i915_error_object_create(struct drm_i915_private *i915,
        unsigned long num_pages;
        struct sgt_iter iter;
        dma_addr_t dma;
+       int ret;
 
        if (!vma)
                return NULL;
@@ -930,6 +969,7 @@ i915_error_object_create(struct drm_i915_private *i915,
 
        dst->gtt_offset = vma->node.start;
        dst->gtt_size = vma->node.size;
+       dst->num_pages = num_pages;
        dst->page_count = 0;
        dst->unused = 0;
 
@@ -938,28 +978,26 @@ i915_error_object_create(struct drm_i915_private *i915,
                return NULL;
        }
 
+       ret = -EINVAL;
        for_each_sgt_dma(dma, iter, vma->pages) {
                void __iomem *s;
-               int ret;
 
                ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
 
                s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
                ret = compress_page(&compress, (void  __force *)s, dst);
                io_mapping_unmap_atomic(s);
-
                if (ret)
-                       goto unwind;
+                       break;
        }
-       goto out;
 
-unwind:
-       while (dst->page_count--)
-               free_page((unsigned long)dst->pages[dst->page_count]);
-       kfree(dst);
-       dst = NULL;
+       if (ret || compress_flush(&compress, dst)) {
+               while (dst->page_count--)
+                       free_page((unsigned long)dst->pages[dst->page_count]);
+               kfree(dst);
+               dst = NULL;
+       }
 
-out:
        compress_fini(&compress, dst);
        ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
        return dst;
index f893a4e8b7831d7b214cf60ab7bcf9b058070714..8710fb18ed746cface7e9a7b2d6d6ac7cd06b2b4 100644 (file)
@@ -135,6 +135,7 @@ struct i915_gpu_state {
                struct drm_i915_error_object {
                        u64 gtt_offset;
                        u64 gtt_size;
+                       int num_pages;
                        int page_count;
                        int unused;
                        u32 *pages[0];
index 90628a47ae17f81312dff51ddbc89aff4af55654..29877969310dae65ff8f0a035285deb463c0d342 100644 (file)
@@ -3091,36 +3091,27 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
        spin_unlock(&i915->irq_lock);
 }
 
-static void
-gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
-                     u32 *iir)
+static u32
+gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
 {
        void __iomem * const regs = dev_priv->regs;
+       u32 iir;
 
        if (!(master_ctl & GEN11_GU_MISC_IRQ))
-               return;
+               return 0;
+
+       iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+       if (likely(iir))
+               raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
 
-       *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
-       if (likely(*iir))
-               raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
+       return iir;
 }
 
 static void
-gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
-                         const u32 master_ctl, const u32 iir)
+gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
 {
-       if (!(master_ctl & GEN11_GU_MISC_IRQ))
-               return;
-
-       if (unlikely(!iir)) {
-               DRM_ERROR("GU_MISC iir blank!\n");
-               return;
-       }
-
        if (iir & GEN11_GU_MISC_GSE)
                intel_opregion_asle_intr(dev_priv);
-       else
-               DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
 }
 
 static irqreturn_t gen11_irq_handler(int irq, void *arg)
@@ -3157,12 +3148,12 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
                enable_rpm_wakeref_asserts(i915);
        }
 
-       gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
+       gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
 
        /* Acknowledge and enable interrupts. */
        raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
 
-       gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
+       gen11_gu_misc_irq_handler(i915, gu_misc_iir);
 
        return IRQ_HANDLED;
 }
index 6a4d1388ad2d39b2f972e0d1e1270bab8d3ce8df..1df3ce134cd0086de8b05cc7dc0fce4de4e91926 100644 (file)
@@ -592,7 +592,6 @@ static const struct intel_device_info intel_cannonlake_info = {
        GEN10_FEATURES, \
        GEN(11), \
        .ddb_size = 2048, \
-       .has_csr = 0, \
        .has_logical_ring_elsq = 1
 
 static const struct intel_device_info intel_icelake_11_info = {
index 5146ee029db4bd6c35bb3a15cdd25b6caf54c64e..bc49909aba8e664b6675fcac13921128c661dce9 100644 (file)
 #define USB_DEVICE_ID_SIS817_TOUCH     0x0817
 #define USB_DEVICE_ID_SIS_TS           0x1013
 #define USB_DEVICE_ID_SIS1030_TOUCH    0x1030
-#define USB_DEVICE_ID_SIS10FB_TOUCH    0x10fb
 
 #define USB_VENDOR_ID_SKYCABLE                 0x1223
 #define        USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER       0x3F07
index f3076659361abcb0567804c298af25e278de8fa9..4e3592e7a3f7217f86fe0fba59d3ea73551dcab2 100644 (file)
@@ -47,7 +47,7 @@
 /* quirks to control the device */
 #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV       BIT(0)
 #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET       BIT(1)
-#define I2C_HID_QUIRK_RESEND_REPORT_DESCR      BIT(2)
+#define I2C_HID_QUIRK_NO_RUNTIME_PM            BIT(2)
 
 /* flags */
 #define I2C_HID_STARTED                0
@@ -169,9 +169,8 @@ static const struct i2c_hid_quirks {
        { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
                I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
        { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
-               I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
-       { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
-               I2C_HID_QUIRK_RESEND_REPORT_DESCR },
+               I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
+               I2C_HID_QUIRK_NO_RUNTIME_PM },
        { 0, 0 }
 };
 
@@ -1105,7 +1104,9 @@ static int i2c_hid_probe(struct i2c_client *client,
                goto err_mem_free;
        }
 
-       pm_runtime_put(&client->dev);
+       if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+               pm_runtime_put(&client->dev);
+
        return 0;
 
 err_mem_free:
@@ -1130,7 +1131,8 @@ static int i2c_hid_remove(struct i2c_client *client)
        struct i2c_hid *ihid = i2c_get_clientdata(client);
        struct hid_device *hid;
 
-       pm_runtime_get_sync(&client->dev);
+       if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+               pm_runtime_get_sync(&client->dev);
        pm_runtime_disable(&client->dev);
        pm_runtime_set_suspended(&client->dev);
        pm_runtime_put_noidle(&client->dev);
@@ -1236,22 +1238,13 @@ static int i2c_hid_resume(struct device *dev)
 
        /* Instead of resetting device, simply powers the device on. This
         * solves "incomplete reports" on Raydium devices 2386:3118 and
-        * 2386:4B33
+        * 2386:4B33 and fixes various SIS touchscreens no longer sending
+        * data after a suspend/resume.
         */
        ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
        if (ret)
                return ret;
 
-       /* Some devices need to re-send report descr cmd
-        * after resume, after this it will be back normal.
-        * otherwise it issues too many incomplete reports.
-        */
-       if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
-               ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
-               if (ret)
-                       return ret;
-       }
-
        if (hid->driver && hid->driver->reset_resume) {
                ret = hid->driver->reset_resume(hid);
                return ret;
index da133716bed05b63dadef22e072f05b4e7b0f5da..08a8327dfd224852cb81599959eeac09fd0c5a9a 100644 (file)
@@ -29,6 +29,7 @@
 #define CNL_Ax_DEVICE_ID       0x9DFC
 #define GLK_Ax_DEVICE_ID       0x31A2
 #define CNL_H_DEVICE_ID                0xA37C
+#define ICL_MOBILE_DEVICE_ID   0x34FC
 #define SPT_H_DEVICE_ID                0xA135
 
 #define        REVISION_ID_CHT_A0      0x6
index a1125a5c7965a255f8b5480f47cc8a53b534b76f..256b3016116cecca6ece2f8ae2d94422cd875251 100644 (file)
@@ -38,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
        {0, }
 };
index ced0418994568239494d1e6133665bd504a1299b..f4d08c8ac7f8ff8f101cbe477826a0924b63170d 100644 (file)
@@ -76,6 +76,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
                                        __u32 version)
 {
        int ret = 0;
+       unsigned int cur_cpu;
        struct vmbus_channel_initiate_contact *msg;
        unsigned long flags;
 
@@ -118,9 +119,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
         * the CPU attempting to connect may not be CPU 0.
         */
        if (version >= VERSION_WIN8_1) {
-               msg->target_vcpu =
-                       hv_cpu_number_to_vp_number(smp_processor_id());
-               vmbus_connection.connect_cpu = smp_processor_id();
+               cur_cpu = get_cpu();
+               msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu);
+               vmbus_connection.connect_cpu = cur_cpu;
+               put_cpu();
        } else {
                msg->target_vcpu = 0;
                vmbus_connection.connect_cpu = 0;
index 94d94b4a9a0d989d932101422eb87feeb9cf7525..18cc324f3ca94541d830f825693ee42b6cf60df9 100644 (file)
@@ -34,11 +34,11 @@ static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
 
 static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
 {
-       u32 ic_clk = i2c_dw_clk_rate(dev);
        const char *mode_str, *fp_str = "";
        u32 comp_param1;
        u32 sda_falling_time, scl_falling_time;
        struct i2c_timings *t = &dev->timings;
+       u32 ic_clk;
        int ret;
 
        ret = i2c_dw_acquire_lock(dev);
@@ -53,6 +53,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
 
        /* Calculate SCL timing parameters for standard mode if not set */
        if (!dev->ss_hcnt || !dev->ss_lcnt) {
+               ic_clk = i2c_dw_clk_rate(dev);
                dev->ss_hcnt =
                        i2c_dw_scl_hcnt(ic_clk,
                                        4000,   /* tHD;STA = tHIGH = 4.0 us */
@@ -89,6 +90,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
         * needed also in high speed mode.
         */
        if (!dev->fs_hcnt || !dev->fs_lcnt) {
+               ic_clk = i2c_dw_clk_rate(dev);
                dev->fs_hcnt =
                        i2c_dw_scl_hcnt(ic_clk,
                                        600,    /* tHD;STA = tHIGH = 0.6 us */
index 0cf1379f4e8091b8627ed5bdf5c3dfe03a536630..5c754bf659e2801df5fc72a2d805dba58a53d171 100644 (file)
@@ -164,7 +164,7 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr,
                 * run ~75 kHz instead which should do no harm.
                 */
                dev_notice(&sch_adapter.dev,
-                       "Clock divider unitialized. Setting defaults\n");
+                       "Clock divider uninitialized. Setting defaults\n");
                outw(backbone_speed / (4 * 100), SMBHSTCLK);
        }
 
index 36732eb688a4afa6075f159c346b566d14d64746..9f2eb02481d348c60512c69c1b58e375c698f36c 100644 (file)
@@ -367,20 +367,26 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
        dma_addr_t rx_dma;
        enum geni_se_xfer_mode mode;
        unsigned long time_left = XFER_TIMEOUT;
+       void *dma_buf;
 
        gi2c->cur = msg;
-       mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+       mode = GENI_SE_FIFO;
+       dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+       if (dma_buf)
+               mode = GENI_SE_DMA;
+
        geni_se_select_mode(&gi2c->se, mode);
        writel_relaxed(msg->len, gi2c->se.base + SE_I2C_RX_TRANS_LEN);
        geni_se_setup_m_cmd(&gi2c->se, I2C_READ, m_param);
        if (mode == GENI_SE_DMA) {
                int ret;
 
-               ret = geni_se_rx_dma_prep(&gi2c->se, msg->buf, msg->len,
+               ret = geni_se_rx_dma_prep(&gi2c->se, dma_buf, msg->len,
                                                                &rx_dma);
                if (ret) {
                        mode = GENI_SE_FIFO;
                        geni_se_select_mode(&gi2c->se, mode);
+                       i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
                }
        }
 
@@ -393,6 +399,7 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
                if (gi2c->err)
                        geni_i2c_rx_fsm_rst(gi2c);
                geni_se_rx_dma_unprep(&gi2c->se, rx_dma, msg->len);
+               i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
        }
        return gi2c->err;
 }
@@ -403,20 +410,26 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
        dma_addr_t tx_dma;
        enum geni_se_xfer_mode mode;
        unsigned long time_left;
+       void *dma_buf;
 
        gi2c->cur = msg;
-       mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+       mode = GENI_SE_FIFO;
+       dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+       if (dma_buf)
+               mode = GENI_SE_DMA;
+
        geni_se_select_mode(&gi2c->se, mode);
        writel_relaxed(msg->len, gi2c->se.base + SE_I2C_TX_TRANS_LEN);
        geni_se_setup_m_cmd(&gi2c->se, I2C_WRITE, m_param);
        if (mode == GENI_SE_DMA) {
                int ret;
 
-               ret = geni_se_tx_dma_prep(&gi2c->se, msg->buf, msg->len,
+               ret = geni_se_tx_dma_prep(&gi2c->se, dma_buf, msg->len,
                                                                &tx_dma);
                if (ret) {
                        mode = GENI_SE_FIFO;
                        geni_se_select_mode(&gi2c->se, mode);
+                       i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
                }
        }
 
@@ -432,6 +445,7 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
                if (gi2c->err)
                        geni_i2c_tx_fsm_rst(gi2c);
                geni_se_tx_dma_unprep(&gi2c->se, tx_dma, msg->len);
+               i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
        }
        return gi2c->err;
 }
index a01389b85f1340a29c5b25dd6a9e5f97c4373b2c..7e9a2bbf5ddcb967459367778a834c8314ff6f2b 100644 (file)
@@ -152,6 +152,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
                        mt_params[3].type = ACPI_TYPE_INTEGER;
                        mt_params[3].integer.value = len;
                        mt_params[4].type = ACPI_TYPE_BUFFER;
+                       mt_params[4].buffer.length = len;
                        mt_params[4].buffer.pointer = data->block + 1;
                }
                break;
index 0bee1f4b914e751d9893875665d0b588d7e3258d..3208ad6ad54014776cd333f4ac068c56cc737484 100644 (file)
@@ -337,6 +337,39 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
        return 0;
 }
 
+/**
+ * del_gid - Delete GID table entry
+ *
+ * @ib_dev:    IB device whose GID entry to be deleted
+ * @port:      Port number of the IB device
+ * @table:     GID table of the IB device for a port
+ * @ix:                GID entry index to delete
+ *
+ */
+static void del_gid(struct ib_device *ib_dev, u8 port,
+                   struct ib_gid_table *table, int ix)
+{
+       struct ib_gid_table_entry *entry;
+
+       lockdep_assert_held(&table->lock);
+
+       pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
+                ib_dev->name, port, ix,
+                table->data_vec[ix]->attr.gid.raw);
+
+       write_lock_irq(&table->rwlock);
+       entry = table->data_vec[ix];
+       entry->state = GID_TABLE_ENTRY_PENDING_DEL;
+       /*
+        * For non RoCE protocol, GID entry slot is ready to use.
+        */
+       if (!rdma_protocol_roce(ib_dev, port))
+               table->data_vec[ix] = NULL;
+       write_unlock_irq(&table->rwlock);
+
+       put_gid_entry_locked(entry);
+}
+
 /**
  * add_modify_gid - Add or modify GID table entry
  *
@@ -358,7 +391,7 @@ static int add_modify_gid(struct ib_gid_table *table,
         * this index.
         */
        if (is_gid_entry_valid(table->data_vec[attr->index]))
-               put_gid_entry(table->data_vec[attr->index]);
+               del_gid(attr->device, attr->port_num, table, attr->index);
 
        /*
         * Some HCA's report multiple GID entries with only one valid GID, and
@@ -386,39 +419,6 @@ static int add_modify_gid(struct ib_gid_table *table,
        return ret;
 }
 
-/**
- * del_gid - Delete GID table entry
- *
- * @ib_dev:    IB device whose GID entry to be deleted
- * @port:      Port number of the IB device
- * @table:     GID table of the IB device for a port
- * @ix:                GID entry index to delete
- *
- */
-static void del_gid(struct ib_device *ib_dev, u8 port,
-                   struct ib_gid_table *table, int ix)
-{
-       struct ib_gid_table_entry *entry;
-
-       lockdep_assert_held(&table->lock);
-
-       pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
-                ib_dev->name, port, ix,
-                table->data_vec[ix]->attr.gid.raw);
-
-       write_lock_irq(&table->rwlock);
-       entry = table->data_vec[ix];
-       entry->state = GID_TABLE_ENTRY_PENDING_DEL;
-       /*
-        * For non RoCE protocol, GID entry slot is ready to use.
-        */
-       if (!rdma_protocol_roce(ib_dev, port))
-               table->data_vec[ix] = NULL;
-       write_unlock_irq(&table->rwlock);
-
-       put_gid_entry_locked(entry);
-}
-
 /* rwlock should be read locked, or lock should be held */
 static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
index 5f437d1570fb02d516b1ee60f0a4cee42053da50..21863ddde63e3040b285d9decd8a2ee1c47534b8 100644 (file)
@@ -1759,6 +1759,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
                mutex_lock(&mut);
                if (!ctx->closing) {
                        mutex_unlock(&mut);
+                       ucma_put_ctx(ctx);
+                       wait_for_completion(&ctx->comp);
                        /* rdma_destroy_id ensures that no event handlers are
                         * inflight for that id before releasing it.
                         */
index a21d5214afc367b260fd445642c55e6040d02dfb..e012ca80f9d196ddbb8723691ec4087a55c0d863 100644 (file)
@@ -2027,33 +2027,55 @@ static int modify_qp(struct ib_uverbs_file *file,
 
        if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
            cmd->base.cur_qp_state > IB_QPS_ERR) ||
-           cmd->base.qp_state > IB_QPS_ERR) {
+           (cmd->base.attr_mask & IB_QP_STATE &&
+           cmd->base.qp_state > IB_QPS_ERR)) {
                ret = -EINVAL;
                goto release_qp;
        }
 
-       attr->qp_state            = cmd->base.qp_state;
-       attr->cur_qp_state        = cmd->base.cur_qp_state;
-       attr->path_mtu            = cmd->base.path_mtu;
-       attr->path_mig_state      = cmd->base.path_mig_state;
-       attr->qkey                = cmd->base.qkey;
-       attr->rq_psn              = cmd->base.rq_psn;
-       attr->sq_psn              = cmd->base.sq_psn;
-       attr->dest_qp_num         = cmd->base.dest_qp_num;
-       attr->qp_access_flags     = cmd->base.qp_access_flags;
-       attr->pkey_index          = cmd->base.pkey_index;
-       attr->alt_pkey_index      = cmd->base.alt_pkey_index;
-       attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
-       attr->max_rd_atomic       = cmd->base.max_rd_atomic;
-       attr->max_dest_rd_atomic  = cmd->base.max_dest_rd_atomic;
-       attr->min_rnr_timer       = cmd->base.min_rnr_timer;
-       attr->port_num            = cmd->base.port_num;
-       attr->timeout             = cmd->base.timeout;
-       attr->retry_cnt           = cmd->base.retry_cnt;
-       attr->rnr_retry           = cmd->base.rnr_retry;
-       attr->alt_port_num        = cmd->base.alt_port_num;
-       attr->alt_timeout         = cmd->base.alt_timeout;
-       attr->rate_limit          = cmd->rate_limit;
+       if (cmd->base.attr_mask & IB_QP_STATE)
+               attr->qp_state = cmd->base.qp_state;
+       if (cmd->base.attr_mask & IB_QP_CUR_STATE)
+               attr->cur_qp_state = cmd->base.cur_qp_state;
+       if (cmd->base.attr_mask & IB_QP_PATH_MTU)
+               attr->path_mtu = cmd->base.path_mtu;
+       if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
+               attr->path_mig_state = cmd->base.path_mig_state;
+       if (cmd->base.attr_mask & IB_QP_QKEY)
+               attr->qkey = cmd->base.qkey;
+       if (cmd->base.attr_mask & IB_QP_RQ_PSN)
+               attr->rq_psn = cmd->base.rq_psn;
+       if (cmd->base.attr_mask & IB_QP_SQ_PSN)
+               attr->sq_psn = cmd->base.sq_psn;
+       if (cmd->base.attr_mask & IB_QP_DEST_QPN)
+               attr->dest_qp_num = cmd->base.dest_qp_num;
+       if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
+               attr->qp_access_flags = cmd->base.qp_access_flags;
+       if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
+               attr->pkey_index = cmd->base.pkey_index;
+       if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+               attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
+       if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+               attr->max_rd_atomic = cmd->base.max_rd_atomic;
+       if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+               attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
+       if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
+               attr->min_rnr_timer = cmd->base.min_rnr_timer;
+       if (cmd->base.attr_mask & IB_QP_PORT)
+               attr->port_num = cmd->base.port_num;
+       if (cmd->base.attr_mask & IB_QP_TIMEOUT)
+               attr->timeout = cmd->base.timeout;
+       if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
+               attr->retry_cnt = cmd->base.retry_cnt;
+       if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
+               attr->rnr_retry = cmd->base.rnr_retry;
+       if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
+               attr->alt_port_num = cmd->base.alt_port_num;
+               attr->alt_timeout = cmd->base.alt_timeout;
+               attr->alt_pkey_index = cmd->base.alt_pkey_index;
+       }
+       if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
+               attr->rate_limit = cmd->rate_limit;
 
        if (cmd->base.attr_mask & IB_QP_AV)
                copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
index 6d974e2363df249c4291a3331d9d16f0a14232a6..50152c1b100452f7a4c8a9f733739ac25cbe777d 100644 (file)
@@ -440,6 +440,7 @@ static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
                        list_del(&entry->obj_list);
                kfree(entry);
        }
+       file->ev_queue.is_closed = 1;
        spin_unlock_irq(&file->ev_queue.lock);
 
        uverbs_close_fd(filp);
index 73ea6f0db88fb5c2b2bbd698be684baf34dc5331..be854628a7c63149c05ed74f9f001a39fb1bc59d 100644 (file)
@@ -248,6 +248,7 @@ void uverbs_destroy_api(struct uverbs_api *uapi)
                kfree(rcu_dereference_protected(*slot, true));
                radix_tree_iter_delete(&uapi->radix, &iter, slot);
        }
+       kfree(uapi);
 }
 
 struct uverbs_api *uverbs_alloc_api(
index 20b9f31052bf974fe43d335730daa4268ad614de..85cd1a3593d610132ded3796b5b90384bdb0342c 100644 (file)
@@ -78,7 +78,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
 /* Mutex to protect the list of bnxt_re devices added */
 static DEFINE_MUTEX(bnxt_re_dev_lock);
 static struct workqueue_struct *bnxt_re_wq;
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait);
+static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);
 
 /* SR-IOV helper functions */
 
@@ -182,7 +182,7 @@ static void bnxt_re_shutdown(void *p)
        if (!rdev)
                return;
 
-       bnxt_re_ib_unreg(rdev, false);
+       bnxt_re_ib_unreg(rdev);
 }
 
 static void bnxt_re_stop_irq(void *handle)
@@ -251,7 +251,7 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
 /* Driver registration routines used to let the networking driver (bnxt_en)
  * to know that the RoCE driver is now installed
  */
-static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
+static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
 {
        struct bnxt_en_dev *en_dev;
        int rc;
@@ -260,14 +260,9 @@ static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
                return -EINVAL;
 
        en_dev = rdev->en_dev;
-       /* Acquire rtnl lock if it is not invokded from netdev event */
-       if (lock_wait)
-               rtnl_lock();
 
        rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
                                                    BNXT_ROCE_ULP);
-       if (lock_wait)
-               rtnl_unlock();
        return rc;
 }
 
@@ -281,14 +276,12 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
 
        en_dev = rdev->en_dev;
 
-       rtnl_lock();
        rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
                                                  &bnxt_re_ulp_ops, rdev);
-       rtnl_unlock();
        return rc;
 }
 
-static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
+static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
 {
        struct bnxt_en_dev *en_dev;
        int rc;
@@ -298,13 +291,9 @@ static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
 
        en_dev = rdev->en_dev;
 
-       if (lock_wait)
-               rtnl_lock();
 
        rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);
 
-       if (lock_wait)
-               rtnl_unlock();
        return rc;
 }
 
@@ -320,7 +309,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
 
        num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
 
-       rtnl_lock();
        num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
                                                         rdev->msix_entries,
                                                         num_msix_want);
@@ -335,7 +323,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
        }
        rdev->num_msix = num_msix_got;
 done:
-       rtnl_unlock();
        return rc;
 }
 
@@ -358,24 +345,18 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
        fw_msg->timeout = timeout;
 }
 
-static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
-                                bool lock_wait)
+static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id)
 {
        struct bnxt_en_dev *en_dev = rdev->en_dev;
        struct hwrm_ring_free_input req = {0};
        struct hwrm_ring_free_output resp;
        struct bnxt_fw_msg fw_msg;
-       bool do_unlock = false;
        int rc = -EINVAL;
 
        if (!en_dev)
                return rc;
 
        memset(&fw_msg, 0, sizeof(fw_msg));
-       if (lock_wait) {
-               rtnl_lock();
-               do_unlock = true;
-       }
 
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
        req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -386,8 +367,6 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
        if (rc)
                dev_err(rdev_to_dev(rdev),
                        "Failed to free HW ring:%d :%#x", req.ring_id, rc);
-       if (do_unlock)
-               rtnl_unlock();
        return rc;
 }
 
@@ -405,7 +384,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
                return rc;
 
        memset(&fw_msg, 0, sizeof(fw_msg));
-       rtnl_lock();
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
        req.enables = 0;
        req.page_tbl_addr =  cpu_to_le64(dma_arr[0]);
@@ -426,27 +404,21 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
        if (!rc)
                *fw_ring_id = le16_to_cpu(resp.ring_id);
 
-       rtnl_unlock();
        return rc;
 }
 
 static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
-                                     u32 fw_stats_ctx_id, bool lock_wait)
+                                     u32 fw_stats_ctx_id)
 {
        struct bnxt_en_dev *en_dev = rdev->en_dev;
        struct hwrm_stat_ctx_free_input req = {0};
        struct bnxt_fw_msg fw_msg;
-       bool do_unlock = false;
        int rc = -EINVAL;
 
        if (!en_dev)
                return rc;
 
        memset(&fw_msg, 0, sizeof(fw_msg));
-       if (lock_wait) {
-               rtnl_lock();
-               do_unlock = true;
-       }
 
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
        req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
@@ -457,8 +429,6 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
                dev_err(rdev_to_dev(rdev),
                        "Failed to free HW stats context %#x", rc);
 
-       if (do_unlock)
-               rtnl_unlock();
        return rc;
 }
 
@@ -478,7 +448,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
                return rc;
 
        memset(&fw_msg, 0, sizeof(fw_msg));
-       rtnl_lock();
 
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
        req.update_period_ms = cpu_to_le32(1000);
@@ -490,7 +459,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
        if (!rc)
                *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);
 
-       rtnl_unlock();
        return rc;
 }
 
@@ -929,19 +897,19 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
        return rc;
 }
 
-static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
 {
        int i;
 
        for (i = 0; i < rdev->num_msix - 1; i++) {
-               bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait);
+               bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
                bnxt_qplib_free_nq(&rdev->nq[i]);
        }
 }
 
-static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
 {
-       bnxt_re_free_nq_res(rdev, lock_wait);
+       bnxt_re_free_nq_res(rdev);
 
        if (rdev->qplib_res.dpi_tbl.max) {
                bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
@@ -1219,7 +1187,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
        return 0;
 }
 
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
 {
        int i, rc;
 
@@ -1234,28 +1202,27 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
                cancel_delayed_work(&rdev->worker);
 
        bnxt_re_cleanup_res(rdev);
-       bnxt_re_free_res(rdev, lock_wait);
+       bnxt_re_free_res(rdev);
 
        if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
                rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
                if (rc)
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to deinitialize RCFW: %#x", rc);
-               bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id,
-                                          lock_wait);
+               bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
                bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
                bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
-               bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, lock_wait);
+               bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
                bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
        }
        if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
-               rc = bnxt_re_free_msix(rdev, lock_wait);
+               rc = bnxt_re_free_msix(rdev);
                if (rc)
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to free MSI-X vectors: %#x", rc);
        }
        if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
-               rc = bnxt_re_unregister_netdev(rdev, lock_wait);
+               rc = bnxt_re_unregister_netdev(rdev);
                if (rc)
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to unregister with netdev: %#x", rc);
@@ -1276,6 +1243,12 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 {
        int i, j, rc;
 
+       bool locked;
+
+       /* Acquire rtnl lock throughout this function */
+       rtnl_lock();
+       locked = true;
+
        /* Registered a new RoCE device instance to netdev */
        rc = bnxt_re_register_netdev(rdev);
        if (rc) {
@@ -1374,12 +1347,16 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
                schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
        }
 
+       rtnl_unlock();
+       locked = false;
+
        /* Register ib dev */
        rc = bnxt_re_register_ib(rdev);
        if (rc) {
                pr_err("Failed to register with IB: %#x\n", rc);
                goto fail;
        }
+       set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
        dev_info(rdev_to_dev(rdev), "Device registered successfully");
        for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) {
                rc = device_create_file(&rdev->ibdev.dev,
@@ -1395,7 +1372,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
                        goto fail;
                }
        }
-       set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
        ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
                         &rdev->active_width);
        set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
@@ -1404,17 +1380,21 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 
        return 0;
 free_sctx:
-       bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, true);
+       bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
 free_ctx:
        bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
 disable_rcfw:
        bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
 free_ring:
-       bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, true);
+       bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
 free_rcfw:
        bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
 fail:
-       bnxt_re_ib_unreg(rdev, true);
+       if (!locked)
+               rtnl_lock();
+       bnxt_re_ib_unreg(rdev);
+       rtnl_unlock();
+
        return rc;
 }
 
@@ -1567,7 +1547,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
                 */
                if (atomic_read(&rdev->sched_count) > 0)
                        goto exit;
-               bnxt_re_ib_unreg(rdev, false);
+               bnxt_re_ib_unreg(rdev);
                bnxt_re_remove_one(rdev);
                bnxt_re_dev_unreg(rdev);
                break;
@@ -1646,7 +1626,10 @@ static void __exit bnxt_re_mod_exit(void)
                 */
                flush_workqueue(bnxt_re_wq);
                bnxt_re_dev_stop(rdev);
-               bnxt_re_ib_unreg(rdev, true);
+               /* Acquire the rtnl_lock as the L2 resources are freed here */
+               rtnl_lock();
+               bnxt_re_ib_unreg(rdev);
+               rtnl_unlock();
                bnxt_re_remove_one(rdev);
                bnxt_re_dev_unreg(rdev);
        }
index 2c19bf772451bfef693eaad1fa5a678e5cc9e067..e1668bcc2d13d71aba2f35021ec25cd693e79682 100644 (file)
@@ -6733,6 +6733,7 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
        struct hfi1_devdata *dd = ppd->dd;
        struct send_context *sc;
        int i;
+       int sc_flags;
 
        if (flags & FREEZE_SELF)
                write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
@@ -6743,11 +6744,13 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
        /* notify all SDMA engines that they are going into a freeze */
        sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
 
+       sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
+                                             SCF_LINK_DOWN : 0);
        /* do halt pre-handling on all enabled send contexts */
        for (i = 0; i < dd->num_send_contexts; i++) {
                sc = dd->send_contexts[i].sc;
                if (sc && (sc->flags & SCF_ENABLED))
-                       sc_stop(sc, SCF_FROZEN | SCF_HALTED);
+                       sc_stop(sc, sc_flags);
        }
 
        /* Send context are frozen. Notify user space */
@@ -10674,6 +10677,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
                add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
 
                handle_linkup_change(dd, 1);
+               pio_kernel_linkup(dd);
 
                /*
                 * After link up, a new link width will have been set.
index c2c1cba5b23be440bc292a205ca523d6962c4233..752057647f091734368f998c0173234d8a1f2dd0 100644 (file)
@@ -86,6 +86,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
        unsigned long flags;
        int write = 1;  /* write sendctrl back */
        int flush = 0;  /* re-read sendctrl to make sure it is flushed */
+       int i;
 
        spin_lock_irqsave(&dd->sendctrl_lock, flags);
 
@@ -95,9 +96,13 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
                reg |= SEND_CTRL_SEND_ENABLE_SMASK;
        /* Fall through */
        case PSC_DATA_VL_ENABLE:
+               mask = 0;
+               for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
+                       if (!dd->vld[i].mtu)
+                               mask |= BIT_ULL(i);
                /* Disallow sending on VLs not enabled */
-               mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
-                               SEND_CTRL_UNSUPPORTED_VL_SHIFT;
+               mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
+                       SEND_CTRL_UNSUPPORTED_VL_SHIFT;
                reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
                break;
        case PSC_GLOBAL_DISABLE:
@@ -921,20 +926,18 @@ void sc_free(struct send_context *sc)
 void sc_disable(struct send_context *sc)
 {
        u64 reg;
-       unsigned long flags;
        struct pio_buf *pbuf;
 
        if (!sc)
                return;
 
        /* do all steps, even if already disabled */
-       spin_lock_irqsave(&sc->alloc_lock, flags);
+       spin_lock_irq(&sc->alloc_lock);
        reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
        reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
        sc->flags &= ~SCF_ENABLED;
        sc_wait_for_packet_egress(sc, 1);
        write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
-       spin_unlock_irqrestore(&sc->alloc_lock, flags);
 
        /*
         * Flush any waiters.  Once the context is disabled,
@@ -944,7 +947,7 @@ void sc_disable(struct send_context *sc)
         * proceed with the flush.
         */
        udelay(1);
-       spin_lock_irqsave(&sc->release_lock, flags);
+       spin_lock(&sc->release_lock);
        if (sc->sr) {   /* this context has a shadow ring */
                while (sc->sr_tail != sc->sr_head) {
                        pbuf = &sc->sr[sc->sr_tail].pbuf;
@@ -955,7 +958,8 @@ void sc_disable(struct send_context *sc)
                                sc->sr_tail = 0;
                }
        }
-       spin_unlock_irqrestore(&sc->release_lock, flags);
+       spin_unlock(&sc->release_lock);
+       spin_unlock_irq(&sc->alloc_lock);
 }
 
 /* return SendEgressCtxtStatus.PacketOccupancy */
@@ -1178,11 +1182,39 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd)
                sc = dd->send_contexts[i].sc;
                if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
                        continue;
+               if (sc->flags & SCF_LINK_DOWN)
+                       continue;
 
                sc_enable(sc);  /* will clear the sc frozen flag */
        }
 }
 
+/**
+ * pio_kernel_linkup() - Re-enable send contexts after linkup event
+ * @dd: valid device data
+ *
+ * When the link goes down, the freeze path is taken.  However, a link down
+ * event is different from a freeze because if the send context is re-enabled
+ * whowever is sending data will start sending data again, which will hang
+ * any QP that is sending data.
+ *
+ * The freeze path now looks at the type of event that occurs and takes this
+ * path for link down event.
+ */
+void pio_kernel_linkup(struct hfi1_devdata *dd)
+{
+       struct send_context *sc;
+       int i;
+
+       for (i = 0; i < dd->num_send_contexts; i++) {
+               sc = dd->send_contexts[i].sc;
+               if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
+                       continue;
+
+               sc_enable(sc);  /* will clear the sc link down flag */
+       }
+}
+
 /*
  * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
  * Returns:
@@ -1382,11 +1414,10 @@ void sc_stop(struct send_context *sc, int flag)
 {
        unsigned long flags;
 
-       /* mark the context */
-       sc->flags |= flag;
-
        /* stop buffer allocations */
        spin_lock_irqsave(&sc->alloc_lock, flags);
+       /* mark the context */
+       sc->flags |= flag;
        sc->flags &= ~SCF_ENABLED;
        spin_unlock_irqrestore(&sc->alloc_lock, flags);
        wake_up(&sc->halt_wait);
index 058b08f459ab7947e53aa5ad676febc9999f7ade..aaf372c3e5d6a3cc0de82aaf9819c02b97bd195f 100644 (file)
@@ -139,6 +139,7 @@ struct send_context {
 #define SCF_IN_FREE 0x02
 #define SCF_HALTED  0x04
 #define SCF_FROZEN  0x08
+#define SCF_LINK_DOWN 0x10
 
 struct send_context_info {
        struct send_context *sc;        /* allocated working context */
@@ -306,6 +307,7 @@ void set_pio_integrity(struct send_context *sc);
 void pio_reset_all(struct hfi1_devdata *dd);
 void pio_freeze(struct hfi1_devdata *dd);
 void pio_kernel_unfreeze(struct hfi1_devdata *dd);
+void pio_kernel_linkup(struct hfi1_devdata *dd);
 
 /* global PIO send control operations */
 #define PSC_GLOBAL_ENABLE 0
index a3a7b33196d64158cd069cdf5d32c429f79f7b5d..5c88706121c1cb0faf2da79dae014113f7d0a0d9 100644 (file)
@@ -828,7 +828,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
                        if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
                                if (++req->iov_idx == req->data_iovs) {
                                        ret = -EFAULT;
-                                       goto free_txreq;
+                                       goto free_tx;
                                }
                                iovec = &req->iovs[req->iov_idx];
                                WARN_ON(iovec->offset);
index 13374c727b142d61dc3f6bdb0603f958c6a1a0ca..a7c586a5589d642524f7e659c6a4dfa512eed4e2 100644 (file)
@@ -1582,6 +1582,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
        struct hfi1_pportdata *ppd;
        struct hfi1_devdata *dd;
        u8 sc5;
+       u8 sl;
 
        if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
            !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
@@ -1590,8 +1591,13 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
        /* test the mapping for validity */
        ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
        ppd = ppd_from_ibp(ibp);
-       sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
        dd = dd_from_ppd(ppd);
+
+       sl = rdma_ah_get_sl(ah_attr);
+       if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
+               return -EINVAL;
+
+       sc5 = ibp->sl_to_sc[sl];
        if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
                return -EINVAL;
        return 0;
index ac116d63e4661adf03662bdfb2cd598cc179ce3c..f2f11e652dcd2a751d10397c8c65d6be8a53b53e 100644 (file)
@@ -723,6 +723,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
                attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
        struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
        struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
        struct devx_obj *obj;
        int err;
 
@@ -754,10 +755,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
 
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
        if (err)
-               goto obj_free;
+               goto obj_destroy;
 
        return 0;
 
+obj_destroy:
+       mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
 obj_free:
        kfree(obj);
        return err;
index 444d16520506a1773f01dc0b89087091d867e939..0b34e909505f5fa4c6a1404227a137ae94e54aa9 100644 (file)
@@ -2951,7 +2951,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
        struct srp_target_port *target = host_to_target(scmnd->device->host);
        struct srp_rdma_ch *ch;
-       int i;
+       int i, j;
        u8 status;
 
        shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -2965,8 +2965,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
-               for (i = 0; i < target->req_ring_size; ++i) {
-                       struct srp_request *req = &ch->req_ring[i];
+               for (j = 0; j < target->req_ring_size; ++j) {
+                       struct srp_request *req = &ch->req_ring[j];
 
                        srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
                }
index 6f62da2909ec0f07eb7cd7d1a76122bc11ed13a7..6caee807cafabf6955053ee817c30f70571d3ef5 100644 (file)
@@ -75,8 +75,7 @@ MODULE_LICENSE("GPL");
  */
 
 
-static unsigned char atakbd_keycode[0x72] = {  /* American layout */
-       [0]      = KEY_GRAVE,
+static unsigned char atakbd_keycode[0x73] = {  /* American layout */
        [1]      = KEY_ESC,
        [2]      = KEY_1,
        [3]      = KEY_2,
@@ -117,9 +116,9 @@ static unsigned char atakbd_keycode[0x72] = {       /* American layout */
        [38]     = KEY_L,
        [39]     = KEY_SEMICOLON,
        [40]     = KEY_APOSTROPHE,
-       [41]     = KEY_BACKSLASH,       /* FIXME, '#' */
+       [41]     = KEY_GRAVE,
        [42]     = KEY_LEFTSHIFT,
-       [43]     = KEY_GRAVE,           /* FIXME: '~' */
+       [43]     = KEY_BACKSLASH,
        [44]     = KEY_Z,
        [45]     = KEY_X,
        [46]     = KEY_C,
@@ -145,45 +144,34 @@ static unsigned char atakbd_keycode[0x72] = {     /* American layout */
        [66]     = KEY_F8,
        [67]     = KEY_F9,
        [68]     = KEY_F10,
-       [69]     = KEY_ESC,
-       [70]     = KEY_DELETE,
-       [71]     = KEY_KP7,
-       [72]     = KEY_KP8,
-       [73]     = KEY_KP9,
+       [71]     = KEY_HOME,
+       [72]     = KEY_UP,
        [74]     = KEY_KPMINUS,
-       [75]     = KEY_KP4,
-       [76]     = KEY_KP5,
-       [77]     = KEY_KP6,
+       [75]     = KEY_LEFT,
+       [77]     = KEY_RIGHT,
        [78]     = KEY_KPPLUS,
-       [79]     = KEY_KP1,
-       [80]     = KEY_KP2,
-       [81]     = KEY_KP3,
-       [82]     = KEY_KP0,
-       [83]     = KEY_KPDOT,
-       [90]     = KEY_KPLEFTPAREN,
-       [91]     = KEY_KPRIGHTPAREN,
-       [92]     = KEY_KPASTERISK,      /* FIXME */
-       [93]     = KEY_KPASTERISK,
-       [94]     = KEY_KPPLUS,
-       [95]     = KEY_HELP,
+       [80]     = KEY_DOWN,
+       [82]     = KEY_INSERT,
+       [83]     = KEY_DELETE,
        [96]     = KEY_102ND,
-       [97]     = KEY_KPASTERISK,      /* FIXME */
-       [98]     = KEY_KPSLASH,
+       [97]     = KEY_UNDO,
+       [98]     = KEY_HELP,
        [99]     = KEY_KPLEFTPAREN,
        [100]    = KEY_KPRIGHTPAREN,
        [101]    = KEY_KPSLASH,
        [102]    = KEY_KPASTERISK,
-       [103]    = KEY_UP,
-       [104]    = KEY_KPASTERISK,      /* FIXME */
-       [105]    = KEY_LEFT,
-       [106]    = KEY_RIGHT,
-       [107]    = KEY_KPASTERISK,      /* FIXME */
-       [108]    = KEY_DOWN,
-       [109]    = KEY_KPASTERISK,      /* FIXME */
-       [110]    = KEY_KPASTERISK,      /* FIXME */
-       [111]    = KEY_KPASTERISK,      /* FIXME */
-       [112]    = KEY_KPASTERISK,      /* FIXME */
-       [113]    = KEY_KPASTERISK       /* FIXME */
+       [103]    = KEY_KP7,
+       [104]    = KEY_KP8,
+       [105]    = KEY_KP9,
+       [106]    = KEY_KP4,
+       [107]    = KEY_KP5,
+       [108]    = KEY_KP6,
+       [109]    = KEY_KP1,
+       [110]    = KEY_KP2,
+       [111]    = KEY_KP3,
+       [112]    = KEY_KP0,
+       [113]    = KEY_KPDOT,
+       [114]    = KEY_KPENTER,
 };
 
 static struct input_dev *atakbd_dev;
@@ -191,21 +179,15 @@ static struct input_dev *atakbd_dev;
 static void atakbd_interrupt(unsigned char scancode, char down)
 {
 
-       if (scancode < 0x72) {          /* scancodes < 0xf2 are keys */
+       if (scancode < 0x73) {          /* scancodes < 0xf3 are keys */
 
                // report raw events here?
 
                scancode = atakbd_keycode[scancode];
 
-               if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
-                       input_report_key(atakbd_dev, scancode, 1);
-                       input_report_key(atakbd_dev, scancode, 0);
-                       input_sync(atakbd_dev);
-               } else {
-                       input_report_key(atakbd_dev, scancode, down);
-                       input_sync(atakbd_dev);
-               }
-       } else                          /* scancodes >= 0xf2 are mouse data, most likely */
+               input_report_key(atakbd_dev, scancode, down);
+               input_sync(atakbd_dev);
+       } else                          /* scancodes >= 0xf3 are mouse data, most likely */
                printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
 
        return;
index 96a887f336982f7efc0d971e145ded52829b2dc8..eb14ddf693467b4619a9501aa5e712a9b45dfcdf 100644 (file)
@@ -410,7 +410,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
        min = abs->minimum;
        max = abs->maximum;
 
-       if ((min != 0 || max != 0) && max <= min) {
+       if ((min != 0 || max != 0) && max < min) {
                printk(KERN_DEBUG
                       "%s: invalid abs[%02x] min:%d max:%d\n",
                       UINPUT_NAME, code, min, max);
index 44f57cf6675bbf10ed6fd5bc8b9900936e76a086..2d95e8d93cc761aefb102217473d940faf1e4d02 100644 (file)
@@ -1178,6 +1178,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
 static const char * const middle_button_pnp_ids[] = {
        "LEN2131", /* ThinkPad P52 w/ NFC */
        "LEN2132", /* ThinkPad P52 */
+       "LEN2133", /* ThinkPad P72 w/ NFC */
+       "LEN2134", /* ThinkPad P72 */
        NULL
 };
 
index 80e69bb8283e4b417c677637889654e0b1cadf30..83ac8c128192846f21a9aeb40d80b39869a1c245 100644 (file)
@@ -241,6 +241,9 @@ static int __maybe_unused egalax_ts_suspend(struct device *dev)
        struct i2c_client *client = to_i2c_client(dev);
        int ret;
 
+       if (device_may_wakeup(dev))
+               return enable_irq_wake(client->irq);
+
        ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN);
        return ret > 0 ? 0 : ret;
 }
@@ -249,6 +252,9 @@ static int __maybe_unused egalax_ts_resume(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
 
+       if (device_may_wakeup(dev))
+               return disable_irq_wake(client->irq);
+
        return egalax_wake_up_device(client);
 }
 
index 73e47d93e7a05a5f7b9ebc79096081c573656746..bee0dfb7b93b1b219b393405d32870cab9964cf1 100644 (file)
@@ -3069,7 +3069,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
                return 0;
 
        offset_mask = pte_pgsize - 1;
-       __pte       = *pte & PM_ADDR_MASK;
+       __pte       = __sme_clr(*pte & PM_ADDR_MASK);
 
        return (__pte & ~offset_mask) | (iova & offset_mask);
 }
index 83504dd8100ab2a80d7f0e737e50266de41add32..954dad29e6e8fca910b0ebd24171591f2acd0831 100644 (file)
@@ -965,6 +965,7 @@ void bch_prio_write(struct cache *ca);
 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
 
 extern struct workqueue_struct *bcache_wq;
+extern struct workqueue_struct *bch_journal_wq;
 extern struct mutex bch_register_lock;
 extern struct list_head bch_cache_sets;
 
index 6116bbf870d8ef9004717bf5d04803895d085bab..522c7426f3a05cee10df0e984521472849b9b707 100644 (file)
@@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca)
 
                closure_get(&ca->set->cl);
                INIT_WORK(&ja->discard_work, journal_discard_work);
-               schedule_work(&ja->discard_work);
+               queue_work(bch_journal_wq, &ja->discard_work);
        }
 }
 
@@ -592,7 +592,7 @@ static void journal_write_done(struct closure *cl)
                : &j->w[0];
 
        __closure_wake_up(&w->wait);
-       continue_at_nobarrier(cl, journal_write, system_wq);
+       continue_at_nobarrier(cl, journal_write, bch_journal_wq);
 }
 
 static void journal_write_unlock(struct closure *cl)
@@ -627,7 +627,7 @@ static void journal_write_unlocked(struct closure *cl)
                spin_unlock(&c->journal.lock);
 
                btree_flush_write(c);
-               continue_at(cl, journal_write, system_wq);
+               continue_at(cl, journal_write, bch_journal_wq);
                return;
        }
 
index 94c756c66bd7216a6d83b67eaef891269f965467..30ba9aeb5ee8345ac192e34e51e67beae2127950 100644 (file)
@@ -47,6 +47,7 @@ static int bcache_major;
 static DEFINE_IDA(bcache_device_idx);
 static wait_queue_head_t unregister_wait;
 struct workqueue_struct *bcache_wq;
+struct workqueue_struct *bch_journal_wq;
 
 #define BTREE_MAX_PAGES                (256 * 1024 / PAGE_SIZE)
 /* limitation of partitions number on single bcache device */
@@ -2341,6 +2342,9 @@ static void bcache_exit(void)
                kobject_put(bcache_kobj);
        if (bcache_wq)
                destroy_workqueue(bcache_wq);
+       if (bch_journal_wq)
+               destroy_workqueue(bch_journal_wq);
+
        if (bcache_major)
                unregister_blkdev(bcache_major, "bcache");
        unregister_reboot_notifier(&reboot);
@@ -2370,6 +2374,10 @@ static int __init bcache_init(void)
        if (!bcache_wq)
                goto err;
 
+       bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+       if (!bch_journal_wq)
+               goto err;
+
        bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
        if (!bcache_kobj)
                goto err;
index 69dddeab124c2e1ac075dc24c14ebe725647e3a3..5936de71883fb7f637b0192bf3144910d6e193d5 100644 (file)
@@ -1455,8 +1455,8 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
                if (hints_valid) {
                        r = dm_array_cursor_next(&cmd->hint_cursor);
                        if (r) {
-                               DMERR("dm_array_cursor_next for hint failed");
-                               goto out;
+                               dm_array_cursor_end(&cmd->hint_cursor);
+                               hints_valid = false;
                        }
                }
 
index a534133717254a88eb91e025c7731e408fbbdfad..e13d991e9fb52eff6176e2a275c8a8d2342b6701 100644 (file)
@@ -3009,8 +3009,13 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
 
 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
 {
-       if (from_cblock(new_size) > from_cblock(cache->cache_size))
-               return true;
+       if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
+               if (cache->sized) {
+                       DMERR("%s: unable to extend cache due to missing cache table reload",
+                             cache_device_name(cache));
+                       return false;
+               }
+       }
 
        /*
         * We can't drop a dirty block when shrinking the cache.
index d94ba6f72ff59e3723cc67337ff6bca21a472fda..419362c2d8aca1b95e745633f937ecd2a3325aed 100644 (file)
@@ -806,19 +806,19 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 }
 
 static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
-                        const char *attached_handler_name, char **error)
+                        const char **attached_handler_name, char **error)
 {
        struct request_queue *q = bdev_get_queue(bdev);
        int r;
 
        if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
 retain:
-               if (attached_handler_name) {
+               if (*attached_handler_name) {
                        /*
                         * Clear any hw_handler_params associated with a
                         * handler that isn't already attached.
                         */
-                       if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
+                       if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
                                kfree(m->hw_handler_params);
                                m->hw_handler_params = NULL;
                        }
@@ -830,7 +830,8 @@ static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
                         * handler instead of the original table passed in.
                         */
                        kfree(m->hw_handler_name);
-                       m->hw_handler_name = attached_handler_name;
+                       m->hw_handler_name = *attached_handler_name;
+                       *attached_handler_name = NULL;
                }
        }
 
@@ -867,7 +868,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
        struct pgpath *p;
        struct multipath *m = ti->private;
        struct request_queue *q;
-       const char *attached_handler_name;
+       const char *attached_handler_name = NULL;
 
        /* we need at least a path arg */
        if (as->argc < 1) {
@@ -890,7 +891,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
        attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
        if (attached_handler_name || m->hw_handler_name) {
                INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
-               r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
+               r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
                if (r) {
                        dm_put_device(ti, p->path.dev);
                        goto bad;
@@ -905,6 +906,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 
        return p;
  bad:
+       kfree(attached_handler_name);
        free_pgpath(p);
        return ERR_PTR(r);
 }
index 5ba067fa0c729bc89b7789bd35648b1189003b27..c44925e4e4813d246d0d208fef5587019e4afb17 100644 (file)
@@ -3353,7 +3353,7 @@ static const char *sync_str(enum sync_state state)
 };
 
 /* Return enum sync_state for @mddev derived from @recovery flags */
-static const enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
+static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
 {
        if (test_bit(MD_RECOVERY_FROZEN, &recovery))
                return st_frozen;
index 74f6770c70b12404e965345ec561318bf2225fcb..20b0776e39ef3307aa5c418016afff4758850a68 100644 (file)
@@ -832,10 +832,8 @@ static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
        if (r) {
                DMERR("could not get size of metadata device");
                pmd->metadata_reserve = max_blocks;
-       } else {
-               sector_div(total, 10);
-               pmd->metadata_reserve = min(max_blocks, total);
-       }
+       } else
+               pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
 }
 
 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
index 127fe6eb91d9832289124a399b03b2d6c88715c3..a3ef1f50a4b3496dcfbe7cb4332a3bab9a3acc56 100644 (file)
@@ -115,14 +115,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
        if (sev == NULL)
                return;
 
-       /*
-        * If the event has been added to the fh->subscribed list, but its
-        * add op has not completed yet elems will be 0, treat this as
-        * not being subscribed.
-        */
-       if (!sev->elems)
-               return;
-
        /* Increase event sequence number on fh. */
        fh->sequence++;
 
@@ -208,6 +200,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
        struct v4l2_subscribed_event *sev, *found_ev;
        unsigned long flags;
        unsigned i;
+       int ret = 0;
 
        if (sub->type == V4L2_EVENT_ALL)
                return -EINVAL;
@@ -225,31 +218,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
        sev->flags = sub->flags;
        sev->fh = fh;
        sev->ops = ops;
+       sev->elems = elems;
+
+       mutex_lock(&fh->subscribe_lock);
 
        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
-       if (!found_ev)
-               list_add(&sev->list, &fh->subscribed);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
        if (found_ev) {
+               /* Already listening */
                kvfree(sev);
-               return 0; /* Already listening */
+               goto out_unlock;
        }
 
        if (sev->ops && sev->ops->add) {
-               int ret = sev->ops->add(sev, elems);
+               ret = sev->ops->add(sev, elems);
                if (ret) {
-                       sev->ops = NULL;
-                       v4l2_event_unsubscribe(fh, sub);
-                       return ret;
+                       kvfree(sev);
+                       goto out_unlock;
                }
        }
 
-       /* Mark as ready for use */
-       sev->elems = elems;
+       spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+       list_add(&sev->list, &fh->subscribed);
+       spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
-       return 0;
+out_unlock:
+       mutex_unlock(&fh->subscribe_lock);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
 
@@ -288,6 +286,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
                return 0;
        }
 
+       mutex_lock(&fh->subscribe_lock);
+
        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 
        sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -305,6 +305,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
        if (sev && sev->ops && sev->ops->del)
                sev->ops->del(sev);
 
+       mutex_unlock(&fh->subscribe_lock);
+
        kvfree(sev);
 
        return 0;
index 3895999bf8805c3c208a4f042cb59b786a6ec44c..c91a7bd3ecfc7d14853b56a8de273d35ac0ff870 100644 (file)
@@ -45,6 +45,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
        INIT_LIST_HEAD(&fh->available);
        INIT_LIST_HEAD(&fh->subscribed);
        fh->sequence = -1;
+       mutex_init(&fh->subscribe_lock);
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_init);
 
@@ -90,6 +91,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
                return;
        v4l_disable_media_source(fh->vdev);
        v4l2_event_unsubscribe_all(fh);
+       mutex_destroy(&fh->subscribe_lock);
        fh->vdev = NULL;
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_exit);
index abf9e884386c4cc42edee4113bb1a989167dba55..f57f5de5420647619714c65861896252700d302c 100644 (file)
@@ -235,7 +235,7 @@ int mmc_of_parse(struct mmc_host *host)
                        host->caps |= MMC_CAP_NEEDS_POLL;
 
                ret = mmc_gpiod_request_cd(host, "cd", 0, true,
-                                          cd_debounce_delay_ms,
+                                          cd_debounce_delay_ms * 1000,
                                           &cd_gpio_invert);
                if (!ret)
                        dev_info(host->parent, "Got CD GPIO\n");
index 2a833686784b6b459d9744b366ef22cb5ca1279c..86803a3a04dc9609a0c55de2df03f3a9e8cb1341 100644 (file)
@@ -271,7 +271,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
        if (debounce) {
                ret = gpiod_set_debounce(desc, debounce);
                if (ret < 0)
-                       ctx->cd_debounce_delay_ms = debounce;
+                       ctx->cd_debounce_delay_ms = debounce / 1000;
        }
 
        if (gpio_invert)
index 890f192dedbdcc9cb693c2c4159c1b0727cad216..5389c48218820166209a7de463c01084366b1fe4 100644 (file)
@@ -498,7 +498,8 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
 
 static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
 {
-       if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible &&
+       if ((of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible ||
+           of_device_get_match_data(&pdev->dev) == &of_rcar_r8a7795_compatible) &&
            !soc_device_match(gen3_soc_whitelist))
                return -ENODEV;
 
index 0d87e11e7f1d84537fe43d95249b1bd3a2ce291d..ee28ec9e0abaddd13053da9fda5c0cefc722555d 100644 (file)
@@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev,
 static void bond_slave_arr_handler(struct work_struct *work);
 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
                                  int mod);
+static void bond_netdev_notify_work(struct work_struct *work);
 
 /*---------------------------- General routines -----------------------------*/
 
@@ -1170,9 +1171,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
                }
        }
 
-       /* don't change skb->dev for link-local packets */
-       if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+       /* Link-local multicast packets should be passed to the
+        * stack on the link they arrive as well as pass them to the
+        * bond-master device. These packets are mostly usable when
+        * stack receives it with the link on which they arrive
+        * (e.g. LLDP) they also must be available on master. Some of
+        * the use cases include (but are not limited to): LLDP agents
+        * that must be able to operate both on enslaved interfaces as
+        * well as on bonds themselves; linux bridges that must be able
+        * to process/pass BPDUs from attached bonds when any kind of
+        * STP version is enabled on the network.
+        */
+       if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
+               struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+               if (nskb) {
+                       nskb->dev = bond->dev;
+                       nskb->queue_mapping = 0;
+                       netif_rx(nskb);
+               }
                return RX_HANDLER_PASS;
+       }
        if (bond_should_deliver_exact_match(skb, slave, bond))
                return RX_HANDLER_EXACT;
 
@@ -1269,6 +1288,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
                        return NULL;
                }
        }
+       INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+
        return slave;
 }
 
@@ -1276,6 +1297,7 @@ static void bond_free_slave(struct slave *slave)
 {
        struct bonding *bond = bond_get_bond_by_slave(slave);
 
+       cancel_delayed_work_sync(&slave->notify_work);
        if (BOND_MODE(bond) == BOND_MODE_8023AD)
                kfree(SLAVE_AD_INFO(slave));
 
@@ -1297,39 +1319,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
        info->link_failure_count = slave->link_failure_count;
 }
 
-static void bond_netdev_notify(struct net_device *dev,
-                              struct netdev_bonding_info *info)
-{
-       rtnl_lock();
-       netdev_bonding_info_change(dev, info);
-       rtnl_unlock();
-}
-
 static void bond_netdev_notify_work(struct work_struct *_work)
 {
-       struct netdev_notify_work *w =
-               container_of(_work, struct netdev_notify_work, work.work);
+       struct slave *slave = container_of(_work, struct slave,
+                                          notify_work.work);
+
+       if (rtnl_trylock()) {
+               struct netdev_bonding_info binfo;
 
-       bond_netdev_notify(w->dev, &w->bonding_info);
-       dev_put(w->dev);
-       kfree(w);
+               bond_fill_ifslave(slave, &binfo.slave);
+               bond_fill_ifbond(slave->bond, &binfo.master);
+               netdev_bonding_info_change(slave->dev, &binfo);
+               rtnl_unlock();
+       } else {
+               queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
+       }
 }
 
 void bond_queue_slave_event(struct slave *slave)
 {
-       struct bonding *bond = slave->bond;
-       struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
-
-       if (!nnw)
-               return;
-
-       dev_hold(slave->dev);
-       nnw->dev = slave->dev;
-       bond_fill_ifslave(slave, &nnw->bonding_info.slave);
-       bond_fill_ifbond(bond, &nnw->bonding_info.master);
-       INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
-
-       queue_delayed_work(slave->bond->wq, &nnw->work, 0);
+       queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
 }
 
 void bond_lower_state_changed(struct slave *slave)
index d93c790bfbe8d5c37de8183f3527962e72e6e175..ad534b90ef21b6a269f57472fe5beee70f6fd917 100644 (file)
@@ -1107,7 +1107,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
                b53_get_vlan_entry(dev, vid, vl);
 
                vl->members |= BIT(port);
-               if (untagged)
+               if (untagged && !dsa_is_cpu_port(ds, port))
                        vl->untag |= BIT(port);
                else
                        vl->untag &= ~BIT(port);
@@ -1149,7 +1149,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
                                pvid = 0;
                }
 
-               if (untagged)
+               if (untagged && !dsa_is_cpu_port(ds, port))
                        vl->untag &= ~(BIT(port));
 
                b53_set_vlan_entry(dev, vid, vl);
index 29b5774dd32d47e5ad2c43e78b599493f011d129..25621a218f20754c29963c7ded3075c0c89a232c 100644 (file)
@@ -2185,25 +2185,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ena_netpoll(struct net_device *netdev)
-{
-       struct ena_adapter *adapter = netdev_priv(netdev);
-       int i;
-
-       /* Dont schedule NAPI if the driver is in the middle of reset
-        * or netdev is down.
-        */
-
-       if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
-           test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
-               return;
-
-       for (i = 0; i < adapter->num_queues; i++)
-               napi_schedule(&adapter->ena_napi[i].napi);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
                            struct net_device *sb_dev,
                            select_queue_fallback_t fallback)
@@ -2369,9 +2350,6 @@ static const struct net_device_ops ena_netdev_ops = {
        .ndo_change_mtu         = ena_change_mtu,
        .ndo_set_mac_address    = NULL,
        .ndo_validate_addr      = eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ena_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
 };
 
 static int ena_device_validate_params(struct ena_adapter *adapter,
index 116997a8b5930600f57f330b140785f2acd411ad..00332a1ea84b9e770febc38dd69af42e3d5bba90 100644 (file)
@@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
        int i, ret;
        unsigned long esar_base;
        unsigned char *esar;
+       const char *desc;
 
        if (dec_lance_debug && version_printed++ == 0)
                printk(version);
@@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
         */
        switch (type) {
        case ASIC_LANCE:
-               printk("%s: IOASIC onboard LANCE", name);
+               desc = "IOASIC onboard LANCE";
                break;
        case PMAD_LANCE:
-               printk("%s: PMAD-AA", name);
+               desc = "PMAD-AA";
                break;
        case PMAX_LANCE:
-               printk("%s: PMAX onboard LANCE", name);
+               desc = "PMAX onboard LANCE";
                break;
        }
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = esar[i * 4];
 
-       printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
+       printk("%s: %s, addr = %pM, irq = %d\n",
+              name, desc, dev->dev_addr, dev->irq);
 
        dev->netdev_ops = &lance_netdev_ops;
        dev->watchdog_timeo = 5*HZ;
index 147045757b103309e2656f36bc3337acef7b383f..c57238fce86377b0095b8bfae082b1e776d06c20 100644 (file)
@@ -1069,9 +1069,6 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
 {
        u32 reg;
 
-       /* Stop monitoring MPD interrupt */
-       intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
        /* Disable RXCHK, active filters and Broadcom tag matching */
        reg = rxchk_readl(priv, RXCHK_CONTROL);
        reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
@@ -1081,6 +1078,17 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
        /* Clear the MagicPacket detection logic */
        mpd_enable_set(priv, false);
 
+       reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
+       if (reg & INTRL2_0_MPD)
+               netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+       if (reg & INTRL2_0_BRCM_MATCH_TAG) {
+               reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+                                 RXCHK_BRCM_TAG_MATCH_MASK;
+               netdev_info(priv->netdev,
+                           "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
+       }
+
        netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
 }
 
@@ -1105,7 +1113,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct bcm_sysport_tx_ring *txr;
        unsigned int ring, ring_bit;
-       u32 reg;
 
        priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
                          ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1131,16 +1138,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
        if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
                bcm_sysport_tx_reclaim_all(priv);
 
-       if (priv->irq0_stat & INTRL2_0_MPD)
-               netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
-
-       if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
-               reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
-                                 RXCHK_BRCM_TAG_MATCH_MASK;
-               netdev_info(priv->netdev,
-                           "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
-       }
-
        if (!priv->is_lite)
                goto out;
 
@@ -2641,9 +2638,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
        /* UniMAC receive needs to be turned on */
        umac_enable_set(priv, CMD_RX_EN, 1);
 
-       /* Enable the interrupt wake-up source */
-       intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
        netif_dbg(priv, wol, ndev, "entered WOL mode\n");
 
        return 0;
index 61957b0bbd8c9f46773ff26ac9d14759b96c3960..e2d92548226ad01eda69694876a85478e539c930 100644 (file)
@@ -1884,8 +1884,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
                        tx_pkts++;
                        /* return full budget so NAPI will complete. */
-                       if (unlikely(tx_pkts > bp->tx_wake_thresh))
+                       if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
                                rx_pkts = budget;
+                               raw_cons = NEXT_RAW_CMP(raw_cons);
+                               break;
+                       }
                } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
                        if (likely(budget))
                                rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
@@ -1913,7 +1916,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                }
                raw_cons = NEXT_RAW_CMP(raw_cons);
 
-               if (rx_pkts == budget)
+               if (rx_pkts && rx_pkts == budget)
                        break;
        }
 
@@ -2027,8 +2030,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
        while (1) {
                work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
 
-               if (work_done >= budget)
+               if (work_done >= budget) {
+                       if (!budget)
+                               BNXT_CP_DB_REARM(cpr->cp_doorbell,
+                                                cpr->cp_raw_cons);
                        break;
+               }
 
                if (!bnxt_has_work(bp, cpr)) {
                        if (napi_complete_done(napi, work_done))
@@ -3010,10 +3017,11 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
 {
        struct pci_dev *pdev = bp->pdev;
 
-       dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
-                         bp->hwrm_cmd_resp_dma_addr);
-
-       bp->hwrm_cmd_resp_addr = NULL;
+       if (bp->hwrm_cmd_resp_addr) {
+               dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+                                 bp->hwrm_cmd_resp_dma_addr);
+               bp->hwrm_cmd_resp_addr = NULL;
+       }
 }
 
 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -4643,7 +4651,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
                                      FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
                enables |= ring_grps ?
                           FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
-               enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+               enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
 
                req->num_rx_rings = cpu_to_le16(rx_rings);
                req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -8614,7 +8622,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
        *max_tx = hw_resc->max_tx_rings;
        *max_rx = hw_resc->max_rx_rings;
        *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
-                       hw_resc->max_irqs);
+                       hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
        *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
        max_ring_grps = hw_resc->max_hw_ring_grps;
        if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -9050,6 +9058,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        bnxt_clear_int_mode(bp);
 
 init_err_pci_clean:
+       bnxt_free_hwrm_resources(bp);
        bnxt_cleanup_pci(bp);
 
 init_err_free:
index ddc98c359488c29e03defe70ebac5a03d6a9415d..a85d2be986af48a7143566a5aa9739129275b126 100644 (file)
@@ -98,13 +98,13 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
        for (i = 0; i < max_tc; i++) {
-               u8 qidx;
+               u8 qidx = bp->tc_to_qidx[i];
 
                req.enables |= cpu_to_le32(
-                       QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
+                       QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
+                       qidx);
 
                memset(&cos2bw, 0, sizeof(cos2bw));
-               qidx = bp->tc_to_qidx[i];
                cos2bw.queue_id = bp->q_info[qidx].queue_id;
                if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
                        cos2bw.tsa =
index f1a86b42261796865fc659bc042c6763a2393a6f..58b9744c405805d6c4ebf9c1bf5c840cee3ee3d3 100644 (file)
@@ -2160,6 +2160,7 @@ static void macb_configure_dma(struct macb *bp)
                else
                        dmacfg &= ~GEM_BIT(TXCOEN);
 
+               dmacfg &= ~GEM_BIT(ADDR64);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
                if (bp->hw_dma_cap & HW_DMA_CAP_64B)
                        dmacfg |= GEM_BIT(ADDR64);
index a19172dbe6be272d9a168302bab18f551a687a17..c34ea385fe4a5b40d9f2905780ed6224fccfb11c 100644 (file)
@@ -2159,6 +2159,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EPERM;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
+               if (t.cmd != CHELSIO_SET_QSET_PARAMS)
+                       return -EINVAL;
                if (t.qset_idx >= SGE_QSETS)
                        return -EINVAL;
                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
@@ -2258,6 +2260,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
 
+               if (t.cmd != CHELSIO_GET_QSET_PARAMS)
+                       return -EINVAL;
+
                /* Display qsets for all ports when offload enabled */
                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
                        q1 = 0;
@@ -2303,6 +2308,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EBUSY;
                if (copy_from_user(&edata, useraddr, sizeof(edata)))
                        return -EFAULT;
+               if (edata.cmd != CHELSIO_SET_QSET_NUM)
+                       return -EINVAL;
                if (edata.val < 1 ||
                        (edata.val > 1 && !(adapter->flags & USING_MSIX)))
                        return -EINVAL;
@@ -2343,6 +2350,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EPERM;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
+               if (t.cmd != CHELSIO_LOAD_FW)
+                       return -EINVAL;
                /* Check t.len sanity ? */
                fw_data = memdup_user(useraddr + sizeof(t), t.len);
                if (IS_ERR(fw_data))
@@ -2366,6 +2375,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EBUSY;
                if (copy_from_user(&m, useraddr, sizeof(m)))
                        return -EFAULT;
+               if (m.cmd != CHELSIO_SETMTUTAB)
+                       return -EINVAL;
                if (m.nmtus != NMTUS)
                        return -EINVAL;
                if (m.mtus[0] < 81)     /* accommodate SACK */
@@ -2407,6 +2418,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EBUSY;
                if (copy_from_user(&m, useraddr, sizeof(m)))
                        return -EFAULT;
+               if (m.cmd != CHELSIO_SET_PM)
+                       return -EINVAL;
                if (!is_power_of_2(m.rx_pg_sz) ||
                        !is_power_of_2(m.tx_pg_sz))
                        return -EINVAL; /* not power of 2 */
@@ -2440,6 +2453,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EIO;    /* need the memory controllers */
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
+               if (t.cmd != CHELSIO_GET_MEM)
+                       return -EINVAL;
                if ((t.addr & 7) || (t.len & 7))
                        return -EINVAL;
                if (t.mem_id == MEM_CM)
@@ -2492,6 +2507,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EAGAIN;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
+               if (t.cmd != CHELSIO_SET_TRACE_FILTER)
+                       return -EINVAL;
 
                tp = (const struct trace_params *)&t.sip;
                if (t.config_tx)
index b8f75a22fb6c97183d89c97f7ce2a9f0cd5ead5d..f152da1ce0464c5c065010813213e5f60eb111af 100644 (file)
@@ -753,7 +753,6 @@ struct cpl_abort_req_rss {
 };
 
 struct cpl_abort_req_rss6 {
-       WR_HDR;
        union opcode_tid ot;
        __be32 srqidx_status;
 };
index 74d122616e76a2d876793a610dd6672e3e8b9910..534787291b44f17a6d47c480e35ee7da5e2e6319 100644 (file)
@@ -4002,8 +4002,6 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter)
        netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                   NETIF_F_TSO | NETIF_F_TSO6 |
                                   NETIF_F_GSO_UDP_TUNNEL;
-       netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
-       netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
 
        dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
                 be16_to_cpu(port));
@@ -4025,8 +4023,6 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
        adapter->vxlan_port = 0;
 
        netdev->hw_enc_features = 0;
-       netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
-       netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
 }
 
 static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
@@ -5320,6 +5316,7 @@ static void be_netdev_init(struct net_device *netdev)
        struct be_adapter *adapter = netdev_priv(netdev);
 
        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+               NETIF_F_GSO_UDP_TUNNEL |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                NETIF_F_HW_VLAN_CTAG_TX;
        if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
index 2708297e7795929e798ee73e5f4cc2c1907be12d..bf9b9fd6d2a07c720597fb72d1d6c6091a3369fd 100644 (file)
@@ -1158,7 +1158,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
                napi_disable(&fep->napi);
                netif_tx_lock_bh(ndev);
                fec_restart(ndev);
-               netif_wake_queue(ndev);
+               netif_tx_wake_all_queues(ndev);
                netif_tx_unlock_bh(ndev);
                napi_enable(&fep->napi);
        }
@@ -1273,7 +1273,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
                /* Since we have freed up a buffer, the ring is no longer full
                 */
-               if (netif_queue_stopped(ndev)) {
+               if (netif_tx_queue_stopped(nq)) {
                        entries_free = fec_enet_get_free_txdesc_num(txq);
                        if (entries_free >= txq->tx_wake_threshold)
                                netif_tx_wake_queue(nq);
@@ -1746,7 +1746,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
                        napi_disable(&fep->napi);
                        netif_tx_lock_bh(ndev);
                        fec_restart(ndev);
-                       netif_wake_queue(ndev);
+                       netif_tx_wake_all_queues(ndev);
                        netif_tx_unlock_bh(ndev);
                        napi_enable(&fep->napi);
                }
@@ -2247,7 +2247,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
                napi_disable(&fep->napi);
                netif_tx_lock_bh(ndev);
                fec_restart(ndev);
-               netif_wake_queue(ndev);
+               netif_tx_wake_all_queues(ndev);
                netif_tx_unlock_bh(ndev);
                napi_enable(&fep->napi);
        }
index a051e582d541ad2e2191567b9b3b3d7d69a90fc0..79d03f8ee7b180d2cab9a2a647254461c0a0cb08 100644 (file)
@@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
        if (cb->type == DESC_TYPE_SKB)
                dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
                                 ring_to_dma_dir(ring));
-       else
+       else if (cb->length)
                dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
                               ring_to_dma_dir(ring));
 }
index f56855e63c961333f20f842a3558a920d201ccc9..28e907831b0eddbf760e0edb579ae7ae708520e0 100644 (file)
@@ -40,9 +40,9 @@
 #define SKB_TMP_LEN(SKB) \
        (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
 
-static void fill_v2_desc(struct hnae_ring *ring, void *priv,
-                        int size, dma_addr_t dma, int frag_end,
-                        int buf_num, enum hns_desc_type type, int mtu)
+static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
+                           int send_sz, dma_addr_t dma, int frag_end,
+                           int buf_num, enum hns_desc_type type, int mtu)
 {
        struct hnae_desc *desc = &ring->desc[ring->next_to_use];
        struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
        desc_cb->type = type;
 
        desc->addr = cpu_to_le64(dma);
-       desc->tx.send_size = cpu_to_le16((u16)size);
+       desc->tx.send_size = cpu_to_le16((u16)send_sz);
 
        /* config bd buffer end */
        hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
@@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
        ring_ptr_move_fw(ring, next_to_use);
 }
 
+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+                        int size, dma_addr_t dma, int frag_end,
+                        int buf_num, enum hns_desc_type type, int mtu)
+{
+       fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
+                       buf_num, type, mtu);
+}
+
 static const struct acpi_device_id hns_enet_acpi_match[] = {
        { "HISI00C1", 0 },
        { "HISI00C2", 0 },
@@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
 
        /* when the frag size is bigger than hardware, split this frag */
        for (k = 0; k < frag_buf_num; k++)
-               fill_v2_desc(ring, priv,
-                            (k == frag_buf_num - 1) ?
+               fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
+                               (k == frag_buf_num - 1) ?
                                        sizeoflast : BD_MAX_SEND_SIZE,
-                            dma + BD_MAX_SEND_SIZE * k,
-                            frag_end && (k == frag_buf_num - 1) ? 1 : 0,
-                            buf_num,
-                            (type == DESC_TYPE_SKB && !k) ?
+                               dma + BD_MAX_SEND_SIZE * k,
+                               frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+                               buf_num,
+                               (type == DESC_TYPE_SKB && !k) ?
                                        DESC_TYPE_SKB : DESC_TYPE_PAGE,
-                            mtu);
+                               mtu);
 }
 
 netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
@@ -1495,21 +1503,6 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
        return phy_mii_ioctl(phy_dev, ifr, cmd);
 }
 
-/* use only for netconsole to poll with the device without interrupt */
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hns_nic_poll_controller(struct net_device *ndev)
-{
-       struct hns_nic_priv *priv = netdev_priv(ndev);
-       unsigned long flags;
-       int i;
-
-       local_irq_save(flags);
-       for (i = 0; i < priv->ae_handle->q_num * 2; i++)
-               napi_schedule(&priv->ring_data[i].napi);
-       local_irq_restore(flags);
-}
-#endif
-
 static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
                                    struct net_device *ndev)
 {
@@ -1962,9 +1955,6 @@ static const struct net_device_ops hns_nic_netdev_ops = {
        .ndo_set_features = hns_nic_set_features,
        .ndo_fix_features = hns_nic_fix_features,
        .ndo_get_stats64 = hns_nic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = hns_nic_poll_controller,
-#endif
        .ndo_set_rx_mode = hns_nic_set_rx_mode,
        .ndo_select_queue = hns_nic_select_queue,
 };
index 09e9da10b786549b6232d8069c4e45857b95fd8c..4a8f82938ed5b87c8da6b09e88e08d387c652f0c 100644 (file)
@@ -789,23 +789,6 @@ static void hinic_get_stats64(struct net_device *netdev,
        stats->tx_errors  = nic_tx_stats->tx_dropped;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hinic_netpoll(struct net_device *netdev)
-{
-       struct hinic_dev *nic_dev = netdev_priv(netdev);
-       int i, num_qps;
-
-       num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
-       for (i = 0; i < num_qps; i++) {
-               struct hinic_txq *txq = &nic_dev->txqs[i];
-               struct hinic_rxq *rxq = &nic_dev->rxqs[i];
-
-               napi_schedule(&txq->napi);
-               napi_schedule(&rxq->napi);
-       }
-}
-#endif
-
 static const struct net_device_ops hinic_netdev_ops = {
        .ndo_open = hinic_open,
        .ndo_stop = hinic_close,
@@ -818,9 +801,6 @@ static const struct net_device_ops hinic_netdev_ops = {
        .ndo_start_xmit = hinic_xmit_frame,
        .ndo_tx_timeout = hinic_tx_timeout,
        .ndo_get_stats64 = hinic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = hinic_netpoll,
-#endif
 };
 
 static void netdev_features_init(struct net_device *netdev)
index ba580bfae512326d346e96e67718bec10ed716cc..03f64f40b2a3e0a3d9f3432cb6fbdd7bcd264a4c 100644 (file)
@@ -921,17 +921,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
        return rx;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ehea_netpoll(struct net_device *dev)
-{
-       struct ehea_port *port = netdev_priv(dev);
-       int i;
-
-       for (i = 0; i < port->num_def_qps; i++)
-               napi_schedule(&port->port_res[i].napi);
-}
-#endif
-
 static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
 {
        struct ehea_port_res *pr = param;
@@ -2953,9 +2942,6 @@ static const struct net_device_ops ehea_netdev_ops = {
        .ndo_open               = ehea_open,
        .ndo_stop               = ehea_stop,
        .ndo_start_xmit         = ehea_start_xmit,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ehea_netpoll,
-#endif
        .ndo_get_stats64        = ehea_get_stats64,
        .ndo_set_mac_address    = ehea_set_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
index 4f0daf67b18df2dcf11d7a406ebc1982e0fee466..699ef942b615c3a22053ba1419399317a6642cfa 100644 (file)
@@ -2207,19 +2207,6 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget)
        return frames_processed;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ibmvnic_netpoll_controller(struct net_device *dev)
-{
-       struct ibmvnic_adapter *adapter = netdev_priv(dev);
-       int i;
-
-       replenish_pools(netdev_priv(dev));
-       for (i = 0; i < adapter->req_rx_queues; i++)
-               ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
-                                    adapter->rx_scrq[i]);
-}
-#endif
-
 static int wait_for_reset(struct ibmvnic_adapter *adapter)
 {
        int rc, ret;
@@ -2292,9 +2279,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
        .ndo_set_mac_address    = ibmvnic_set_mac,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_tx_timeout         = ibmvnic_tx_timeout,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ibmvnic_netpoll_controller,
-#endif
        .ndo_change_mtu         = ibmvnic_change_mtu,
        .ndo_features_check     = ibmvnic_features_check,
 };
index f27d73a7bf16f084ea8f4ab12d905b031f514cf8..6cdd58d9d461bd34ab62e5f68a43141f5cd70323 100644 (file)
@@ -3196,11 +3196,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                return budget;
 
        /* all work done, exit the polling mode */
-       napi_complete_done(napi, work_done);
-       if (adapter->rx_itr_setting & 1)
-               ixgbe_set_itr(q_vector);
-       if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
+       if (likely(napi_complete_done(napi, work_done))) {
+               if (adapter->rx_itr_setting & 1)
+                       ixgbe_set_itr(q_vector);
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       ixgbe_irq_enable_queues(adapter,
+                                               BIT_ULL(q_vector->v_idx));
+       }
 
        return min(work_done, budget - 1);
 }
index 38cc01beea79ee53e8cd2970092f0575b1b1f374..a74002b43b5183baa4d829b489a1c757e60ae209 100644 (file)
@@ -1725,7 +1725,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
 }
 
 /* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
                               int ip_hdr_len, int l4_proto)
 {
        u32 command;
@@ -2600,14 +2600,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                int ip_hdr_len = 0;
                u8 l4_proto;
+               __be16 l3_proto = vlan_get_protocol(skb);
 
-               if (skb->protocol == htons(ETH_P_IP)) {
+               if (l3_proto == htons(ETH_P_IP)) {
                        struct iphdr *ip4h = ip_hdr(skb);
 
                        /* Calculate IPv4 checksum and L4 checksum */
                        ip_hdr_len = ip4h->ihl;
                        l4_proto = ip4h->protocol;
-               } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               } else if (l3_proto == htons(ETH_P_IPV6)) {
                        struct ipv6hdr *ip6h = ipv6_hdr(skb);
 
                        /* Read l4_protocol from one of IPv6 extra headers */
@@ -2619,7 +2620,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
                }
 
                return mvpp2_txq_desc_csum(skb_network_offset(skb),
-                               skb->protocol, ip_hdr_len, l4_proto);
+                                          l3_proto, ip_hdr_len, l4_proto);
        }
 
        return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
index db2cfcd21d43c52445ed66438a2b41be53b3f011..0f189f87385923226966a57537b7fa643e540a98 100644 (file)
@@ -54,6 +54,7 @@
 #include "en_stats.h"
 #include "en/fs.h"
 
+extern const struct net_device_ops mlx5e_netdev_ops;
 struct page_pool;
 
 #define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
index bbf69e859b78c1322a9e218797114c9483a1a060..1431232c9a09ef1ddecdf1e497142f82544e4de0 100644 (file)
@@ -16,6 +16,8 @@ struct mlx5e_tc_table {
 
        DECLARE_HASHTABLE(mod_hdr_tbl, 8);
        DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+       struct notifier_block     netdevice_nb;
 };
 
 struct mlx5e_flow_table {
index 54118b77dc1f6d478c5b08e7c01526dd3e5dc740..f291d1bf15586b9bff40a3cc2ea8c62c71e62f05 100644 (file)
@@ -4315,7 +4315,7 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
        }
 }
 
-static const struct net_device_ops mlx5e_netdev_ops = {
+const struct net_device_ops mlx5e_netdev_ops = {
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
        .ndo_start_xmit          = mlx5e_xmit,
index 9fed54017659de3b0f58a1287a7eff605c077f6c..85796727093eec1ddfc3873cc345f2b8d861ea01 100644 (file)
@@ -1368,6 +1368,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
                        *match_level = MLX5_MATCH_L2;
                }
+       } else {
+               MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
        }
 
        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -2946,14 +2949,71 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
        return 0;
 }
 
+static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
+                                             struct mlx5e_priv *peer_priv)
+{
+       struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
+       struct mlx5e_hairpin_entry *hpe;
+       u16 peer_vhca_id;
+       int bkt;
+
+       if (!same_hw_devs(priv, peer_priv))
+               return;
+
+       peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+
+       hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
+               if (hpe->peer_vhca_id == peer_vhca_id)
+                       hpe->hp->pair->peer_gone = true;
+       }
+}
+
+static int mlx5e_tc_netdev_event(struct notifier_block *this,
+                                unsigned long event, void *ptr)
+{
+       struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+       struct mlx5e_flow_steering *fs;
+       struct mlx5e_priv *peer_priv;
+       struct mlx5e_tc_table *tc;
+       struct mlx5e_priv *priv;
+
+       if (ndev->netdev_ops != &mlx5e_netdev_ops ||
+           event != NETDEV_UNREGISTER ||
+           ndev->reg_state == NETREG_REGISTERED)
+               return NOTIFY_DONE;
+
+       tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
+       fs = container_of(tc, struct mlx5e_flow_steering, tc);
+       priv = container_of(fs, struct mlx5e_priv, fs);
+       peer_priv = netdev_priv(ndev);
+       if (priv == peer_priv ||
+           !(priv->netdev->features & NETIF_F_HW_TC))
+               return NOTIFY_DONE;
+
+       mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
+
+       return NOTIFY_DONE;
+}
+
 int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 {
        struct mlx5e_tc_table *tc = &priv->fs.tc;
+       int err;
 
        hash_init(tc->mod_hdr_tbl);
        hash_init(tc->hairpin_tbl);
 
-       return rhashtable_init(&tc->ht, &tc_ht_params);
+       err = rhashtable_init(&tc->ht, &tc_ht_params);
+       if (err)
+               return err;
+
+       tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
+       if (register_netdevice_notifier(&tc->netdevice_nb)) {
+               tc->netdevice_nb.notifier_call = NULL;
+               mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
+       }
+
+       return err;
 }
 
 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
@@ -2969,6 +3029,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
 {
        struct mlx5e_tc_table *tc = &priv->fs.tc;
 
+       if (tc->netdevice_nb.notifier_call)
+               unregister_netdevice_notifier(&tc->netdevice_nb);
+
        rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
 
        if (!IS_ERR_OR_NULL(tc->t)) {
index 2b252cde5cc2db3cc6c90566e22d2187f328a99e..ea7dedc2d5adfc48081387619222c8e07da43bd4 100644 (file)
@@ -2000,7 +2000,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
        u32 max_guarantee = 0;
        int i;
 
-       for (i = 0; i <= esw->total_vports; i++) {
+       for (i = 0; i < esw->total_vports; i++) {
                evport = &esw->vports[i];
                if (!evport->enabled || evport->info.min_rate < max_guarantee)
                        continue;
@@ -2020,7 +2020,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
        int err;
        int i;
 
-       for (i = 0; i <= esw->total_vports; i++) {
+       for (i = 0; i < esw->total_vports; i++) {
                evport = &esw->vports[i];
                if (!evport->enabled)
                        continue;
index d2f76070ea7ca87bcc98c1382cd95504e0a60104..a1ee9a8a769e8a96e2c25f84454772159bb4bd16 100644 (file)
@@ -475,7 +475,8 @@ static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
 
        for (i = 0; i < hp->num_channels; i++) {
                mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
-               mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+               if (!hp->peer_gone)
+                       mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
        }
 }
 
@@ -567,6 +568,8 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
                                       MLX5_RQC_STATE_RST, 0, 0);
 
        /* unset peer SQs */
+       if (hp->peer_gone)
+               return;
        for (i = 0; i < hp->num_channels; i++)
                mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
                                       MLX5_SQC_STATE_RST, 0, 0);
index 4d271fb3de3d2ba6f556f9869cb3720ce7a8f43c..5890fdfd62c377d9444d04589f0bf455d4ef6229 100644 (file)
@@ -718,14 +718,17 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
        memset(&active_cqns, 0, sizeof(active_cqns));
 
        while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
-               u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
 
-               switch (event_type) {
-               case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+               /* Command interface completion events are always received on
+                * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
+                * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
+                */
+               switch (q->num) {
+               case MLXSW_PCI_EQ_ASYNC_NUM:
                        mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
                        q->u.eq.ev_cmd_count++;
                        break;
-               case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+               case MLXSW_PCI_EQ_COMP_NUM:
                        cqn = mlxsw_pci_eqe_cqn_get(eqe);
                        set_bit(cqn, active_cqns);
                        cq_handle = true;
index b492152c8881bdd41a56f0a2b361a4b6bb86e40d..30bb2c533cecc42c5c75b3f21b36d2150baac083 100644 (file)
@@ -4845,6 +4845,8 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
                upper_dev = info->upper_dev;
                if (info->linking)
                        break;
+               if (is_vlan_dev(upper_dev))
+                       mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
                if (netif_is_macvlan(upper_dev))
                        mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
                break;
index 8ed38fd5a8520e0e125d96bc2ee7c53d891a5969..c6d29fdbb880f1964847e674dc512d2f0311f0b7 100644 (file)
@@ -2077,14 +2077,17 @@ nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
        return true;
 }
 
-static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
 {
        struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
        struct nfp_net *nn = r_vec->nfp_net;
        struct nfp_net_dp *dp = &nn->dp;
+       unsigned int budget = 512;
 
-       while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
+       while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
                continue;
+
+       return budget;
 }
 
 static void nfp_ctrl_poll(unsigned long arg)
@@ -2096,9 +2099,13 @@ static void nfp_ctrl_poll(unsigned long arg)
        __nfp_ctrl_tx_queued(r_vec);
        spin_unlock_bh(&r_vec->lock);
 
-       nfp_ctrl_rx(r_vec);
-
-       nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+       if (nfp_ctrl_rx(r_vec)) {
+               nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+       } else {
+               tasklet_schedule(&r_vec->tasklet);
+               nn_dp_warn(&r_vec->nfp_net->dp,
+                          "control message budget exceeded!\n");
+       }
 }
 
 /* Setup and Configuration
index 69aa7fc392c5e4ad1cbcd9025f56bffdf3aa92c7..59c70be22a84c11262388529cf0ddf09887cea96 100644 (file)
@@ -72,9 +72,6 @@ static void netxen_schedule_work(struct netxen_adapter *adapter,
                work_func_t func, int delay);
 static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
 static int netxen_nic_poll(struct napi_struct *napi, int budget);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev);
-#endif
 
 static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
 static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
@@ -581,9 +578,6 @@ static const struct net_device_ops netxen_netdev_ops = {
        .ndo_tx_timeout    = netxen_tx_timeout,
        .ndo_fix_features = netxen_fix_features,
        .ndo_set_features = netxen_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = netxen_nic_poll_controller,
-#endif
 };
 
 static inline bool netxen_function_zero(struct pci_dev *pdev)
@@ -2402,23 +2396,6 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
        return work_done;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev)
-{
-       int ring;
-       struct nx_host_sds_ring *sds_ring;
-       struct netxen_adapter *adapter = netdev_priv(netdev);
-       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
-
-       disable_irq(adapter->irq);
-       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-               netxen_intr(adapter->irq, sds_ring);
-       }
-       enable_irq(adapter->irq);
-}
-#endif
-
 static int
 nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
 {
index 9b3ef00e57824a5fd72e7ae06e3d1d44b59b176b..a71382687ef2bedca91adcd50d8c16dcbd1cd3c7 100644 (file)
@@ -11987,6 +11987,7 @@ struct public_global {
        u32 running_bundle_id;
        s32 external_temperature;
        u32 mdump_reason;
+       u64 reserved;
        u32 data_ptr;
        u32 data_size;
 };
index 17f3dfa2cc94084552a6f66cae0044c46af35fec..e860bdf0f7524195afce3607fafe8075f733bd0a 100644 (file)
@@ -1710,7 +1710,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
 
                cm_info->local_ip[0] = ntohl(iph->daddr);
                cm_info->remote_ip[0] = ntohl(iph->saddr);
-               cm_info->ip_version = TCP_IPV4;
+               cm_info->ip_version = QED_TCP_IPV4;
 
                ip_hlen = (iph->ihl) * sizeof(u32);
                *payload_len = ntohs(iph->tot_len) - ip_hlen;
@@ -1730,7 +1730,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
                        cm_info->remote_ip[i] =
                            ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
                }
-               cm_info->ip_version = TCP_IPV6;
+               cm_info->ip_version = QED_TCP_IPV6;
 
                ip_hlen = sizeof(*ip6h);
                *payload_len = ntohs(ip6h->payload_len);
index be941cfaa2d4fdf9f50eedd6467033617bfcdba7..c71391b9c757a1b03f55f21cc641c4718bbce719 100644 (file)
@@ -228,7 +228,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
                                 num_cons, "Toggle");
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
-                          "Failed to allocate toogle bits, rc = %d\n", rc);
+                          "Failed to allocate toggle bits, rc = %d\n", rc);
                goto free_cq_map;
        }
 
index 7d7a64c55ff1fc2033e4ee273be0b40d10df8e74..f9167d1354bbef3ccf2e972e8c002e64bbc24cce 100644 (file)
@@ -140,23 +140,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
 
 static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
 {
-       enum roce_flavor flavor;
-
        switch (roce_mode) {
        case ROCE_V1:
-               flavor = PLAIN_ROCE;
-               break;
+               return PLAIN_ROCE;
        case ROCE_V2_IPV4:
-               flavor = RROCE_IPV4;
-               break;
+               return RROCE_IPV4;
        case ROCE_V2_IPV6:
-               flavor = ROCE_V2_IPV6;
-               break;
+               return RROCE_IPV6;
        default:
-               flavor = MAX_ROCE_MODE;
-               break;
+               return MAX_ROCE_FLAVOR;
        }
-       return flavor;
 }
 
 static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
index 8de644b4721efd63a7d3efa410139228d0b2f739..77b6248ad3b97d3a45caf27825faddabf9695a5b 100644 (file)
@@ -154,7 +154,7 @@ qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
 static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
                                  struct qed_tunnel_info *p_src)
 {
-       enum tunnel_clss type;
+       int type;
 
        p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
        p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
index 3d42696598202591794613afebea7ed42d51be6e..be118d057b92c5ad494690b7c80c98140dbb8e7a 100644 (file)
@@ -413,7 +413,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
        }
 
        if (!p_iov->b_pre_fp_hsi &&
-           ETH_HSI_VER_MINOR &&
            (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
                DP_INFO(p_hwfn,
                        "PF is using older fastpath HSI; %02x.%02x is configured\n",
@@ -572,7 +571,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
 static void
 __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
                           struct qed_tunn_update_type *p_src,
-                          enum qed_tunn_clss mask, u8 *p_cls)
+                          enum qed_tunn_mode mask, u8 *p_cls)
 {
        if (p_src->b_update_mode) {
                p_req->tun_mode_update_mask |= BIT(mask);
@@ -587,7 +586,7 @@ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
 static void
 qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
                         struct qed_tunn_update_type *p_src,
-                        enum qed_tunn_clss mask,
+                        enum qed_tunn_mode mask,
                         u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
                         u8 *p_update_port, u16 *p_udp_port)
 {
index 81312924df1407092fd1dd43cc0555d16976160b..0c443ea98479ac0971a6e36c28bd8bde2f080bfa 100644 (file)
@@ -1800,7 +1800,8 @@ struct qlcnic_hardware_ops {
        int (*config_loopback) (struct qlcnic_adapter *, u8);
        int (*clear_loopback) (struct qlcnic_adapter *, u8);
        int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
-       void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+       void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
+                                u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
        int (*get_board_info) (struct qlcnic_adapter *);
        void (*set_mac_filter_count) (struct qlcnic_adapter *);
        void (*free_mac_list) (struct qlcnic_adapter *);
@@ -2064,9 +2065,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
 }
 
 static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
-                                       u64 *addr, u16 id)
+                                       u64 *addr, u16 vlan,
+                                       struct qlcnic_host_tx_ring *tx_ring)
 {
-       adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+       adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
 }
 
 static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
index 569d54ededeca2e6472a3f8502e91c45be8e5232..a79d84f9910229515acf900e8286f71b8a010ae1 100644 (file)
@@ -2135,7 +2135,8 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
 }
 
 void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
-                                 u16 vlan_id)
+                                 u16 vlan_id,
+                                 struct qlcnic_host_tx_ring *tx_ring)
 {
        u8 mac[ETH_ALEN];
        memcpy(&mac, addr, ETH_ALEN);
index b75a812468569de7728fd9c654b6f1c7e353729f..73fe2f64491de24408d893a3eb91ffb691fe4f03 100644 (file)
@@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
 int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
 int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
 int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+                                 u16 vlan, struct qlcnic_host_tx_ring *ring);
 int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
 int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
 void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
index 4bb33af8e2b3a956db02847bfebfa6ef2362bb3b..56a3bd9e37dcd773e9d8b1d52366eaa971506fa6 100644 (file)
@@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
                         struct net_device *netdev);
 void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
-                              u64 *uaddr, u16 vlan_id);
+                              u64 *uaddr, u16 vlan_id,
+                              struct qlcnic_host_tx_ring *tx_ring);
 int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
                                     struct ethtool_coalesce *);
 int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
index 84dd83031a1bfcc31c0f8a908fef0c1bb3e7d155..9647578cbe6a8fec82409c4eadf9aee02f6c7971 100644 (file)
@@ -268,13 +268,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
 }
 
 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
-                              u16 vlan_id)
+                              u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
 {
        struct cmd_desc_type0 *hwdesc;
        struct qlcnic_nic_req *req;
        struct qlcnic_mac_req *mac_req;
        struct qlcnic_vlan_req *vlan_req;
-       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
        u32 producer;
        u64 word;
 
@@ -301,7 +300,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
 
 static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                               struct cmd_desc_type0 *first_desc,
-                              struct sk_buff *skb)
+                              struct sk_buff *skb,
+                              struct qlcnic_host_tx_ring *tx_ring)
 {
        struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
        struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -335,7 +335,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                    tmp_fil->vlan_id == vlan_id) {
                        if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
                                qlcnic_change_filter(adapter, &src_addr,
-                                                    vlan_id);
+                                                    vlan_id, tx_ring);
                        tmp_fil->ftime = jiffies;
                        return;
                }
@@ -350,7 +350,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
        if (!fil)
                return;
 
-       qlcnic_change_filter(adapter, &src_addr, vlan_id);
+       qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
        fil->ftime = jiffies;
        fil->vlan_id = vlan_id;
        memcpy(fil->faddr, &src_addr, ETH_ALEN);
@@ -766,7 +766,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        }
 
        if (adapter->drv_mac_learn)
-               qlcnic_send_filter(adapter, first_desc, skb);
+               qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
 
        tx_ring->tx_stats.tx_bytes += skb->len;
        tx_ring->tx_stats.xmit_called++;
index 2d38d1ac2aae58fd210030c7b143011f76b921cc..dbd48012224f2467d27134eedc692a68b92b1a04 100644 (file)
@@ -59,9 +59,6 @@ static int qlcnic_close(struct net_device *netdev);
 static void qlcnic_tx_timeout(struct net_device *netdev);
 static void qlcnic_attach_work(struct work_struct *work);
 static void qlcnic_fwinit_work(struct work_struct *work);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev);
-#endif
 
 static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
 static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -545,9 +542,6 @@ static const struct net_device_ops qlcnic_netdev_ops = {
        .ndo_udp_tunnel_add     = qlcnic_add_vxlan_port,
        .ndo_udp_tunnel_del     = qlcnic_del_vxlan_port,
        .ndo_features_check     = qlcnic_features_check,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = qlcnic_poll_controller,
-#endif
 #ifdef CONFIG_QLCNIC_SRIOV
        .ndo_set_vf_mac         = qlcnic_sriov_set_vf_mac,
        .ndo_set_vf_rate        = qlcnic_sriov_set_vf_tx_rate,
@@ -3200,45 +3194,6 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev)
-{
-       struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       struct qlcnic_host_sds_ring *sds_ring;
-       struct qlcnic_recv_context *recv_ctx;
-       struct qlcnic_host_tx_ring *tx_ring;
-       int ring;
-
-       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
-               return;
-
-       recv_ctx = adapter->recv_ctx;
-
-       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-               qlcnic_disable_sds_intr(adapter, sds_ring);
-               napi_schedule(&sds_ring->napi);
-       }
-
-       if (adapter->flags & QLCNIC_MSIX_ENABLED) {
-               /* Only Multi-Tx queue capable devices need to
-                * schedule NAPI for TX rings
-                */
-               if ((qlcnic_83xx_check(adapter) &&
-                    (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
-                   (qlcnic_82xx_check(adapter) &&
-                    !qlcnic_check_multi_tx(adapter)))
-                       return;
-
-               for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
-                       tx_ring = &adapter->tx_ring[ring];
-                       qlcnic_disable_tx_intr(adapter, tx_ring);
-                       napi_schedule(&tx_ring->napi);
-               }
-       }
-}
-#endif
-
 static void
 qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
 {
index 7fd86d40a3374df1fba991ece10e6ec48bc197e1..11167abe5934d3a2d2d71f6cb0f7674d665d9d5b 100644 (file)
@@ -113,7 +113,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
        struct sk_buff *skbn;
 
        if (skb->dev->type == ARPHRD_ETHER) {
-               if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
+               if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
                        kfree_skb(skb);
                        return;
                }
@@ -147,7 +147,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
        }
 
        if (skb_headroom(skb) < required_headroom) {
-               if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
+               if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
                        return -ENOMEM;
        }
 
@@ -189,6 +189,9 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
        if (!skb)
                goto done;
 
+       if (skb->pkt_type == PACKET_LOOPBACK)
+               return RX_HANDLER_PASS;
+
        dev = skb->dev;
        port = rmnet_get_port(dev);
 
index ab30aaeac6d377e6303fafc57f820e3022ae7773..9a5e2969df6197cd3383e263b2336dca5faa1a2d 100644 (file)
@@ -4072,13 +4072,12 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
 
        genphy_soft_reset(dev->phydev);
 
-       /* It was reported that chip version 33 ends up with 10MBit/Half on a
+       /* It was reported that several chips end up with 10MBit/Half on a
         * 1GBit link after resuming from S3. For whatever reason the PHY on
-        * this chip doesn't properly start a renegotiation when soft-reset.
+        * these chips doesn't properly start a renegotiation when soft-reset.
         * Explicitly requesting a renegotiation fixes this.
         */
-       if (tp->mac_version == RTL_GIGA_MAC_VER_33 &&
-           dev->phydev->autoneg == AUTONEG_ENABLE)
+       if (dev->phydev->autoneg == AUTONEG_ENABLE)
                phy_restart_aneg(dev->phydev);
 }
 
@@ -4536,9 +4535,14 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
 
 static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
 {
-       /* Set DMA burst size and Interframe Gap Time */
-       RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
-               (InterFrameGap << TxInterFrameGapShift));
+       u32 val = TX_DMA_BURST << TxDMAShift |
+                 InterFrameGap << TxInterFrameGapShift;
+
+       if (tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
+           tp->mac_version != RTL_GIGA_MAC_VER_39)
+               val |= TXCFG_AUTO_FIFO;
+
+       RTL_W32(tp, TxConfig, val);
 }
 
 static void rtl_set_rx_max_size(struct rtl8169_private *tp)
@@ -5033,7 +5037,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 
        rtl_disable_clock_request(tp);
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
 
        /* Adjust EEE LED frequency */
@@ -5067,7 +5070,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
 
        rtl_disable_clock_request(tp);
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
        RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
        RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
@@ -5112,8 +5114,6 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
 
 static void rtl_hw_start_8168g(struct rtl8169_private *tp)
 {
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
        rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5211,8 +5211,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
        rtl_hw_aspm_clkreq_enable(tp, false);
        rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
        rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5295,8 +5293,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
 {
        rtl8168ep_stop_cmac(tp);
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
        rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
@@ -5618,7 +5614,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
        /* Force LAN exit from ASPM if Rx/Tx are not idle */
        RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
 
        rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -6869,8 +6864,10 @@ static int rtl8169_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
+       struct rtl8169_private *tp = netdev_priv(dev);
 
        rtl8169_net_suspend(dev);
+       clk_disable_unprepare(tp->clk);
 
        return 0;
 }
@@ -6898,6 +6895,9 @@ static int rtl8169_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
+       struct rtl8169_private *tp = netdev_priv(dev);
+
+       clk_prepare_enable(tp->clk);
 
        if (netif_running(dev))
                __rtl8169_resume(dev);
index 330233286e785254f5f29c87f9557a305974f606..3d0dd39c289e05b8a7a6778363461ef5698dc62b 100644 (file)
@@ -2206,29 +2206,6 @@ static void efx_fini_napi(struct efx_nic *efx)
                efx_fini_napi_channel(channel);
 }
 
-/**************************************************************************
- *
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void efx_netpoll(struct net_device *net_dev)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       struct efx_channel *channel;
-
-       efx_for_each_channel(channel, efx)
-               efx_schedule_channel(channel);
-}
-
-#endif
-
 /**************************************************************************
  *
  * Kernel net device interface
@@ -2509,9 +2486,6 @@ static const struct net_device_ops efx_netdev_ops = {
 #endif
        .ndo_get_phys_port_id   = efx_get_phys_port_id,
        .ndo_get_phys_port_name = efx_get_phys_port_name,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = efx_netpoll,
-#endif
        .ndo_setup_tc           = efx_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = efx_filter_rfs,
index dd5530a4f8c8936868aed7171bd9481f93730d76..03e2455c502eacd9a4fd5c7fd320a9edcf265f77 100644 (file)
@@ -2052,29 +2052,6 @@ static void ef4_fini_napi(struct ef4_nic *efx)
                ef4_fini_napi_channel(channel);
 }
 
-/**************************************************************************
- *
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void ef4_netpoll(struct net_device *net_dev)
-{
-       struct ef4_nic *efx = netdev_priv(net_dev);
-       struct ef4_channel *channel;
-
-       ef4_for_each_channel(channel, efx)
-               ef4_schedule_channel(channel);
-}
-
-#endif
-
 /**************************************************************************
  *
  * Kernel net device interface
@@ -2250,9 +2227,6 @@ static const struct net_device_ops ef4_netdev_ops = {
        .ndo_set_mac_address    = ef4_set_mac_address,
        .ndo_set_rx_mode        = ef4_set_rx_mode,
        .ndo_set_features       = ef4_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = ef4_netpoll,
-#endif
        .ndo_setup_tc           = ef4_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = ef4_filter_rfs,
index 16ec7af6ab7b3f53bd1e7165819c76b99736bf86..ba9df430fca6e45ea10db1dc9d762ce700102121 100644 (file)
@@ -966,6 +966,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                                 sizeof(struct yamdrv_ioctl_mcs));
                if (IS_ERR(ym))
                        return PTR_ERR(ym);
+               if (ym->cmd != SIOCYAMSMCS)
+                       return -EINVAL;
                if (ym->bitrate > YAM_MAXBITRATE) {
                        kfree(ym);
                        return -EINVAL;
@@ -981,6 +983,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
                         return -EFAULT;
 
+               if (yi.cmd != SIOCYAMSCFG)
+                       return -EINVAL;
                if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
                        return -EINVAL;         /* Cannot change this parameter when up */
                if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
index 23a52b9293f35eaec1d71063305a029ba466d819..cd1d8faccca5fb36b488312d734d5e42cebb7b1a 100644 (file)
@@ -1308,8 +1308,7 @@ static int adf7242_remove(struct spi_device *spi)
 {
        struct adf7242_local *lp = spi_get_drvdata(spi);
 
-       if (!IS_ERR_OR_NULL(lp->debugfs_root))
-               debugfs_remove_recursive(lp->debugfs_root);
+       debugfs_remove_recursive(lp->debugfs_root);
 
        cancel_delayed_work_sync(&lp->work);
        destroy_workqueue(lp->wqueue);
index 58299fb666ed4d84fb7ea01a76aabb86575ab939..0ff5a403a8dc356a359fb085be26379ca011b67b 100644 (file)
@@ -634,10 +634,9 @@ static int ca8210_test_int_driver_write(
        for (i = 0; i < len; i++)
                dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]);
 
-       fifo_buffer = kmalloc(len, GFP_KERNEL);
+       fifo_buffer = kmemdup(buf, len, GFP_KERNEL);
        if (!fifo_buffer)
                return -ENOMEM;
-       memcpy(fifo_buffer, buf, len);
        kfifo_in(&test->up_fifo, &fifo_buffer, 4);
        wake_up_interruptible(&priv->test.readq);
 
@@ -3044,8 +3043,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv)
 {
        struct ca8210_test *test = &priv->test;
 
-       if (!IS_ERR(test->ca8210_dfs_spi_int))
-               debugfs_remove(test->ca8210_dfs_spi_int);
+       debugfs_remove(test->ca8210_dfs_spi_int);
        kfifo_free(&test->up_fifo);
        dev_info(&priv->spi->dev, "Test interface removed\n");
 }
index e428277781ac4422bec2e8f47fd35476a85a74f7..04891429a55423e4ea4a3f5f5025631c01cda13a 100644 (file)
@@ -903,19 +903,19 @@ mcr20a_irq_clean_complete(void *context)
 
        switch (seq_state) {
        /* TX IRQ, RX IRQ and SEQ IRQ */
-       case (0x03):
+       case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
                if (lp->is_tx) {
                        lp->is_tx = 0;
                        dev_dbg(printdev(lp), "TX is done. No ACK\n");
                        mcr20a_handle_tx_complete(lp);
                }
                break;
-       case (0x05):
+       case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
                        /* rx is starting */
                        dev_dbg(printdev(lp), "RX is starting\n");
                        mcr20a_handle_rx(lp);
                break;
-       case (0x07):
+       case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
                if (lp->is_tx) {
                        /* tx is done */
                        lp->is_tx = 0;
@@ -927,7 +927,7 @@ mcr20a_irq_clean_complete(void *context)
                        mcr20a_handle_rx(lp);
                }
                break;
-       case (0x01):
+       case (DAR_IRQSTS1_SEQIRQ):
                if (lp->is_tx) {
                        dev_dbg(printdev(lp), "TX is starting\n");
                        mcr20a_handle_tx(lp);
index db1172db1e7cb7df0fed8a21b0a7757ae6c068b5..19ab8a7d1e4863dc5b0a5208c4b7e674ba2ff6de 100644 (file)
@@ -93,7 +93,12 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
        if (!netdev)
                return !phydev->suspended;
 
-       /* Don't suspend PHY if the attached netdev parent may wakeup.
+       if (netdev->wol_enabled)
+               return false;
+
+       /* As long as not all affected network drivers support the
+        * wol_enabled flag, let's check for hints that WoL is enabled.
+        * Don't suspend PHY if the attached netdev parent may wake up.
         * The parent may point to a PCI device, as in tg3 driver.
         */
        if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
@@ -1132,9 +1137,9 @@ void phy_detach(struct phy_device *phydev)
                sysfs_remove_link(&dev->dev.kobj, "phydev");
                sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev");
        }
+       phy_suspend(phydev);
        phydev->attached_dev->phydev = NULL;
        phydev->attached_dev = NULL;
-       phy_suspend(phydev);
        phydev->phylink = NULL;
 
        phy_led_triggers_unregister(phydev);
@@ -1168,12 +1173,13 @@ EXPORT_SYMBOL(phy_detach);
 int phy_suspend(struct phy_device *phydev)
 {
        struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
+       struct net_device *netdev = phydev->attached_dev;
        struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
        int ret = 0;
 
        /* If the device has WOL enabled, we cannot suspend the PHY */
        phy_ethtool_get_wol(phydev, &wol);
-       if (wol.wolopts)
+       if (wol.wolopts || (netdev && netdev->wol_enabled))
                return -EBUSY;
 
        if (phydev->drv && phydrv->suspend)
index 3ba5cf2a8a5fbe4bd978a114a6f46b698f8c0b57..7abca86c3aa9bf367a6e2da8125e203a6f853ef7 100644 (file)
@@ -717,6 +717,30 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
        return 0;
 }
 
+static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy,
+               phy_interface_t interface)
+{
+       int ret;
+
+       if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
+                   (pl->link_an_mode == MLO_AN_INBAND &&
+                    phy_interface_mode_is_8023z(interface))))
+               return -EINVAL;
+
+       if (pl->phydev)
+               return -EBUSY;
+
+       ret = phy_attach_direct(pl->netdev, phy, 0, interface);
+       if (ret)
+               return ret;
+
+       ret = phylink_bringup_phy(pl, phy);
+       if (ret)
+               phy_detach(phy);
+
+       return ret;
+}
+
 /**
  * phylink_connect_phy() - connect a PHY to the phylink instance
  * @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -734,31 +758,13 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
  */
 int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
 {
-       int ret;
-
-       if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
-                   (pl->link_an_mode == MLO_AN_INBAND &&
-                    phy_interface_mode_is_8023z(pl->link_interface))))
-               return -EINVAL;
-
-       if (pl->phydev)
-               return -EBUSY;
-
        /* Use PHY device/driver interface */
        if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
                pl->link_interface = phy->interface;
                pl->link_config.interface = pl->link_interface;
        }
 
-       ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface);
-       if (ret)
-               return ret;
-
-       ret = phylink_bringup_phy(pl, phy);
-       if (ret)
-               phy_detach(phy);
-
-       return ret;
+       return __phylink_connect_phy(pl, phy, pl->link_interface);
 }
 EXPORT_SYMBOL_GPL(phylink_connect_phy);
 
@@ -1672,7 +1678,9 @@ static void phylink_sfp_link_up(void *upstream)
 
 static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
 {
-       return phylink_connect_phy(upstream, phy);
+       struct phylink *pl = upstream;
+
+       return __phylink_connect_phy(upstream, phy, pl->link_config.interface);
 }
 
 static void phylink_sfp_disconnect_phy(void *upstream)
index 52fffb98fde9ac3fd05c7f6fd8e5dc123ecae341..6e13b8832bc7df94467211f07c1e7dba15a6e877 100644 (file)
@@ -1098,8 +1098,11 @@ static int sfp_hwmon_insert(struct sfp *sfp)
 
 static void sfp_hwmon_remove(struct sfp *sfp)
 {
-       hwmon_device_unregister(sfp->hwmon_dev);
-       kfree(sfp->hwmon_name);
+       if (!IS_ERR_OR_NULL(sfp->hwmon_dev)) {
+               hwmon_device_unregister(sfp->hwmon_dev);
+               sfp->hwmon_dev = NULL;
+               kfree(sfp->hwmon_name);
+       }
 }
 #else
 static int sfp_hwmon_insert(struct sfp *sfp)
index 6a047d30e8c69f81cfb234113d66d03d216878ac..d887016e54b68dc06a1bdae7d1a72391020baf0d 100644 (file)
@@ -1167,6 +1167,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
                return -EBUSY;
        }
 
+       if (dev == port_dev) {
+               NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
+               netdev_err(dev, "Cannot enslave team device to itself\n");
+               return -EINVAL;
+       }
+
        if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
            vlan_uses_dev(dev)) {
                NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
index e2648b5a3861e51dc6c40d19e1198a5f3f7ca7af..50e9cc19023a701bad861ac117665a024ba776b1 100644 (file)
@@ -181,6 +181,7 @@ struct tun_file {
        };
        struct napi_struct napi;
        bool napi_enabled;
+       bool napi_frags_enabled;
        struct mutex napi_mutex;        /* Protects access to the above napi */
        struct list_head next;
        struct tun_struct *detached;
@@ -313,32 +314,32 @@ static int tun_napi_poll(struct napi_struct *napi, int budget)
 }
 
 static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
-                         bool napi_en)
+                         bool napi_en, bool napi_frags)
 {
        tfile->napi_enabled = napi_en;
+       tfile->napi_frags_enabled = napi_en && napi_frags;
        if (napi_en) {
                netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
                               NAPI_POLL_WEIGHT);
                napi_enable(&tfile->napi);
-               mutex_init(&tfile->napi_mutex);
        }
 }
 
-static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_disable(struct tun_file *tfile)
 {
        if (tfile->napi_enabled)
                napi_disable(&tfile->napi);
 }
 
-static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_del(struct tun_file *tfile)
 {
        if (tfile->napi_enabled)
                netif_napi_del(&tfile->napi);
 }
 
-static bool tun_napi_frags_enabled(const struct tun_struct *tun)
+static bool tun_napi_frags_enabled(const struct tun_file *tfile)
 {
-       return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
+       return tfile->napi_frags_enabled;
 }
 
 #ifdef CONFIG_TUN_VNET_CROSS_LE
@@ -690,8 +691,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
        tun = rtnl_dereference(tfile->tun);
 
        if (tun && clean) {
-               tun_napi_disable(tun, tfile);
-               tun_napi_del(tun, tfile);
+               tun_napi_disable(tfile);
+               tun_napi_del(tfile);
        }
 
        if (tun && !tfile->detached) {
@@ -758,7 +759,7 @@ static void tun_detach_all(struct net_device *dev)
        for (i = 0; i < n; i++) {
                tfile = rtnl_dereference(tun->tfiles[i]);
                BUG_ON(!tfile);
-               tun_napi_disable(tun, tfile);
+               tun_napi_disable(tfile);
                tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
                tfile->socket.sk->sk_data_ready(tfile->socket.sk);
                RCU_INIT_POINTER(tfile->tun, NULL);
@@ -774,7 +775,7 @@ static void tun_detach_all(struct net_device *dev)
        synchronize_net();
        for (i = 0; i < n; i++) {
                tfile = rtnl_dereference(tun->tfiles[i]);
-               tun_napi_del(tun, tfile);
+               tun_napi_del(tfile);
                /* Drop read queue */
                tun_queue_purge(tfile);
                xdp_rxq_info_unreg(&tfile->xdp_rxq);
@@ -793,7 +794,7 @@ static void tun_detach_all(struct net_device *dev)
 }
 
 static int tun_attach(struct tun_struct *tun, struct file *file,
-                     bool skip_filter, bool napi)
+                     bool skip_filter, bool napi, bool napi_frags)
 {
        struct tun_file *tfile = file->private_data;
        struct net_device *dev = tun->dev;
@@ -866,7 +867,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
                tun_enable_queue(tfile);
        } else {
                sock_hold(&tfile->sk);
-               tun_napi_init(tun, tfile, napi);
+               tun_napi_init(tun, tfile, napi, napi_frags);
        }
 
        tun_set_real_num_queues(tun);
@@ -1709,7 +1710,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        int err;
        u32 rxhash = 0;
        int skb_xdp = 1;
-       bool frags = tun_napi_frags_enabled(tun);
+       bool frags = tun_napi_frags_enabled(tfile);
 
        if (!(tun->dev->flags & IFF_UP))
                return -EIO;
@@ -2534,7 +2535,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                        return err;
 
                err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
-                                ifr->ifr_flags & IFF_NAPI);
+                                ifr->ifr_flags & IFF_NAPI,
+                                ifr->ifr_flags & IFF_NAPI_FRAGS);
                if (err < 0)
                        return err;
 
@@ -2632,7 +2634,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                              (ifr->ifr_flags & TUN_FEATURES);
 
                INIT_LIST_HEAD(&tun->disabled);
-               err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
+               err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
+                                ifr->ifr_flags & IFF_NAPI_FRAGS);
                if (err < 0)
                        goto err_free_flow;
 
@@ -2781,7 +2784,8 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
                ret = security_tun_dev_attach_queue(tun->security);
                if (ret < 0)
                        goto unlock;
-               ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
+               ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
+                                tun->flags & IFF_NAPI_FRAGS);
        } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
                tun = rtnl_dereference(tfile->tun);
                if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
@@ -3199,6 +3203,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
                return -ENOMEM;
        }
 
+       mutex_init(&tfile->napi_mutex);
        RCU_INIT_POINTER(tfile->tun, NULL);
        tfile->flags = 0;
        tfile->ifindex = 0;
index e95dd12edec473198125c18c1cec6bc7d32ec368..023b8d0bf1754e833e08514b9cf6165ce3240984 100644 (file)
@@ -607,6 +607,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
        struct usbnet *dev = netdev_priv(net);
        u8 opt = 0;
 
+       if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+               return -EINVAL;
+
        if (wolinfo->wolopts & WAKE_PHY)
                opt |= AX_MONITOR_LINK;
        if (wolinfo->wolopts & WAKE_MAGIC)
index 9e8ad372f4190eed1d4e92891193d325c44fb47f..2207f7a7d1ffbb3fe6c4fefa101c4bb2ae01384e 100644 (file)
@@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
        struct usbnet *dev = netdev_priv(net);
        u8 opt = 0;
 
+       if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+               return -EINVAL;
+
        if (wolinfo->wolopts & WAKE_PHY)
                opt |= AX_MONITOR_MODE_RWLC;
        if (wolinfo->wolopts & WAKE_MAGIC)
index a9991c5f4736b6dd1e395527fbfeeecc3d0ab303..c3c9ba44e2a12a038e012a3374977b5a6189e3f1 100644 (file)
@@ -1401,19 +1401,10 @@ static int lan78xx_set_wol(struct net_device *netdev,
        if (ret < 0)
                return ret;
 
-       pdata->wol = 0;
-       if (wol->wolopts & WAKE_UCAST)
-               pdata->wol |= WAKE_UCAST;
-       if (wol->wolopts & WAKE_MCAST)
-               pdata->wol |= WAKE_MCAST;
-       if (wol->wolopts & WAKE_BCAST)
-               pdata->wol |= WAKE_BCAST;
-       if (wol->wolopts & WAKE_MAGIC)
-               pdata->wol |= WAKE_MAGIC;
-       if (wol->wolopts & WAKE_PHY)
-               pdata->wol |= WAKE_PHY;
-       if (wol->wolopts & WAKE_ARP)
-               pdata->wol |= WAKE_ARP;
+       if (wol->wolopts & ~WAKE_ALL)
+               return -EINVAL;
+
+       pdata->wol = wol->wolopts;
 
        device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
 
index 2cd71bdb6484c774659598fff1e99cd49181337b..f1b5201cc32075da27cf14d94b781c9f58c16189 100644 (file)
@@ -4506,6 +4506,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        if (!rtl_can_wakeup(tp))
                return -EOPNOTSUPP;
 
+       if (wol->wolopts & ~WAKE_ANY)
+               return -EINVAL;
+
        ret = usb_autopm_get_interface(tp->intf);
        if (ret < 0)
                goto out_set_wol;
index 05553d2524469f97e4a02bb48f43f6820ad2b3e5..ec287c9741e833eb2af7b2878ee08ff1941227b0 100644 (file)
@@ -731,6 +731,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
        int ret;
 
+       if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+               return -EINVAL;
+
        pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
 
        ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
@@ -1517,6 +1520,7 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
        if (pdata) {
+               cancel_work_sync(&pdata->set_multicast);
                netif_dbg(dev, ifdown, dev->net, "free pdata\n");
                kfree(pdata);
                pdata = NULL;
index 06b4d290784dad95f893b63da62d26e020fc060a..262e7a3c23cb67fbfd66b81ed0d26af0f0480d84 100644 (file)
@@ -774,6 +774,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
        int ret;
 
+       if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+               return -EINVAL;
+
        pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
 
        ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
index 9277a0f228dfa6de355c74d2652edcf2fb1d2f4b..35f39f23d88144195b8f007035f207d38b48c1fd 100644 (file)
@@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
        struct usbnet *dev = netdev_priv(net);
        u8 opt = 0;
 
+       if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+               return -EINVAL;
+
        if (wolinfo->wolopts & WAKE_PHY)
                opt |= SR_MONITOR_LINK;
        if (wolinfo->wolopts & WAKE_MAGIC)
index 76592090522607a4bdf5422b1d49ec99c6fd68ac..dab504ec5e502be401cbfe9a8e3f0f572c0220ba 100644 (file)
@@ -1699,17 +1699,6 @@ static void virtnet_stats(struct net_device *dev,
        tot->rx_frame_errors = dev->stats.rx_frame_errors;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void virtnet_netpoll(struct net_device *dev)
-{
-       struct virtnet_info *vi = netdev_priv(dev);
-       int i;
-
-       for (i = 0; i < vi->curr_queue_pairs; i++)
-               napi_schedule(&vi->rq[i].napi);
-}
-#endif
-
 static void virtnet_ack_link_announce(struct virtnet_info *vi)
 {
        rtnl_lock();
@@ -2447,9 +2436,6 @@ static const struct net_device_ops virtnet_netdev = {
        .ndo_get_stats64     = virtnet_stats,
        .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = virtnet_netpoll,
-#endif
        .ndo_bpf                = virtnet_xdp,
        .ndo_xdp_xmit           = virtnet_xdp_xmit,
        .ndo_features_check     = passthru_features_check,
index ababba37d735d62b7fe0500983f411d7806baa17..2b8da2b7e721e33f0683efa61e50ceac68d256e7 100644 (file)
@@ -3539,6 +3539,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
                nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
                nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TTL */
+               nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TTL_INHERIT */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TOS */
                nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_LEARNING */
@@ -3603,6 +3604,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
        }
 
        if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
+           nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
+                      !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
            nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
            nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
            nla_put_u8(skb, IFLA_VXLAN_LEARNING,
index 094cea775d0c0bd3090102cf5d511d08d718fef7..ef298d8525c5481c8df55f933cf0df6452ba41c0 100644 (file)
@@ -257,7 +257,7 @@ static const struct
        [I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO },
        [I2400M_MS_BUSY] = { "busy", -EBUSY },
        [I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ },
-       [I2400M_MS_UNINITIALIZED] = { "not unitialized", -EILSEQ },
+       [I2400M_MS_UNINITIALIZED] = { "uninitialized", -EILSEQ },
        [I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO },
        [I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO },
        [I2400M_MS_NO_RF] = { "no RF", -EIO },
index 6b0e1ec346cb60aacd8076600033cf9ee554c462..d46d57b989aec0d1fa869128b62022b7be401892 100644 (file)
@@ -1518,13 +1518,15 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                        }
                } else {
                        /* More than a single header/data pair were missed.
-                        * Report this error, and reset the controller to
+                        * Report this error. If running with open-source
+                        * firmware, then reset the controller to
                         * revive operation.
                         */
                        b43dbg(dev->wl,
                               "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
                               ring->index, firstused, slot);
-                       b43_controller_restart(dev, "Out of order TX");
+                       if (dev->fw.opensource)
+                               b43_controller_restart(dev, "Out of order TX");
                        return;
                }
        }
index 5916879849621dc079a1cb0cd61848201057527d..497fd766d87c83e0bcda39ff47b9a37177f79b09 100644 (file)
@@ -51,6 +51,7 @@
 
 static const struct iwl_base_params iwl1000_base_params = {
        .num_of_queues = IWLAGN_NUM_QUEUES,
+       .max_tfd_queue_size = 256,
        .eeprom_size = OTP_LOW_IMAGE_SIZE,
        .pll_cfg = true,
        .max_ll_items = OTP_MAX_LL_ITEMS_1000,
index 1068757ec42e4784942e69c00161ae4e1ee16548..07442ada6dd0e419bf4f29fabd3cb67186e47650 100644 (file)
@@ -520,7 +520,6 @@ struct mac80211_hwsim_data {
        int channels, idx;
        bool use_chanctx;
        bool destroy_on_close;
-       struct work_struct destroy_work;
        u32 portid;
        char alpha2[2];
        const struct ieee80211_regdomain *regd;
@@ -2935,8 +2934,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
        hwsim_radios_generation++;
        spin_unlock_bh(&hwsim_radio_lock);
 
-       if (idx > 0)
-               hwsim_mcast_new_radio(idx, info, param);
+       hwsim_mcast_new_radio(idx, info, param);
 
        return idx;
 
@@ -3565,30 +3563,27 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
        .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
 };
 
-static void destroy_radio(struct work_struct *work)
-{
-       struct mac80211_hwsim_data *data =
-               container_of(work, struct mac80211_hwsim_data, destroy_work);
-
-       hwsim_radios_generation++;
-       mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL);
-}
-
 static void remove_user_radios(u32 portid)
 {
        struct mac80211_hwsim_data *entry, *tmp;
+       LIST_HEAD(list);
 
        spin_lock_bh(&hwsim_radio_lock);
        list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
                if (entry->destroy_on_close && entry->portid == portid) {
-                       list_del(&entry->list);
+                       list_move(&entry->list, &list);
                        rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
                                               hwsim_rht_params);
-                       INIT_WORK(&entry->destroy_work, destroy_radio);
-                       queue_work(hwsim_wq, &entry->destroy_work);
+                       hwsim_radios_generation++;
                }
        }
        spin_unlock_bh(&hwsim_radio_lock);
+
+       list_for_each_entry_safe(entry, tmp, &list, list) {
+               list_del(&entry->list);
+               mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy),
+                                        NULL);
+       }
 }
 
 static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
@@ -3646,6 +3641,7 @@ static __net_init int hwsim_init_net(struct net *net)
 static void __net_exit hwsim_exit_net(struct net *net)
 {
        struct mac80211_hwsim_data *data, *tmp;
+       LIST_HEAD(list);
 
        spin_lock_bh(&hwsim_radio_lock);
        list_for_each_entry_safe(data, tmp, &hwsim_radios, list) {
@@ -3656,17 +3652,19 @@ static void __net_exit hwsim_exit_net(struct net *net)
                if (data->netgroup == hwsim_net_get_netgroup(&init_net))
                        continue;
 
-               list_del(&data->list);
+               list_move(&data->list, &list);
                rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
                                       hwsim_rht_params);
                hwsim_radios_generation++;
-               spin_unlock_bh(&hwsim_radio_lock);
+       }
+       spin_unlock_bh(&hwsim_radio_lock);
+
+       list_for_each_entry_safe(data, tmp, &list, list) {
+               list_del(&data->list);
                mac80211_hwsim_del_radio(data,
                                         wiphy_name(data->hw->wiphy),
                                         NULL);
-               spin_lock_bh(&hwsim_radio_lock);
        }
-       spin_unlock_bh(&hwsim_radio_lock);
 
        ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
 }
index cf6ffb1ba4a290e1561374c68af9b947215af0ea..22bc9d368728624ef70a0f009d38e8925380c872 100644 (file)
@@ -77,9 +77,8 @@ static void mt76x0_remove_interface(struct ieee80211_hw *hw,
 {
        struct mt76x0_dev *dev = hw->priv;
        struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
-       unsigned int wcid = mvif->group_wcid.idx;
 
-       dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
+       dev->vif_mask &= ~BIT(mvif->idx);
 }
 
 static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
index a46a1e94505d01d782efea328945d91252449981..936c0b3e0ba28ec1f6586a5bab86d403a86d0dd6 100644 (file)
@@ -241,8 +241,9 @@ struct xenvif_hash_cache {
 struct xenvif_hash {
        unsigned int alg;
        u32 flags;
+       bool mapping_sel;
        u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
-       u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
+       u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE];
        unsigned int size;
        struct xenvif_hash_cache cache;
 };
index 3c4c58b9fe76edfbf3d27fb5b6dbd0184ba706c0..0ccb021f1e78687d7c7a9814a05369aafe7c6508 100644 (file)
@@ -324,7 +324,8 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
 
        vif->hash.size = size;
-       memset(vif->hash.mapping, 0, sizeof(u32) * size);
+       memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
+              sizeof(u32) * size);
 
        return XEN_NETIF_CTRL_STATUS_SUCCESS;
 }
@@ -332,31 +333,49 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
 u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
                            u32 off)
 {
-       u32 *mapping = &vif->hash.mapping[off];
-       struct gnttab_copy copy_op = {
+       u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
+       unsigned int nr = 1;
+       struct gnttab_copy copy_op[2] = {{
                .source.u.ref = gref,
                .source.domid = vif->domid,
-               .dest.u.gmfn = virt_to_gfn(mapping),
                .dest.domid = DOMID_SELF,
-               .dest.offset = xen_offset_in_page(mapping),
-               .len = len * sizeof(u32),
+               .len = len * sizeof(*mapping),
                .flags = GNTCOPY_source_gref
-       };
+       }};
 
-       if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
+       if ((off + len < off) || (off + len > vif->hash.size) ||
+           len > XEN_PAGE_SIZE / sizeof(*mapping))
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
 
-       while (len-- != 0)
-               if (mapping[off++] >= vif->num_queues)
-                       return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+       copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
+       copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
+       if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
+               copy_op[1] = copy_op[0];
+               copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
+               copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
+               copy_op[1].dest.offset = 0;
+               copy_op[1].len = copy_op[0].len - copy_op[1].source.offset;
+               copy_op[0].len = copy_op[1].source.offset;
+               nr = 2;
+       }
 
-       if (copy_op.len != 0) {
-               gnttab_batch_copy(&copy_op, 1);
+       memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
+              vif->hash.size * sizeof(*mapping));
 
-               if (copy_op.status != GNTST_okay)
+       if (copy_op[0].len != 0) {
+               gnttab_batch_copy(copy_op, nr);
+
+               if (copy_op[0].status != GNTST_okay ||
+                   copy_op[nr - 1].status != GNTST_okay)
                        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
        }
 
+       while (len-- != 0)
+               if (mapping[off++] >= vif->num_queues)
+                       return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+       vif->hash.mapping_sel = !vif->hash.mapping_sel;
+
        return XEN_NETIF_CTRL_STATUS_SUCCESS;
 }
 
@@ -408,6 +427,8 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
        }
 
        if (vif->hash.size != 0) {
+               const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];
+
                seq_puts(m, "\nHash Mapping:\n");
 
                for (i = 0; i < vif->hash.size; ) {
@@ -420,7 +441,7 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
                        seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
 
                        for (j = 0; j < n; j++, i++)
-                               seq_printf(m, "%4u ", vif->hash.mapping[i]);
+                               seq_printf(m, "%4u ", mapping[i]);
 
                        seq_puts(m, "\n");
                }
index 92274c2372008a57ba12ca960bafa84cd2eac7b3..f6ae23fc3f6b086e60149befd9a3ca9500a48bf1 100644 (file)
@@ -162,7 +162,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
        if (size == 0)
                return skb_get_hash_raw(skb) % dev->real_num_tx_queues;
 
-       return vif->hash.mapping[skb_get_hash_raw(skb) % size];
+       return vif->hash.mapping[vif->hash.mapping_sel]
+                               [skb_get_hash_raw(skb) % size];
 }
 
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
index 5a9562881d4ef87ddfbd749555f1c8eccf01acc5..9fe3fff818b8a42281b30bcd3bba83c0e0dd36f8 100644 (file)
@@ -537,8 +537,10 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 
        INIT_WORK(&ctrl->ana_work, nvme_ana_work);
        ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
-       if (!ctrl->ana_log_buf)
+       if (!ctrl->ana_log_buf) {
+               error = -ENOMEM;
                goto out;
+       }
 
        error = nvme_read_ana_log(ctrl, true);
        if (error)
@@ -547,7 +549,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 out_free_ana_log_buf:
        kfree(ctrl->ana_log_buf);
 out:
-       return -ENOMEM;
+       return error;
 }
 
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
index 778c4f76a884320b0fad349260251acf796462a2..2153956a0b207cae268ffabf8392a3025f22432b 100644 (file)
@@ -135,7 +135,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
                if (val & PCIE_ATU_ENABLE)
                        return;
 
-               usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+               mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Outbound iATU is not being enabled\n");
 }
@@ -178,7 +178,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
                if (val & PCIE_ATU_ENABLE)
                        return;
 
-               usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+               mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Outbound iATU is not being enabled\n");
 }
@@ -236,7 +236,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
                if (val & PCIE_ATU_ENABLE)
                        return 0;
 
-               usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+               mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Inbound iATU is not being enabled\n");
 
@@ -282,7 +282,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
                if (val & PCIE_ATU_ENABLE)
                        return 0;
 
-               usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+               mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Inbound iATU is not being enabled\n");
 
index 96126fd8403ccd79450936b7d4f52de551d07c40..9f1a5e399b7033eba9918a7981bdfdb0ea957a7a 100644 (file)
@@ -26,8 +26,7 @@
 
 /* Parameters for the waiting for iATU enabled routine */
 #define LINK_WAIT_MAX_IATU_RETRIES     5
-#define LINK_WAIT_IATU_MIN             9000
-#define LINK_WAIT_IATU_MAX             10000
+#define LINK_WAIT_IATU                 9
 
 /* Synopsys-specific PCIe configuration registers */
 #define PCIE_PORT_LINK_CONTROL         0x710
index 50eb0729385b8a86b106f561ad90898042a881e6..a41d79b8d46a2a8b7c2fc5c371e327643a74178c 100644 (file)
@@ -1145,7 +1145,6 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
 {
        struct device *dev = &pcie->pdev->dev;
        struct device_node *np = dev->of_node;
-       unsigned int i;
        int ret;
 
        INIT_LIST_HEAD(&pcie->resources);
@@ -1179,13 +1178,58 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
                                         resource_size(&pcie->io) - 1);
                pcie->realio.name = "PCI I/O";
 
+               pci_add_resource(&pcie->resources, &pcie->realio);
+       }
+
+       return devm_request_pci_bus_resources(dev, &pcie->resources);
+}
+
+/*
+ * This is a copy of pci_host_probe(), except that it does the I/O
+ * remap as the last step, once we are sure we won't fail.
+ *
+ * It should be removed once the I/O remap error handling issue has
+ * been sorted out.
+ */
+static int mvebu_pci_host_probe(struct pci_host_bridge *bridge)
+{
+       struct mvebu_pcie *pcie;
+       struct pci_bus *bus, *child;
+       int ret;
+
+       ret = pci_scan_root_bus_bridge(bridge);
+       if (ret < 0) {
+               dev_err(bridge->dev.parent, "Scanning root bridge failed");
+               return ret;
+       }
+
+       pcie = pci_host_bridge_priv(bridge);
+       if (resource_size(&pcie->io) != 0) {
+               unsigned int i;
+
                for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
                        pci_ioremap_io(i, pcie->io.start + i);
+       }
 
-               pci_add_resource(&pcie->resources, &pcie->realio);
+       bus = bridge->bus;
+
+       /*
+        * We insert PCI resources into the iomem_resource and
+        * ioport_resource trees in either pci_bus_claim_resources()
+        * or pci_bus_assign_resources().
+        */
+       if (pci_has_flag(PCI_PROBE_ONLY)) {
+               pci_bus_claim_resources(bus);
+       } else {
+               pci_bus_size_bridges(bus);
+               pci_bus_assign_resources(bus);
+
+               list_for_each_entry(child, &bus->children, node)
+                       pcie_bus_configure_settings(child);
        }
 
-       return devm_request_pci_bus_resources(dev, &pcie->resources);
+       pci_bus_add_devices(bus);
+       return 0;
 }
 
 static int mvebu_pcie_probe(struct platform_device *pdev)
@@ -1268,7 +1312,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
        bridge->align_resource = mvebu_pcie_align_resource;
        bridge->msi = pcie->msi;
 
-       return pci_host_probe(bridge);
+       return mvebu_pci_host_probe(bridge);
 }
 
 static const struct of_device_id mvebu_pcie_of_match_table[] = {
index ef0b1b6ba86f8fad2a570187252e7579e12ea129..12afa7fdf77e9569d78f517a77b01de129151937 100644 (file)
@@ -457,17 +457,18 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
 /**
  * enable_slot - enable, configure a slot
  * @slot: slot to be enabled
+ * @bridge: true if enable is for the whole bridge (not a single slot)
  *
  * This function should be called per *physical slot*,
  * not per each slot object in ACPI namespace.
  */
-static void enable_slot(struct acpiphp_slot *slot)
+static void enable_slot(struct acpiphp_slot *slot, bool bridge)
 {
        struct pci_dev *dev;
        struct pci_bus *bus = slot->bus;
        struct acpiphp_func *func;
 
-       if (bus->self && hotplug_is_native(bus->self)) {
+       if (bridge && bus->self && hotplug_is_native(bus->self)) {
                /*
                 * If native hotplug is used, it will take care of hotplug
                 * slot management and resource allocation for hotplug
@@ -701,7 +702,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
                                        trim_stale_devices(dev);
 
                        /* configure all functions */
-                       enable_slot(slot);
+                       enable_slot(slot, true);
                } else {
                        disable_slot(slot);
                }
@@ -785,7 +786,7 @@ static void hotplug_event(u32 type, struct acpiphp_context *context)
                if (bridge)
                        acpiphp_check_bridge(bridge);
                else if (!(slot->flags & SLOT_IS_GOING_AWAY))
-                       enable_slot(slot);
+                       enable_slot(slot, false);
 
                break;
 
@@ -973,7 +974,7 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
 
        /* configure all functions */
        if (!(slot->flags & SLOT_ENABLED))
-               enable_slot(slot);
+               enable_slot(slot, false);
 
        pci_unlock_rescan_remove();
        return 0;
index 1835f3a7aa8d2f5a502a0629bfb0c0cc96420dd4..51b6c81671c1e21baba57422cd90d8727992f8e1 100644 (file)
@@ -1289,12 +1289,12 @@ int pci_save_state(struct pci_dev *dev)
 EXPORT_SYMBOL(pci_save_state);
 
 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
-                                    u32 saved_val, int retry)
+                                    u32 saved_val, int retry, bool force)
 {
        u32 val;
 
        pci_read_config_dword(pdev, offset, &val);
-       if (val == saved_val)
+       if (!force && val == saved_val)
                return;
 
        for (;;) {
@@ -1313,25 +1313,36 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
 }
 
 static void pci_restore_config_space_range(struct pci_dev *pdev,
-                                          int start, int end, int retry)
+                                          int start, int end, int retry,
+                                          bool force)
 {
        int index;
 
        for (index = end; index >= start; index--)
                pci_restore_config_dword(pdev, 4 * index,
                                         pdev->saved_config_space[index],
-                                        retry);
+                                        retry, force);
 }
 
 static void pci_restore_config_space(struct pci_dev *pdev)
 {
        if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
-               pci_restore_config_space_range(pdev, 10, 15, 0);
+               pci_restore_config_space_range(pdev, 10, 15, 0, false);
                /* Restore BARs before the command register. */
-               pci_restore_config_space_range(pdev, 4, 9, 10);
-               pci_restore_config_space_range(pdev, 0, 3, 0);
+               pci_restore_config_space_range(pdev, 4, 9, 10, false);
+               pci_restore_config_space_range(pdev, 0, 3, 0, false);
+       } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+               pci_restore_config_space_range(pdev, 12, 15, 0, false);
+
+               /*
+                * Force rewriting of prefetch registers to avoid S3 resume
+                * issues on Intel PCI bridges that occur when these
+                * registers are not explicitly written.
+                */
+               pci_restore_config_space_range(pdev, 9, 11, 0, true);
+               pci_restore_config_space_range(pdev, 0, 8, 0, false);
        } else {
-               pci_restore_config_space_range(pdev, 0, 15, 0);
+               pci_restore_config_space_range(pdev, 0, 15, 0, false);
        }
 }
 
index 8d48371caaa2df51568fd70996e2b4594a24a311..e7f45d96b0cbd61e4cf7adfb050fbde547bcf752 100644 (file)
 
 #include "pinctrl-intel.h"
 
-#define CNL_PAD_OWN    0x020
-#define CNL_PADCFGLOCK 0x080
-#define CNL_HOSTSW_OWN 0x0b0
-#define CNL_GPI_IE     0x120
+#define CNL_PAD_OWN            0x020
+#define CNL_PADCFGLOCK         0x080
+#define CNL_LP_HOSTSW_OWN      0x0b0
+#define CNL_H_HOSTSW_OWN       0x0c0
+#define CNL_GPI_IE             0x120
 
 #define CNL_GPP(r, s, e, g)                            \
        {                                               \
 
 #define CNL_NO_GPIO    -1
 
-#define CNL_COMMUNITY(b, s, e, g)                      \
+#define CNL_COMMUNITY(b, s, e, o, g)                   \
        {                                               \
                .barno = (b),                           \
                .padown_offset = CNL_PAD_OWN,           \
                .padcfglock_offset = CNL_PADCFGLOCK,    \
-               .hostown_offset = CNL_HOSTSW_OWN,       \
+               .hostown_offset = (o),                  \
                .ie_offset = CNL_GPI_IE,                \
                .pin_base = (s),                        \
                .npins = ((e) - (s) + 1),               \
                .ngpps = ARRAY_SIZE(g),                 \
        }
 
+#define CNLLP_COMMUNITY(b, s, e, g)                    \
+       CNL_COMMUNITY(b, s, e, CNL_LP_HOSTSW_OWN, g)
+
+#define CNLH_COMMUNITY(b, s, e, g)                     \
+       CNL_COMMUNITY(b, s, e, CNL_H_HOSTSW_OWN, g)
+
 /* Cannon Lake-H */
 static const struct pinctrl_pin_desc cnlh_pins[] = {
        /* GPP_A */
@@ -442,10 +449,10 @@ static const struct intel_function cnlh_functions[] = {
 };
 
 static const struct intel_community cnlh_communities[] = {
-       CNL_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
-       CNL_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
-       CNL_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
-       CNL_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
+       CNLH_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
+       CNLH_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
+       CNLH_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
+       CNLH_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
 };
 
 static const struct intel_pinctrl_soc_data cnlh_soc_data = {
@@ -803,9 +810,9 @@ static const struct intel_padgroup cnllp_community4_gpps[] = {
 };
 
 static const struct intel_community cnllp_communities[] = {
-       CNL_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
-       CNL_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
-       CNL_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
+       CNLLP_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
+       CNLLP_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
+       CNLLP_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
 };
 
 static const struct intel_pinctrl_soc_data cnllp_soc_data = {
index ec8dafc946943261bbefa67d948a03a6cce9fffa..1ea3438ea67e925aa82b6e57e503efb72689fde3 100644 (file)
@@ -887,36 +887,6 @@ static const struct gpio_chip intel_gpio_chip = {
        .set_config = gpiochip_generic_config,
 };
 
-static int intel_gpio_irq_reqres(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
-       int pin;
-       int ret;
-
-       pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
-       if (pin >= 0) {
-               ret = gpiochip_lock_as_irq(gc, pin);
-               if (ret) {
-                       dev_err(pctrl->dev, "unable to lock HW IRQ %d for IRQ\n",
-                               pin);
-                       return ret;
-               }
-       }
-       return 0;
-}
-
-static void intel_gpio_irq_relres(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
-       int pin;
-
-       pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
-       if (pin >= 0)
-               gpiochip_unlock_as_irq(gc, pin);
-}
-
 static void intel_gpio_irq_ack(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1132,8 +1102,6 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
 
 static struct irq_chip intel_gpio_irqchip = {
        .name = "intel-gpio",
-       .irq_request_resources = intel_gpio_irq_reqres,
-       .irq_release_resources = intel_gpio_irq_relres,
        .irq_enable = intel_gpio_irq_enable,
        .irq_ack = intel_gpio_irq_ack,
        .irq_mask = intel_gpio_irq_mask,
index 41ccc759b8b8867a09b992371eafafca35899a0e..1425c2874d4028b5140cc74933733a2bf4f8f22b 100644 (file)
@@ -348,21 +348,12 @@ static void amd_gpio_irq_enable(struct irq_data *d)
        unsigned long flags;
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
-       u32 mask = BIT(INTERRUPT_ENABLE_OFF) | BIT(INTERRUPT_MASK_OFF);
 
        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
        pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
        pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
        pin_reg |= BIT(INTERRUPT_MASK_OFF);
        writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
-       /*
-        * When debounce logic is enabled it takes ~900 us before interrupts
-        * can be enabled.  During this "debounce warm up" period the
-        * "INTERRUPT_ENABLE" bit will read as 0. Poll the bit here until it
-        * reads back as 1, signaling that interrupts are now enabled.
-        */
-       while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
-               continue;
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 }
 
@@ -426,7 +417,7 @@ static void amd_gpio_irq_eoi(struct irq_data *d)
 static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
        int ret = 0;
-       u32 pin_reg;
+       u32 pin_reg, pin_reg_irq_en, mask;
        unsigned long flags, irq_flags;
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
@@ -495,6 +486,28 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
        }
 
        pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF;
+       /*
+        * If WAKE_INT_MASTER_REG.MaskStsEn is set, a software write to the
+        * debounce registers of any GPIO will block wake/interrupt status
+        * generation for *all* GPIOs for a length of time that depends on
+        * WAKE_INT_MASTER_REG.MaskStsLength[11:0].  During this period the
+        * INTERRUPT_ENABLE bit will read as 0.
+        *
+        * We temporarily enable irq for the GPIO whose configuration is
+        * changing, and then wait for it to read back as 1 to know when
+        * debounce has settled and then disable the irq again.
+        * We do this polling with the spinlock held to ensure other GPIO
+        * access routines do not read an incorrect value for the irq enable
+        * bit of other GPIOs.  We keep the GPIO masked while polling to avoid
+        * spurious irqs, and disable the irq again after polling.
+        */
+       mask = BIT(INTERRUPT_ENABLE_OFF);
+       pin_reg_irq_en = pin_reg;
+       pin_reg_irq_en |= mask;
+       pin_reg_irq_en &= ~BIT(INTERRUPT_MASK_OFF);
+       writel(pin_reg_irq_en, gpio_dev->base + (d->hwirq)*4);
+       while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
+               continue;
        writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
index 0f8ac8dec3e16a21e066e9f76318d14ec6c9d78f..a1bd8aaf4d983bcd8b8bdfa10ae944918fabeb75 100644 (file)
@@ -569,6 +569,25 @@ static int bd71837_probe(struct platform_device *pdev)
                        BD71837_REG_REGLOCK);
        }
 
+       /*
+        * There is a HW quirk in BD71837. The shutdown sequence timings for
+        * bucks/LDOs which are controlled via register interface are changed.
+        * At PMIC poweroff the voltage for BUCK6/7 is cut immediately at the
+        * beginning of shut-down sequence. As bucks 6 and 7 are parent
+        * supplies for LDO5 and LDO6 - this causes LDO5/6 voltage
+        * monitoring to erroneously detect under voltage and force PMIC to
+        * emergency state instead of poweroff. In order to avoid this we
+        * disable voltage monitoring for LDO5 and LDO6
+        */
+       err = regmap_update_bits(pmic->mfd->regmap, BD718XX_REG_MVRFLTMASK2,
+                                BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80,
+                                BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80);
+       if (err) {
+               dev_err(&pmic->pdev->dev,
+                       "Failed to disable voltage monitoring\n");
+               goto err;
+       }
+
        for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) {
 
                struct regulator_desc *desc;
index bb1324f93143f66e609fea2602329230f70ed4ce..9577d89418468a06f1030ff69d2699f71b5710bc 100644 (file)
@@ -3161,7 +3161,7 @@ static inline int regulator_suspend_toggle(struct regulator_dev *rdev,
        if (!rstate->changeable)
                return -EPERM;
 
-       rstate->enabled = en;
+       rstate->enabled = (en) ? ENABLE_IN_SUSPEND : DISABLE_IN_SUSPEND;
 
        return 0;
 }
@@ -4395,13 +4395,13 @@ regulator_register(const struct regulator_desc *regulator_desc,
            !rdev->desc->fixed_uV)
                rdev->is_switch = true;
 
+       dev_set_drvdata(&rdev->dev, rdev);
        ret = device_register(&rdev->dev);
        if (ret != 0) {
                put_device(&rdev->dev);
                goto unset_supplies;
        }
 
-       dev_set_drvdata(&rdev->dev, rdev);
        rdev_init_debugfs(rdev);
 
        /* try to resolve regulators supply since a new one was registered */
index 638f17d4c8485e11fa7d0a9486fc3ec131a341cd..210fc20f7de7a9cd26dbbee68e24a7c0bc2dc94a 100644 (file)
@@ -213,8 +213,6 @@ static void of_get_regulation_constraints(struct device_node *np,
                else if (of_property_read_bool(suspend_np,
                                        "regulator-off-in-suspend"))
                        suspend_state->enabled = DISABLE_IN_SUSPEND;
-               else
-                       suspend_state->enabled = DO_NOTHING_IN_SUSPEND;
 
                if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
                                          &pval))
index de8282420f966f0d0f984c72868d9ebddcf11207..ffce6f39828aa1799c49975df1d85fe2d8ccbe38 100644 (file)
@@ -610,7 +610,7 @@ static void qeth_put_reply(struct qeth_reply *reply)
 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
                struct qeth_card *card)
 {
-       char *ipa_name;
+       const char *ipa_name;
        int com = cmd->hdr.command;
        ipa_name = qeth_get_ipa_cmd_name(com);
        if (rc)
index 5bcb8dafc3ee506f9ff3487bdf8e01cf274e35b2..e891c0b52f4ccc79995b6c60a8a05e6e12f92005 100644 (file)
@@ -148,10 +148,10 @@ EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
 
 struct ipa_rc_msg {
        enum qeth_ipa_return_codes rc;
-       char *msg;
+       const char *msg;
 };
 
-static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
+static const struct ipa_rc_msg qeth_ipa_rc_msg[] = {
        {IPA_RC_SUCCESS,                "success"},
        {IPA_RC_NOTSUPP,                "Command not supported"},
        {IPA_RC_IP_TABLE_FULL,          "Add Addr IP Table Full - ipv6"},
@@ -219,23 +219,23 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
 
 
 
-char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
+const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
 {
-       int x = 0;
-       qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) /
-                       sizeof(struct ipa_rc_msg) - 1].rc = rc;
-       while (qeth_ipa_rc_msg[x].rc != rc)
-               x++;
+       int x;
+
+       for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_msg) - 1; x++)
+               if (qeth_ipa_rc_msg[x].rc == rc)
+                       return qeth_ipa_rc_msg[x].msg;
        return qeth_ipa_rc_msg[x].msg;
 }
 
 
 struct ipa_cmd_names {
        enum qeth_ipa_cmds cmd;
-       char *name;
+       const char *name;
 };
 
-static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
+static const struct ipa_cmd_names qeth_ipa_cmd_names[] = {
        {IPA_CMD_STARTLAN,      "startlan"},
        {IPA_CMD_STOPLAN,       "stoplan"},
        {IPA_CMD_SETVMAC,       "setvmac"},
@@ -267,13 +267,12 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
        {IPA_CMD_UNKNOWN,       "unknown"},
 };
 
-char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
+const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
 {
-       int x = 0;
-       qeth_ipa_cmd_names[
-               sizeof(qeth_ipa_cmd_names) /
-                       sizeof(struct ipa_cmd_names)-1].cmd = cmd;
-       while (qeth_ipa_cmd_names[x].cmd != cmd)
-               x++;
+       int x;
+
+       for (x = 0; x < ARRAY_SIZE(qeth_ipa_cmd_names) - 1; x++)
+               if (qeth_ipa_cmd_names[x].cmd == cmd)
+                       return qeth_ipa_cmd_names[x].name;
        return qeth_ipa_cmd_names[x].name;
 }
index aa8b9196b089e0c9b2788493d5103dd5b38ad33d..aa5de1fe01e10068b8913d814c27a9a63bdc95d9 100644 (file)
@@ -797,8 +797,8 @@ enum qeth_ipa_arp_return_codes {
        QETH_IPA_ARP_RC_Q_NO_DATA    = 0x0008,
 };
 
-extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
-extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
+extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
+extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
 
 #define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
                               sizeof(struct qeth_ipacmd_setassparms_hdr))
index cc8e64dc65ad896d8d5ce3e7fa7495d88d95ccfb..e5bd035ebad0f7c7bc5e40ef4a6b3bbafb53df3c 100644 (file)
@@ -2472,6 +2472,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
                /* start qedi context */
                spin_lock_init(&qedi->hba_lock);
                spin_lock_init(&qedi->task_idx_lock);
+               mutex_init(&qedi->stats_lock);
        }
        qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
        qedi_ops->ll2->start(qedi->cdev, &params);
index ecb22749df0bfa4a4fc0596f8eb32a9b693c5004..8cc0151830433230e8f629c2660eec90871b7e8c 100644 (file)
@@ -2729,6 +2729,9 @@ static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
 {
        unsigned long addr;
 
+       if (!p)
+               return -ENODEV;
+
        addr = gen_pool_alloc(p, cnt);
        if (!addr)
                return -ENOMEM;
index c646d871386130d5dde7df2619f3b7fa7bcf5af6..681f7d4b7724fd2037257fab596fc13fc3bbc7d5 100644 (file)
@@ -626,7 +626,7 @@ static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
 {
        u32 shift;
 
-       shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : RX_SYNC_SHIFT_BASE;
+       shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
        shift -= tdm_num * 2;
 
        return shift;
index 0626e6e3ea0c05dee66e177aa4d2a0c75b703247..421bfc7dda67413bd72ae96055c3767d01a33fa7 100644 (file)
@@ -300,8 +300,8 @@ static int spi_gpio_request(struct device *dev,
                *mflags |= SPI_MASTER_NO_RX;
 
        spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
-       if (IS_ERR(spi_gpio->mosi))
-               return PTR_ERR(spi_gpio->mosi);
+       if (IS_ERR(spi_gpio->sck))
+               return PTR_ERR(spi_gpio->sck);
 
        for (i = 0; i < num_chipselects; i++) {
                spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs",
index 95dc4d78618df29c2b15d92fa6d11e3b84bb7ed1..b37de1d991d6abe1e0a25c5fffcf04d9851aba3e 100644 (file)
@@ -598,11 +598,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
 
        ret = wait_event_interruptible_timeout(rspi->wait,
                                               rspi->dma_callbacked, HZ);
-       if (ret > 0 && rspi->dma_callbacked)
+       if (ret > 0 && rspi->dma_callbacked) {
                ret = 0;
-       else if (!ret) {
-               dev_err(&rspi->master->dev, "DMA timeout\n");
-               ret = -ETIMEDOUT;
+       } else {
+               if (!ret) {
+                       dev_err(&rspi->master->dev, "DMA timeout\n");
+                       ret = -ETIMEDOUT;
+               }
                if (tx)
                        dmaengine_terminate_all(rspi->master->dma_tx);
                if (rx)
@@ -1350,12 +1352,36 @@ static const struct platform_device_id spi_driver_ids[] = {
 
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int rspi_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+       return spi_master_suspend(rspi->master);
+}
+
+static int rspi_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+       return spi_master_resume(rspi->master);
+}
+
+static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
+#define DEV_PM_OPS     &rspi_pm_ops
+#else
+#define DEV_PM_OPS     NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver rspi_driver = {
        .probe =        rspi_probe,
        .remove =       rspi_remove,
        .id_table =     spi_driver_ids,
        .driver         = {
                .name = "renesas_spi",
+               .pm = DEV_PM_OPS,
                .of_match_table = of_match_ptr(rspi_of_match),
        },
 };
index 539d6d1a277a6179f7053698cbee772083354828..101cd6aae2ea520afcac89671071cdabe2341f8f 100644 (file)
@@ -397,7 +397,8 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
 
 static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
 {
-       sh_msiof_write(p, STR, sh_msiof_read(p, STR));
+       sh_msiof_write(p, STR,
+                      sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
 }
 
 static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -1426,12 +1427,37 @@ static const struct platform_device_id spi_driver_ids[] = {
 };
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int sh_msiof_spi_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+       return spi_master_suspend(p->master);
+}
+
+static int sh_msiof_spi_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+       return spi_master_resume(p->master);
+}
+
+static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
+                        sh_msiof_spi_resume);
+#define DEV_PM_OPS     &sh_msiof_spi_pm_ops
+#else
+#define DEV_PM_OPS     NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver sh_msiof_spi_drv = {
        .probe          = sh_msiof_spi_probe,
        .remove         = sh_msiof_spi_remove,
        .id_table       = spi_driver_ids,
        .driver         = {
                .name           = "spi_sh_msiof",
+               .pm             = DEV_PM_OPS,
                .of_match_table = of_match_ptr(sh_msiof_match),
        },
 };
index 6f7b946b5cedf103cbed9886d7ec3079bbdcf6de..1427f343b39a3dc4468e14c3612814508993b3c0 100644 (file)
@@ -1063,6 +1063,24 @@ static int tegra_slink_probe(struct platform_device *pdev)
                goto exit_free_master;
        }
 
+       /* disabled clock may cause interrupt storm upon request */
+       tspi->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(tspi->clk)) {
+               ret = PTR_ERR(tspi->clk);
+               dev_err(&pdev->dev, "Can not get clock %d\n", ret);
+               goto exit_free_master;
+       }
+       ret = clk_prepare(tspi->clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
+               goto exit_free_master;
+       }
+       ret = clk_enable(tspi->clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
+               goto exit_free_master;
+       }
+
        spi_irq = platform_get_irq(pdev, 0);
        tspi->irq = spi_irq;
        ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
@@ -1071,14 +1089,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
                                        tspi->irq);
-               goto exit_free_master;
-       }
-
-       tspi->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(tspi->clk)) {
-               dev_err(&pdev->dev, "can not get clock\n");
-               ret = PTR_ERR(tspi->clk);
-               goto exit_free_irq;
+               goto exit_clk_disable;
        }
 
        tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
@@ -1138,6 +1149,8 @@ static int tegra_slink_probe(struct platform_device *pdev)
        tegra_slink_deinit_dma_param(tspi, true);
 exit_free_irq:
        free_irq(spi_irq, tspi);
+exit_clk_disable:
+       clk_disable(tspi->clk);
 exit_free_master:
        spi_master_put(master);
        return ret;
@@ -1150,6 +1163,8 @@ static int tegra_slink_remove(struct platform_device *pdev)
 
        free_irq(tspi->irq, tspi);
 
+       clk_disable(tspi->clk);
+
        if (tspi->tx_dma_chan)
                tegra_slink_deinit_dma_param(tspi, false);
 
index e1e264a9a4c76f11f1553327d59deb0fd8f43168..28fc4ce75edb49ddfe68fc00796fa0934a8df8ff 100644 (file)
@@ -738,14 +738,6 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
        u8 link, depth;
        u64 route;
 
-       /*
-        * After NVM upgrade adding root switch device fails because we
-        * initiated reset. During that time ICM might still send
-        * XDomain connected message which we ignore here.
-        */
-       if (!tb->root_switch)
-               return;
-
        link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
        depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
                ICM_LINK_INFO_DEPTH_SHIFT;
@@ -1037,14 +1029,6 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
        if (pkg->hdr.packet_id)
                return;
 
-       /*
-        * After NVM upgrade adding root switch device fails because we
-        * initiated reset. During that time ICM might still send device
-        * connected message which we ignore here.
-        */
-       if (!tb->root_switch)
-               return;
-
        route = get_route(pkg->route_hi, pkg->route_lo);
        authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
        security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
@@ -1408,19 +1392,26 @@ static void icm_handle_notification(struct work_struct *work)
 
        mutex_lock(&tb->lock);
 
-       switch (n->pkg->code) {
-       case ICM_EVENT_DEVICE_CONNECTED:
-               icm->device_connected(tb, n->pkg);
-               break;
-       case ICM_EVENT_DEVICE_DISCONNECTED:
-               icm->device_disconnected(tb, n->pkg);
-               break;
-       case ICM_EVENT_XDOMAIN_CONNECTED:
-               icm->xdomain_connected(tb, n->pkg);
-               break;
-       case ICM_EVENT_XDOMAIN_DISCONNECTED:
-               icm->xdomain_disconnected(tb, n->pkg);
-               break;
+       /*
+        * When the domain is stopped we flush its workqueue but before
+        * that the root switch is removed. In that case we should treat
+        * the queued events as being canceled.
+        */
+       if (tb->root_switch) {
+               switch (n->pkg->code) {
+               case ICM_EVENT_DEVICE_CONNECTED:
+                       icm->device_connected(tb, n->pkg);
+                       break;
+               case ICM_EVENT_DEVICE_DISCONNECTED:
+                       icm->device_disconnected(tb, n->pkg);
+                       break;
+               case ICM_EVENT_XDOMAIN_CONNECTED:
+                       icm->xdomain_connected(tb, n->pkg);
+                       break;
+               case ICM_EVENT_XDOMAIN_DISCONNECTED:
+                       icm->xdomain_disconnected(tb, n->pkg);
+                       break;
+               }
        }
 
        mutex_unlock(&tb->lock);
index 88cff05a18085d7751452960a53fda140044765e..5cd6bdfa068f9bc91fe2e83bedcd4e81160230c4 100644 (file)
@@ -1191,5 +1191,5 @@ static void __exit nhi_unload(void)
        tb_domain_exit();
 }
 
-fs_initcall(nhi_init);
+rootfs_initcall(nhi_init);
 module_exit(nhi_unload);
index fa8dcb470640f7d97b8a11652d2b418ac2f9d0a1..d31b975dd3fd7b7c4ac052390bca7884b2bff468 100644 (file)
@@ -630,10 +630,6 @@ static int dw8250_probe(struct platform_device *pdev)
        if (!data->skip_autocfg)
                dw8250_setup_port(p);
 
-#ifdef CONFIG_PM
-       uart.capabilities |= UART_CAP_RPM;
-#endif
-
        /* If we have a valid fifosize, try hooking up DMA */
        if (p->fifosize) {
                data->dma.rxconf.src_maxburst = p->fifosize / 4;
index ac4424bf6b136cc43093dec459128637f7da5947..ab3f6e91853da3c269cdb6c22c82226195778ece 100644 (file)
@@ -291,6 +291,33 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
                .error_clear = SCIF_ERROR_CLEAR,
        },
 
+       /*
+        * The "SCIFA" that is in RZ/T and RZ/A2.
+        * It looks like a normal SCIF with FIFO data, but with a
+        * compressed address space. Also, the break-out of interrupts
+        * is different: ERI/BRI, RXI, TXI, TEI, DRI.
+        */
+       [SCIx_RZ_SCIFA_REGTYPE] = {
+               .regs = {
+                       [SCSMR]         = { 0x00, 16 },
+                       [SCBRR]         = { 0x02,  8 },
+                       [SCSCR]         = { 0x04, 16 },
+                       [SCxTDR]        = { 0x06,  8 },
+                       [SCxSR]         = { 0x08, 16 },
+                       [SCxRDR]        = { 0x0A,  8 },
+                       [SCFCR]         = { 0x0C, 16 },
+                       [SCFDR]         = { 0x0E, 16 },
+                       [SCSPTR]        = { 0x10, 16 },
+                       [SCLSR]         = { 0x12, 16 },
+               },
+               .fifosize = 16,
+               .overrun_reg = SCLSR,
+               .overrun_mask = SCLSR_ORER,
+               .sampling_rate_mask = SCI_SR(32),
+               .error_mask = SCIF_DEFAULT_ERROR_MASK,
+               .error_clear = SCIF_ERROR_CLEAR,
+       },
+
        /*
         * Common SH-3 SCIF definitions.
         */
@@ -319,15 +346,15 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
        [SCIx_SH4_SCIF_REGTYPE] = {
                .regs = {
                        [SCSMR]         = { 0x00, 16 },
-                       [SCBRR]         = { 0x02,  8 },
-                       [SCSCR]         = { 0x04, 16 },
-                       [SCxTDR]        = { 0x06,  8 },
-                       [SCxSR]         = { 0x08, 16 },
-                       [SCxRDR]        = { 0x0a,  8 },
-                       [SCFCR]         = { 0x0c, 16 },
-                       [SCFDR]         = { 0x0e, 16 },
-                       [SCSPTR]        = { 0x10, 16 },
-                       [SCLSR]         = { 0x12, 16 },
+                       [SCBRR]         = { 0x04,  8 },
+                       [SCSCR]         = { 0x08, 16 },
+                       [SCxTDR]        = { 0x0c,  8 },
+                       [SCxSR]         = { 0x10, 16 },
+                       [SCxRDR]        = { 0x14,  8 },
+                       [SCFCR]         = { 0x18, 16 },
+                       [SCFDR]         = { 0x1c, 16 },
+                       [SCSPTR]        = { 0x20, 16 },
+                       [SCLSR]         = { 0x24, 16 },
                },
                .fifosize = 16,
                .overrun_reg = SCLSR,
@@ -2810,7 +2837,7 @@ static int sci_init_single(struct platform_device *dev,
 {
        struct uart_port *port = &sci_port->port;
        const struct resource *res;
-       unsigned int i, regtype;
+       unsigned int i;
        int ret;
 
        sci_port->cfg   = p;
@@ -2847,7 +2874,6 @@ static int sci_init_single(struct platform_device *dev,
        if (unlikely(sci_port->params == NULL))
                return -EINVAL;
 
-       regtype = sci_port->params - sci_port_params;
        switch (p->type) {
        case PORT_SCIFB:
                sci_port->rx_trigger = 48;
@@ -2902,10 +2928,6 @@ static int sci_init_single(struct platform_device *dev,
                        port->regshift = 1;
        }
 
-       if (regtype == SCIx_SH4_SCIF_REGTYPE)
-               if (sci_port->reg_size >= 0x20)
-                       port->regshift = 1;
-
        /*
         * The UART port needs an IRQ value, so we peg this to the RX IRQ
         * for the multi-IRQ ports, which is where we are primarily
@@ -3110,6 +3132,10 @@ static const struct of_device_id of_sci_match[] = {
                .compatible = "renesas,scif-r7s72100",
                .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE),
        },
+       {
+               .compatible = "renesas,scif-r7s9210",
+               .data = SCI_OF_DATA(PORT_SCIF, SCIx_RZ_SCIFA_REGTYPE),
+       },
        /* Family-specific types */
        {
                .compatible = "renesas,rcar-gen1-scif",
index f9b40a9dc4d33eb458d6a3469a0d41acdeab404b..bc03b0a690b4d166b9984d2677642797a421551f 100644 (file)
@@ -1514,6 +1514,7 @@ static void acm_disconnect(struct usb_interface *intf)
 {
        struct acm *acm = usb_get_intfdata(intf);
        struct tty_struct *tty;
+       int i;
 
        /* sibling interface is already cleaning up */
        if (!acm)
@@ -1544,6 +1545,11 @@ static void acm_disconnect(struct usb_interface *intf)
 
        tty_unregister_device(acm_tty_driver, acm->minor);
 
+       usb_free_urb(acm->ctrlurb);
+       for (i = 0; i < ACM_NW; i++)
+               usb_free_urb(acm->wb[i].urb);
+       for (i = 0; i < acm->rx_buflimit; i++)
+               usb_free_urb(acm->read_urbs[i]);
        acm_write_buffers_free(acm);
        usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
        acm_read_buffers_free(acm);
index 7334da9e97794c11038f7d76a5d223e404ec7dfc..71d0d33c3286254b0327646720161df6ba5cc1e9 100644 (file)
@@ -642,10 +642,10 @@ static int __maybe_unused xhci_mtk_resume(struct device *dev)
        xhci_mtk_host_enable(mtk);
 
        xhci_dbg(xhci, "%s: restart port polling\n", __func__);
-       set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-       usb_hcd_poll_rh_status(hcd);
        set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
        usb_hcd_poll_rh_status(xhci->shared_hcd);
+       set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       usb_hcd_poll_rh_status(hcd);
        return 0;
 }
 
index 6372edf339d91d11cea115226bb9685d7ebebe45..722860eb5a91f5bcec455871320f6c5d6e95e535 100644 (file)
@@ -185,6 +185,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
            (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
                xhci->quirks |= XHCI_MISSING_CAS;
index 0215b70c4efc7893d7b60d633fc37cc999ae9ff5..e72ad9f81c73973b86bd7992c0f331039a8192bf 100644 (file)
@@ -561,6 +561,9 @@ static void option_instat_callback(struct urb *urb);
 /* Interface is reserved */
 #define RSVD(ifnum)    ((BIT(ifnum) & 0xff) << 0)
 
+/* Interface must have two endpoints */
+#define NUMEP2         BIT(16)
+
 
 static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -1081,8 +1084,9 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) },
        { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
          .driver_info = RSVD(4) },
-       { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06),
-         .driver_info = RSVD(4) | RSVD(5) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
+         .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1999,6 +2003,13 @@ static int option_probe(struct usb_serial *serial,
        if (device_flags & RSVD(iface_desc->bInterfaceNumber))
                return -ENODEV;
 
+       /*
+        * Allow matching on bNumEndpoints for devices whose interface numbers
+        * can change (e.g. Quectel EP06).
+        */
+       if (device_flags & NUMEP2 && iface_desc->bNumEndpoints != 2)
+               return -ENODEV;
+
        /* Store the device flags so we can use them during attach. */
        usb_set_serial_data(serial, (void *)device_flags);
 
index 40864c2bd9dc0ad73e8c5dd0701f621c4302712c..4d0273508043de920cc4868eab9722deabf4ba40 100644 (file)
@@ -84,7 +84,8 @@ DEVICE(moto_modem, MOTO_IDS);
 
 /* Motorola Tetra driver */
 #define MOTOROLA_TETRA_IDS()                   \
-       { USB_DEVICE(0x0cad, 0x9011) }  /* Motorola Solutions TETRA PEI */
+       { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
+       { USB_DEVICE(0x0cad, 0x9012) }  /* MTP6550 */
 DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
 
 /* Novatel Wireless GPS driver */
index 3946649b85c8908f4e9874b837ccdad274af8499..ba906876cc454f5e67865ad7af69ee3b37f5f059 100644 (file)
@@ -42,6 +42,7 @@ struct bmp_dib_header {
        u32 colors_important;
 } __packed;
 
+static bool use_bgrt = true;
 static bool request_mem_succeeded = false;
 static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
 
@@ -160,6 +161,9 @@ static void efifb_show_boot_graphics(struct fb_info *info)
        void *bgrt_image = NULL;
        u8 *dst = info->screen_base;
 
+       if (!use_bgrt)
+               return;
+
        if (!bgrt_tab.image_address) {
                pr_info("efifb: No BGRT, not showing boot graphics\n");
                return;
@@ -290,6 +294,8 @@ static int efifb_setup(char *options)
                                screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
                        else if (!strcmp(this_opt, "nowc"))
                                mem_flags &= ~EFI_MEMORY_WC;
+                       else if (!strcmp(this_opt, "nobgrt"))
+                               use_bgrt = false;
                }
        }
 
index ef69273074ba706752b52076b83e2cd070210fb2..a3edb20ea4c36094104e1cc45bfd30c976b0b41e 100644 (file)
@@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_info *fbi,
        if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
                return -EFAULT;
 
+       if (mr->w > 4096 || mr->h > 4096)
+               return -EINVAL;
+
        if (mr->w * mr->h * 3 > mr->buffer_size)
                return -EINVAL;
 
@@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
                        mr->x, mr->y, mr->w, mr->h);
 
        if (r > 0) {
-               if (copy_to_user(mr->buffer, buf, mr->buffer_size))
+               if (copy_to_user(mr->buffer, buf, r))
                        r = -EFAULT;
        }
 
index def3a501acd64484342f2315c9b32fe697b166a5..d059d04c63acd7bc118bbc0ec02fc3cb1dfa41f8 100644 (file)
@@ -712,7 +712,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
        /*
         * enable controller clock
         */
-       clk_enable(fbi->clk);
+       clk_prepare_enable(fbi->clk);
 
        pxa168fb_set_par(info);
 
@@ -767,7 +767,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
 failed_free_cmap:
        fb_dealloc_cmap(&info->cmap);
 failed_free_clk:
-       clk_disable(fbi->clk);
+       clk_disable_unprepare(fbi->clk);
 failed_free_fbmem:
        dma_free_coherent(fbi->dev, info->fix.smem_len,
                        info->screen_base, fbi->fb_start_dma);
@@ -807,7 +807,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
        dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
                    info->screen_base, info->fix.smem_start);
 
-       clk_disable(fbi->clk);
+       clk_disable_unprepare(fbi->clk);
 
        framebuffer_release(info);
 
index 045e8afe398be35866adb64d774c0ebf0dd9834c..9e88e3f594c29c4d4a0c7362500b494fbf0ca2db 100644 (file)
@@ -1157,7 +1157,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
                        dev_name);
                   goto out_err0;
                }
-               /* fall though */
+               /* fall through */
        case S9000_ID_ARTIST:
        case S9000_ID_HCRX:
        case S9000_ID_TIMBER:
index 0c9ab62c3df45ae3a450a6a488e90bbd1a62ea61..9dcaed031843caeee3382a647eda0c71cc18d881 100644 (file)
@@ -1553,6 +1553,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
 
 /* Flags */
 #define   MID_WAIT_CANCELLED    1 /* Cancelled while waiting for response */
+#define   MID_DELETED            2 /* Mid has been dequeued/deleted */
 
 /* Types of response buffer returned from SendReceive2 */
 #define   CIFS_NO_BUFFER        0    /* Response buffer not returned */
index 7aa08dba4719cde8c88c18bcb5ee4a4ddae733f1..52d71b64c0c6e5f954d408098fc78197898399c7 100644 (file)
@@ -659,7 +659,15 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
                mid->mid_state = MID_RESPONSE_RECEIVED;
        else
                mid->mid_state = MID_RESPONSE_MALFORMED;
-       list_del_init(&mid->qhead);
+       /*
+        * Trying to handle/dequeue a mid after the send_recv()
+        * function has finished processing it is a bug.
+        */
+       if (mid->mid_flags & MID_DELETED)
+               printk_once(KERN_WARNING
+                           "trying to dequeue a deleted mid\n");
+       else
+               list_del_init(&mid->qhead);
        spin_unlock(&GlobalMid_Lock);
 }
 
@@ -938,8 +946,7 @@ cifs_demultiplex_thread(void *p)
                } else {
                        mids[0] = server->ops->find_mid(server, buf);
                        bufs[0] = buf;
-                       if (mids[0])
-                               num_mids = 1;
+                       num_mids = 1;
 
                        if (!mids[0] || !mids[0]->receive)
                                length = standard_receive3(server, mids[0]);
index d954ce36b4734c06ca63e2fdb0343f6109d2ec57..89985a0a6819e3a2a3348a0c4870157f7220665e 100644 (file)
@@ -1477,7 +1477,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
        }
 
        srch_inf->entries_in_buffer = 0;
-       srch_inf->index_of_last_entry = 0;
+       srch_inf->index_of_last_entry = 2;
 
        rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
                                  fid->volatile_fid, 0, srch_inf);
index 78f96fa3d7d990217014395edc043cf422f6252d..b48f43963da6afcc7ff7da565477b00e67d92877 100644 (file)
@@ -142,7 +142,8 @@ void
 cifs_delete_mid(struct mid_q_entry *mid)
 {
        spin_lock(&GlobalMid_Lock);
-       list_del(&mid->qhead);
+       list_del_init(&mid->qhead);
+       mid->mid_flags |= MID_DELETED;
        spin_unlock(&GlobalMid_Lock);
 
        DeleteMidQEntry(mid);
@@ -772,6 +773,11 @@ cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
        return mid;
 }
 
+static void
+cifs_noop_callback(struct mid_q_entry *mid)
+{
+}
+
 int
 compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
@@ -826,8 +832,13 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                }
 
                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
+               /*
+                * We don't invoke the callback compounds unless it is the last
+                * request.
+                */
+               if (i < num_rqst - 1)
+                       midQ[i]->callback = cifs_noop_callback;
        }
-
        cifs_in_send_inc(ses->server);
        rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
        cifs_in_send_dec(ses->server);
@@ -908,6 +919,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                        midQ[i]->resp_buf = NULL;
        }
 out:
+       /*
+        * This will dequeue all mids. After this it is important that the
+        * demultiplex_thread will not process any of these mids any futher.
+        * This is prevented above by using a noop callback that will not
+        * wake this thread except for the very last PDU.
+        */
        for (i = 0; i < num_rqst; i++)
                cifs_delete_mid(midQ[i]);
        add_credits(ses->server, credits, optype);
index b68ce484e1be7f58ea9379cce298e82bd88ebcfd..4becbf168b7f0df3229b1e1a5d0fb8daca02df0d 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -447,6 +447,7 @@ bool dax_lock_mapping_entry(struct page *page)
                        xa_unlock_irq(&mapping->i_pages);
                        break;
                } else if (IS_ERR(entry)) {
+                       xa_unlock_irq(&mapping->i_pages);
                        WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
                        continue;
                }
index 7f7ee18fe179c258ca9ccb41148592d816ec96f6..e4bb9386c04551e1af155154213285c6da688531 100644 (file)
@@ -1448,6 +1448,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
        }
        inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
        ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+       ext2_set_inode_flags(inode);
        ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
        ei->i_frag_no = raw_inode->i_frag;
        ei->i_frag_size = raw_inode->i_fsize;
@@ -1517,7 +1518,6 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
                           new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
        }
        brelse (bh);
-       ext2_set_inode_flags(inode);
        unlock_new_inode(inode);
        return inode;
        
index 3212c29235ce34d21dedc26ea569978d45462706..2005529af560891043170b4d86ed05c2a62f19eb 100644 (file)
@@ -230,7 +230,7 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
        ret = -EXDEV;
        if (src_file.file->f_path.mnt != dst_file->f_path.mnt)
                goto fdput;
-       ret = do_clone_file_range(src_file.file, off, dst_file, destoff, olen);
+       ret = vfs_clone_file_range(src_file.file, off, dst_file, destoff, olen);
 fdput:
        fdput(src_file);
        return ret;
index 74762b1ec233f9e9ab1f5da47116271143a344db..ec15cf2ec696dae1f77a2c45fe4fd7129bbc2e29 100644 (file)
@@ -1051,6 +1051,7 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
        } else {
                WARN_ON_ONCE(!PageUptodate(page));
                iomap_page_create(inode, page);
+               set_page_dirty(page);
        }
 
        return length;
@@ -1090,7 +1091,6 @@ int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
                length -= ret;
        }
 
-       set_page_dirty(page);
        wait_for_stable_page(page);
        return VM_FAULT_LOCKED;
 out_unlock:
index 55a099e47ba2773e94e126285efc937391dee5d4..b53e76391e52539d11daee791bc47cc07b2ae773 100644 (file)
@@ -541,7 +541,8 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
 __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
                u64 dst_pos, u64 count)
 {
-       return nfserrno(do_clone_file_range(src, src_pos, dst, dst_pos, count));
+       return nfserrno(vfs_clone_file_range(src, src_pos, dst, dst_pos,
+                                            count));
 }
 
 ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
index aaca0949fe53f6e383522bc3e9ae9b73124e4fc2..826f0567ec438caf6b677179be14a2460cf9a943 100644 (file)
@@ -584,9 +584,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
 
        res->last_used = 0;
 
-       spin_lock(&dlm->spinlock);
+       spin_lock(&dlm->track_lock);
        list_add_tail(&res->tracking, &dlm->tracking_list);
-       spin_unlock(&dlm->spinlock);
+       spin_unlock(&dlm->track_lock);
 
        memset(res->lvb, 0, DLM_LVB_LEN);
        memset(res->refmap, 0, sizeof(res->refmap));
index 7869622af22a2cd2ea0dfd164b1a5b3fba31cd25..7a5ee145c733f3b2547c0f85d2c1f6264cb9f240 100644 (file)
@@ -2946,6 +2946,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                if (map_end & (PAGE_SIZE - 1))
                        to = map_end & (PAGE_SIZE - 1);
 
+retry:
                page = find_or_create_page(mapping, page_index, GFP_NOFS);
                if (!page) {
                        ret = -ENOMEM;
@@ -2954,11 +2955,18 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                }
 
                /*
-                * In case PAGE_SIZE <= CLUSTER_SIZE, This page
-                * can't be dirtied before we CoW it out.
+                * In case PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty
+                * page, so write it back.
                 */
-               if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
-                       BUG_ON(PageDirty(page));
+               if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
+                       if (PageDirty(page)) {
+                               /*
+                                * write_on_page will unlock the page on return
+                                */
+                               ret = write_one_page(page);
+                               goto retry;
+                       }
+               }
 
                if (!PageUptodate(page)) {
                        ret = block_read_full_page(page, ocfs2_get_block);
index 296037afecdb4e689d458b54ffc597ed2265bbf9..1cc797a08a5b5f7eb6c002862c834177e5d6f93c 100644 (file)
@@ -141,7 +141,7 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
        }
 
        /* Try to use clone_file_range to clone up within the same fs */
-       error = vfs_clone_file_range(old_file, 0, new_file, 0, len);
+       error = do_clone_file_range(old_file, 0, new_file, 0, len);
        if (!error)
                goto out;
        /* Couldn't clone, so now we try to copy the data */
index aeaefd2a551b015d63b47cbe9ab25a204a9d3cb3..986313da0c8895352d2216f0fb0b78d3854064fb 100644 (file)
@@ -240,8 +240,10 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
                goto out_unlock;
 
        old_cred = ovl_override_creds(file_inode(file)->i_sb);
+       file_start_write(real.file);
        ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
                             ovl_iocb_to_rwf(iocb));
+       file_end_write(real.file);
        revert_creds(old_cred);
 
        /* Update size */
index b6ac545b5a32188297352dd0793565423fee2fcb..3b7ed5d2279c6a8efde8180471bde94ef1020964 100644 (file)
@@ -504,7 +504,7 @@ static const struct inode_operations ovl_special_inode_operations = {
        .update_time    = ovl_update_time,
 };
 
-const struct address_space_operations ovl_aops = {
+static const struct address_space_operations ovl_aops = {
        /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
        .direct_IO              = noop_direct_IO,
 };
index f28711846dd6ebad2cca1ad5fc3a4ef95f3806b7..9c0ca6a7becfbe56e15efd596fbc6540b4bbd859 100644 (file)
@@ -686,7 +686,7 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
                        index = NULL;
                        goto out;
                }
-               pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
+               pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
                                    "overlayfs: mount with '-o index=off' to disable inodes index.\n",
                                    d_inode(origin)->i_ino, name.len, name.name,
                                    err);
index f61839e1054c6b40872a6bf3dc89e3a7da7b6f52..a3c0d95843121e92a103a6b07628feb853c31399 100644 (file)
@@ -152,8 +152,8 @@ static inline int ovl_do_setxattr(struct dentry *dentry, const char *name,
                                  const void *value, size_t size, int flags)
 {
        int err = vfs_setxattr(dentry, name, value, size, flags);
-       pr_debug("setxattr(%pd2, \"%s\", \"%*s\", 0x%x) = %i\n",
-                dentry, name, (int) size, (char *) value, flags, err);
+       pr_debug("setxattr(%pd2, \"%s\", \"%*pE\", %zu, 0x%x) = %i\n",
+                dentry, name, min((int)size, 48), value, size, flags, err);
        return err;
 }
 
index 8cfb62cc86728029e271df468fe3b8415de1fc46..ace4fe4c39a9307aa6008702f0195a92af74627c 100644 (file)
@@ -683,7 +683,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
        struct dentry *upperdentry = ovl_dentry_upper(dentry);
        struct dentry *index = NULL;
        struct inode *inode;
-       struct qstr name;
+       struct qstr name = { };
        int err;
 
        err = ovl_get_index_name(lowerdentry, &name);
@@ -726,6 +726,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
                goto fail;
 
 out:
+       kfree(name.name);
        dput(index);
        return;
 
index ccf86f16d9f0190c18e7f4345b7ca0960709eb05..7e9f07bf260d20bb0a0cd4cd6b6b4abe82b23e20 100644 (file)
@@ -407,6 +407,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
        unsigned long *entries;
        int err;
 
+       /*
+        * The ability to racily run the kernel stack unwinder on a running task
+        * and then observe the unwinder output is scary; while it is useful for
+        * debugging kernel issues, it can also allow an attacker to leak kernel
+        * stack contents.
+        * Doing this in a manner that is at least safe from races would require
+        * some work to ensure that the remote task can not be scheduled; and
+        * even then, this would still expose the unwinder as local attack
+        * surface.
+        * Therefore, this interface is restricted to root.
+        */
+       if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
+               return -EACCES;
+
        entries = kmalloc_array(MAX_STACK_TRACE_DEPTH, sizeof(*entries),
                                GFP_KERNEL);
        if (!entries)
index bbd1e357c23df64b385f4baf119e649342c9c50e..f4fd2e72add4ebd512d4e3e499c0c07999c455df 100644 (file)
@@ -898,8 +898,22 @@ static struct platform_driver ramoops_driver = {
        },
 };
 
-static void ramoops_register_dummy(void)
+static inline void ramoops_unregister_dummy(void)
 {
+       platform_device_unregister(dummy);
+       dummy = NULL;
+
+       kfree(dummy_data);
+       dummy_data = NULL;
+}
+
+static void __init ramoops_register_dummy(void)
+{
+       /*
+        * Prepare a dummy platform data structure to carry the module
+        * parameters. If mem_size isn't set, then there are no module
+        * parameters, and we can skip this.
+        */
        if (!mem_size)
                return;
 
@@ -932,21 +946,28 @@ static void ramoops_register_dummy(void)
        if (IS_ERR(dummy)) {
                pr_info("could not create platform device: %ld\n",
                        PTR_ERR(dummy));
+               dummy = NULL;
+               ramoops_unregister_dummy();
        }
 }
 
 static int __init ramoops_init(void)
 {
+       int ret;
+
        ramoops_register_dummy();
-       return platform_driver_register(&ramoops_driver);
+       ret = platform_driver_register(&ramoops_driver);
+       if (ret != 0)
+               ramoops_unregister_dummy();
+
+       return ret;
 }
 late_initcall(ramoops_init);
 
 static void __exit ramoops_exit(void)
 {
        platform_driver_unregister(&ramoops_driver);
-       platform_device_unregister(dummy);
-       kfree(dummy_data);
+       ramoops_unregister_dummy();
 }
 module_exit(ramoops_exit);
 
index 39b4a21dd9337a157927c1f8d0d741190de11e78..8a2737f0d61d3e0fbef04107da4a11283653544b 100644 (file)
@@ -1818,8 +1818,8 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
 }
 EXPORT_SYMBOL(vfs_clone_file_prep_inodes);
 
-int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
-               struct file *file_out, loff_t pos_out, u64 len)
+int do_clone_file_range(struct file *file_in, loff_t pos_in,
+                       struct file *file_out, loff_t pos_out, u64 len)
 {
        struct inode *inode_in = file_inode(file_in);
        struct inode *inode_out = file_inode(file_out);
@@ -1866,6 +1866,19 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
 
        return ret;
 }
+EXPORT_SYMBOL(do_clone_file_range);
+
+int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
+                        struct file *file_out, loff_t pos_out, u64 len)
+{
+       int ret;
+
+       file_start_write(file_out);
+       ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len);
+       file_end_write(file_out);
+
+       return ret;
+}
 EXPORT_SYMBOL(vfs_clone_file_range);
 
 /*
index daa732550088957538842fc34f8d84228ccbde33..0d6a6a4af8616dcdc484a6f0a417167460adfa64 100644 (file)
@@ -948,17 +948,19 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
        int err = 0;
 
 #ifdef CONFIG_FS_POSIX_ACL
-       if (inode->i_acl) {
-               err = xattr_list_one(&buffer, &remaining_size,
-                                    XATTR_NAME_POSIX_ACL_ACCESS);
-               if (err)
-                       return err;
-       }
-       if (inode->i_default_acl) {
-               err = xattr_list_one(&buffer, &remaining_size,
-                                    XATTR_NAME_POSIX_ACL_DEFAULT);
-               if (err)
-                       return err;
+       if (IS_POSIXACL(inode)) {
+               if (inode->i_acl) {
+                       err = xattr_list_one(&buffer, &remaining_size,
+                                            XATTR_NAME_POSIX_ACL_ACCESS);
+                       if (err)
+                               return err;
+               }
+               if (inode->i_default_acl) {
+                       err = xattr_list_one(&buffer, &remaining_size,
+                                            XATTR_NAME_POSIX_ACL_DEFAULT);
+                       if (err)
+                               return err;
+               }
        }
 #endif
 
index 1e671d4eb6fa652681d5d1f97e7ddb9cc41488af..c6299f82a6e496ac00b1ef27953a5ca5313cb9f8 100644 (file)
@@ -587,7 +587,7 @@ xfs_attr_leaf_addname(
                 */
                error = xfs_attr3_leaf_to_node(args);
                if (error)
-                       goto out_defer_cancel;
+                       return error;
                error = xfs_defer_finish(&args->trans);
                if (error)
                        return error;
@@ -675,7 +675,7 @@ xfs_attr_leaf_addname(
                        error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
                        /* bp is gone due to xfs_da_shrink_inode */
                        if (error)
-                               goto out_defer_cancel;
+                               return error;
                        error = xfs_defer_finish(&args->trans);
                        if (error)
                                return error;
@@ -693,9 +693,6 @@ xfs_attr_leaf_addname(
                error = xfs_attr3_leaf_clearflag(args);
        }
        return error;
-out_defer_cancel:
-       xfs_defer_cancel(args->trans);
-       return error;
 }
 
 /*
@@ -738,15 +735,12 @@ xfs_attr_leaf_removename(
                error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
                /* bp is gone due to xfs_da_shrink_inode */
                if (error)
-                       goto out_defer_cancel;
+                       return error;
                error = xfs_defer_finish(&args->trans);
                if (error)
                        return error;
        }
        return 0;
-out_defer_cancel:
-       xfs_defer_cancel(args->trans);
-       return error;
 }
 
 /*
@@ -864,7 +858,7 @@ xfs_attr_node_addname(
                        state = NULL;
                        error = xfs_attr3_leaf_to_node(args);
                        if (error)
-                               goto out_defer_cancel;
+                               goto out;
                        error = xfs_defer_finish(&args->trans);
                        if (error)
                                goto out;
@@ -888,7 +882,7 @@ xfs_attr_node_addname(
                 */
                error = xfs_da3_split(state);
                if (error)
-                       goto out_defer_cancel;
+                       goto out;
                error = xfs_defer_finish(&args->trans);
                if (error)
                        goto out;
@@ -984,7 +978,7 @@ xfs_attr_node_addname(
                if (retval && (state->path.active > 1)) {
                        error = xfs_da3_join(state);
                        if (error)
-                               goto out_defer_cancel;
+                               goto out;
                        error = xfs_defer_finish(&args->trans);
                        if (error)
                                goto out;
@@ -1013,9 +1007,6 @@ xfs_attr_node_addname(
        if (error)
                return error;
        return retval;
-out_defer_cancel:
-       xfs_defer_cancel(args->trans);
-       goto out;
 }
 
 /*
@@ -1107,7 +1098,7 @@ xfs_attr_node_removename(
        if (retval && (state->path.active > 1)) {
                error = xfs_da3_join(state);
                if (error)
-                       goto out_defer_cancel;
+                       goto out;
                error = xfs_defer_finish(&args->trans);
                if (error)
                        goto out;
@@ -1138,7 +1129,7 @@ xfs_attr_node_removename(
                        error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
                        /* bp is gone due to xfs_da_shrink_inode */
                        if (error)
-                               goto out_defer_cancel;
+                               goto out;
                        error = xfs_defer_finish(&args->trans);
                        if (error)
                                goto out;
@@ -1150,9 +1141,6 @@ xfs_attr_node_removename(
 out:
        xfs_da_state_free(state);
        return error;
-out_defer_cancel:
-       xfs_defer_cancel(args->trans);
-       goto out;
 }
 
 /*
index af094063e4029ae9b15af7d7da29419b7706485f..d89363c6b5234d73cef58d4e9533a88f6de09c46 100644 (file)
@@ -485,7 +485,7 @@ xfs_attr_rmtval_set(
                                  blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map,
                                  &nmap);
                if (error)
-                       goto out_defer_cancel;
+                       return error;
                error = xfs_defer_finish(&args->trans);
                if (error)
                        return error;
@@ -553,9 +553,6 @@ xfs_attr_rmtval_set(
        }
        ASSERT(valuelen == 0);
        return 0;
-out_defer_cancel:
-       xfs_defer_cancel(args->trans);
-       return error;
 }
 
 /*
@@ -625,7 +622,7 @@ xfs_attr_rmtval_remove(
                error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
                                    XFS_BMAPI_ATTRFORK, 1, &done);
                if (error)
-                       goto out_defer_cancel;
+                       return error;
                error = xfs_defer_finish(&args->trans);
                if (error)
                        return error;
@@ -638,7 +635,4 @@ xfs_attr_rmtval_remove(
                        return error;
        }
        return 0;
-out_defer_cancel:
-       xfs_defer_cancel(args->trans);
-       return error;
 }
index 2760314fdf7f1a9a57de82a043748bafe8273995..a47670332326449cb97f73850887cea799ea684c 100644 (file)
@@ -673,7 +673,8 @@ xfs_bmap_extents_to_btree(
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
 
        /*
-        * Make space in the inode incore.
+        * Make space in the inode incore. This needs to be undone if we fail
+        * to expand the root.
         */
        xfs_iroot_realloc(ip, 1, whichfork);
        ifp->if_flags |= XFS_IFBROOT;
@@ -711,16 +712,15 @@ xfs_bmap_extents_to_btree(
        args.minlen = args.maxlen = args.prod = 1;
        args.wasdel = wasdel;
        *logflagsp = 0;
-       if ((error = xfs_alloc_vextent(&args))) {
-               ASSERT(ifp->if_broot == NULL);
-               goto err1;
-       }
+       error = xfs_alloc_vextent(&args);
+       if (error)
+               goto out_root_realloc;
 
        if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
-               ASSERT(ifp->if_broot == NULL);
                error = -ENOSPC;
-               goto err1;
+               goto out_root_realloc;
        }
+
        /*
         * Allocation can't fail, the space was reserved.
         */
@@ -732,9 +732,10 @@ xfs_bmap_extents_to_btree(
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
        abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
        if (!abp) {
-               error = -ENOSPC;
-               goto err2;
+               error = -EFSCORRUPTED;
+               goto out_unreserve_dquot;
        }
+
        /*
         * Fill in the child block.
         */
@@ -775,11 +776,12 @@ xfs_bmap_extents_to_btree(
        *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
        return 0;
 
-err2:
+out_unreserve_dquot:
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
-err1:
+out_root_realloc:
        xfs_iroot_realloc(ip, -1, whichfork);
        XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+       ASSERT(ifp->if_broot == NULL);
        xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
 
        return error;
index 059bc44c27e83edf3cb1fe2c494490e65f93c5d8..afbe336600e165e2fe475ebcf6683e3f677c1cc4 100644 (file)
@@ -1016,6 +1016,8 @@ static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
 #define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
 #define XFS_DIFLAG_NODEFRAG_BIT     13 /* do not reorganize/defragment */
 #define XFS_DIFLAG_FILESTREAM_BIT   14  /* use filestream allocator */
+/* Do not use bit 15, di_flags is legacy and unchanging now */
+
 #define XFS_DIFLAG_REALTIME      (1 << XFS_DIFLAG_REALTIME_BIT)
 #define XFS_DIFLAG_PREALLOC      (1 << XFS_DIFLAG_PREALLOC_BIT)
 #define XFS_DIFLAG_NEWRTBM       (1 << XFS_DIFLAG_NEWRTBM_BIT)
index 30d1d60f1d46e62ff71eca1f45b273536cc6cce1..09d9c8cfa4a09f933a55f1122879809ecb3010af 100644 (file)
@@ -415,6 +415,31 @@ xfs_dinode_verify_fork(
        return NULL;
 }
 
+static xfs_failaddr_t
+xfs_dinode_verify_forkoff(
+       struct xfs_dinode       *dip,
+       struct xfs_mount        *mp)
+{
+       if (!XFS_DFORK_Q(dip))
+               return NULL;
+
+       switch (dip->di_format)  {
+       case XFS_DINODE_FMT_DEV:
+               if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
+                       return __this_address;
+               break;
+       case XFS_DINODE_FMT_LOCAL:      /* fall through ... */
+       case XFS_DINODE_FMT_EXTENTS:    /* fall through ... */
+       case XFS_DINODE_FMT_BTREE:
+               if (dip->di_forkoff >= (XFS_LITINO(mp, dip->di_version) >> 3))
+                       return __this_address;
+               break;
+       default:
+               return __this_address;
+       }
+       return NULL;
+}
+
 xfs_failaddr_t
 xfs_dinode_verify(
        struct xfs_mount        *mp,
@@ -470,6 +495,11 @@ xfs_dinode_verify(
        if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
                return __this_address;
 
+       /* check for illegal values of forkoff */
+       fa = xfs_dinode_verify_forkoff(dip, mp);
+       if (fa)
+               return fa;
+
        /* Do we have appropriate data fork formats for the mode? */
        switch (mode & S_IFMT) {
        case S_IFIFO:
index 036b5c7021eb322452fac7e0350ef7d47b8aa2eb..376bcb585ae6916e8ca089009d37f6e79b4bed19 100644 (file)
@@ -17,7 +17,6 @@
 #include "xfs_sb.h"
 #include "xfs_alloc.h"
 #include "xfs_rmap.h"
-#include "xfs_alloc.h"
 #include "scrub/xfs_scrub.h"
 #include "scrub/scrub.h"
 #include "scrub/common.h"
index 5b3b177c0fc908fa9527b3a35302e91993027c9c..e386c9b0b4ab7de6bc2d42fddcf204539d8d2a28 100644 (file)
@@ -126,6 +126,7 @@ xchk_inode_flags(
 {
        struct xfs_mount        *mp = sc->mp;
 
+       /* di_flags are all taken, last bit cannot be used */
        if (flags & ~XFS_DIFLAG_ANY)
                goto bad;
 
@@ -172,8 +173,9 @@ xchk_inode_flags2(
 {
        struct xfs_mount        *mp = sc->mp;
 
+       /* Unknown di_flags2 could be from a future kernel */
        if (flags2 & ~XFS_DIFLAG2_ANY)
-               goto bad;
+               xchk_ino_set_warning(sc, ino);
 
        /* reflink flag requires reflink feature */
        if ((flags2 & XFS_DIFLAG2_REFLINK) &&
index addbd74ecd8e5185ec70ae91cfe4cebe700bc9bb..6de8d90041ff0e676e85e559c1b2eef65dd74946 100644 (file)
@@ -702,13 +702,9 @@ xfs_bmap_punch_delalloc_range(
        struct xfs_iext_cursor  icur;
        int                     error = 0;
 
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       if (!(ifp->if_flags & XFS_IFEXTENTS)) {
-               error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
-               if (error)
-                       goto out_unlock;
-       }
+       ASSERT(ifp->if_flags & XFS_IFEXTENTS);
 
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
                goto out_unlock;
 
@@ -1584,7 +1580,7 @@ xfs_swap_extent_rmap(
                                        tirec.br_blockcount, &irec,
                                        &nimaps, 0);
                        if (error)
-                               goto out_defer;
+                               goto out;
                        ASSERT(nimaps == 1);
                        ASSERT(tirec.br_startoff == irec.br_startoff);
                        trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
@@ -1599,22 +1595,22 @@ xfs_swap_extent_rmap(
                        /* Remove the mapping from the donor file. */
                        error = xfs_bmap_unmap_extent(tp, tip, &uirec);
                        if (error)
-                               goto out_defer;
+                               goto out;
 
                        /* Remove the mapping from the source file. */
                        error = xfs_bmap_unmap_extent(tp, ip, &irec);
                        if (error)
-                               goto out_defer;
+                               goto out;
 
                        /* Map the donor file's blocks into the source file. */
                        error = xfs_bmap_map_extent(tp, ip, &uirec);
                        if (error)
-                               goto out_defer;
+                               goto out;
 
                        /* Map the source file's blocks into the donor file. */
                        error = xfs_bmap_map_extent(tp, tip, &irec);
                        if (error)
-                               goto out_defer;
+                               goto out;
 
                        error = xfs_defer_finish(tpp);
                        tp = *tpp;
@@ -1636,8 +1632,6 @@ xfs_swap_extent_rmap(
        tip->i_d.di_flags2 = tip_flags2;
        return 0;
 
-out_defer:
-       xfs_defer_cancel(tp);
 out:
        trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
        tip->i_d.di_flags2 = tip_flags2;
index 1c9d1398980b6562969ab03a38e5158aeafc6c07..12d8455bfbb29114887744046d52cb75428bc911 100644 (file)
@@ -531,6 +531,49 @@ xfs_buf_item_push(
        return rval;
 }
 
+/*
+ * Drop the buffer log item refcount and take appropriate action. This helper
+ * determines whether the bli must be freed or not, since a decrement to zero
+ * does not necessarily mean the bli is unused.
+ *
+ * Return true if the bli is freed, false otherwise.
+ */
+bool
+xfs_buf_item_put(
+       struct xfs_buf_log_item *bip)
+{
+       struct xfs_log_item     *lip = &bip->bli_item;
+       bool                    aborted;
+       bool                    dirty;
+
+       /* drop the bli ref and return if it wasn't the last one */
+       if (!atomic_dec_and_test(&bip->bli_refcount))
+               return false;
+
+       /*
+        * We dropped the last ref and must free the item if clean or aborted.
+        * If the bli is dirty and non-aborted, the buffer was clean in the
+        * transaction but still awaiting writeback from previous changes. In
+        * that case, the bli is freed on buffer writeback completion.
+        */
+       aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
+                 XFS_FORCED_SHUTDOWN(lip->li_mountp);
+       dirty = bip->bli_flags & XFS_BLI_DIRTY;
+       if (dirty && !aborted)
+               return false;
+
+       /*
+        * The bli is aborted or clean. An aborted item may be in the AIL
+        * regardless of dirty state.  For example, consider an aborted
+        * transaction that invalidated a dirty bli and cleared the dirty
+        * state.
+        */
+       if (aborted)
+               xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
+       xfs_buf_item_relse(bip->bli_buf);
+       return true;
+}
+
 /*
  * Release the buffer associated with the buf log item.  If there is no dirty
  * logged data associated with the buffer recorded in the buf log item, then
@@ -556,76 +599,42 @@ xfs_buf_item_unlock(
 {
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
-       bool                    aborted;
-       bool                    hold = !!(bip->bli_flags & XFS_BLI_HOLD);
-       bool                    dirty = !!(bip->bli_flags & XFS_BLI_DIRTY);
+       bool                    released;
+       bool                    hold = bip->bli_flags & XFS_BLI_HOLD;
+       bool                    stale = bip->bli_flags & XFS_BLI_STALE;
 #if defined(DEBUG) || defined(XFS_WARN)
-       bool                    ordered = !!(bip->bli_flags & XFS_BLI_ORDERED);
+       bool                    ordered = bip->bli_flags & XFS_BLI_ORDERED;
+       bool                    dirty = bip->bli_flags & XFS_BLI_DIRTY;
 #endif
 
-       aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags);
-
-       /* Clear the buffer's association with this transaction. */
-       bp->b_transp = NULL;
-
-       /*
-        * The per-transaction state has been copied above so clear it from the
-        * bli.
-        */
-       bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
-
-       /*
-        * If the buf item is marked stale, then don't do anything.  We'll
-        * unlock the buffer and free the buf item when the buffer is unpinned
-        * for the last time.
-        */
-       if (bip->bli_flags & XFS_BLI_STALE) {
-               trace_xfs_buf_item_unlock_stale(bip);
-               ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
-               if (!aborted) {
-                       atomic_dec(&bip->bli_refcount);
-                       return;
-               }
-       }
-
        trace_xfs_buf_item_unlock(bip);
 
        /*
-        * If the buf item isn't tracking any data, free it, otherwise drop the
-        * reference we hold to it. If we are aborting the transaction, this may
-        * be the only reference to the buf item, so we free it anyway
-        * regardless of whether it is dirty or not. A dirty abort implies a
-        * shutdown, anyway.
-        *
         * The bli dirty state should match whether the blf has logged segments
         * except for ordered buffers, where only the bli should be dirty.
         */
        ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
               (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
+       ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
 
        /*
-        * Clean buffers, by definition, cannot be in the AIL. However, aborted
-        * buffers may be in the AIL regardless of dirty state. An aborted
-        * transaction that invalidates a buffer already in the AIL may have
-        * marked it stale and cleared the dirty state, for example.
-        *
-        * Therefore if we are aborting a buffer and we've just taken the last
-        * reference away, we have to check if it is in the AIL before freeing
-        * it. We need to free it in this case, because an aborted transaction
-        * has already shut the filesystem down and this is the last chance we
-        * will have to do so.
+        * Clear the buffer's association with this transaction and
+        * per-transaction state from the bli, which has been copied above.
         */
-       if (atomic_dec_and_test(&bip->bli_refcount)) {
-               if (aborted) {
-                       ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
-                       xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
-                       xfs_buf_item_relse(bp);
-               } else if (!dirty)
-                       xfs_buf_item_relse(bp);
-       }
+       bp->b_transp = NULL;
+       bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
 
-       if (!hold)
-               xfs_buf_relse(bp);
+       /*
+        * Unref the item and unlock the buffer unless held or stale. Stale
+        * buffers remain locked until final unpin unless the bli is freed by
+        * the unref call. The latter implies shutdown because buffer
+        * invalidation dirties the bli and transaction.
+        */
+       released = xfs_buf_item_put(bip);
+       if (hold || (stale && !released))
+               return;
+       ASSERT(!stale || test_bit(XFS_LI_ABORTED, &lip->li_flags));
+       xfs_buf_relse(bp);
 }
 
 /*
index 3f7d7b72e7e610aa9d7b5f717858895ccd48ea3d..90f65f891fabd27210e52a2c9085c677d12fde62 100644 (file)
@@ -51,6 +51,7 @@ struct xfs_buf_log_item {
 
 int    xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
 void   xfs_buf_item_relse(struct xfs_buf *);
+bool   xfs_buf_item_put(struct xfs_buf_log_item *);
 void   xfs_buf_item_log(struct xfs_buf_log_item *, uint, uint);
 bool   xfs_buf_item_dirty_format(struct xfs_buf_log_item *);
 void   xfs_buf_attach_iodone(struct xfs_buf *,
index d957a46dc1cb8754f7cc99b35d30a15a7fb45da2..05db9540e4597536211446475d301eac834e972e 100644 (file)
@@ -1563,7 +1563,7 @@ xfs_itruncate_extents_flags(
                error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
                                    XFS_ITRUNC_MAX_EXTENTS, &done);
                if (error)
-                       goto out_bmap_cancel;
+                       goto out;
 
                /*
                 * Duplicate the transaction that has the permanent
@@ -1599,14 +1599,6 @@ xfs_itruncate_extents_flags(
 out:
        *tpp = tp;
        return error;
-out_bmap_cancel:
-       /*
-        * If the bunmapi call encounters an error, return to the caller where
-        * the transaction can be properly aborted.  We just need to make sure
-        * we're not holding any resources that we were not when we came in.
-        */
-       xfs_defer_cancel(tp);
-       goto out;
 }
 
 int
index c3e74f9128e8af22e64f6142b104389a9e96f10e..f48ffd7a8d3e491d76defe66961194a635276115 100644 (file)
@@ -471,8 +471,18 @@ xfs_vn_get_link_inline(
        struct inode            *inode,
        struct delayed_call     *done)
 {
+       char                    *link;
+
        ASSERT(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE);
-       return XFS_I(inode)->i_df.if_u1.if_data;
+
+       /*
+        * The VFS crashes on a NULL pointer, so return -EFSCORRUPTED if
+        * if_data is junk.
+        */
+       link = XFS_I(inode)->i_df.if_u1.if_data;
+       if (!link)
+               return ERR_PTR(-EFSCORRUPTED);
+       return link;
 }
 
 STATIC int
index a21dc61ec09eb3f76faaeaf653606e942f6cdd90..1fc9e9042e0ef7dfb1e138f229a4f91e231a91f7 100644 (file)
@@ -1570,16 +1570,6 @@ xlog_find_zeroed(
        if (last_cycle != 0) {          /* log completely written to */
                xlog_put_bp(bp);
                return 0;
-       } else if (first_cycle != 1) {
-               /*
-                * If the cycle of the last block is zero, the cycle of
-                * the first block must be 1. If it's not, maybe we're
-                * not looking at a log... Bail out.
-                */
-               xfs_warn(log->l_mp,
-                       "Log inconsistent or not a log (last==0, first!=1)");
-               error = -EINVAL;
-               goto bp_err;
        }
 
        /* we have a partially zeroed log */
index 38f405415b88a4e796c79071182d419c3208f195..5289e22cb081d4aee3f0a57ef7665b3930393c15 100644 (file)
@@ -352,6 +352,47 @@ xfs_reflink_convert_cow(
        return error;
 }
 
+/*
+ * Find the extent that maps the given range in the COW fork. Even if the extent
+ * is not shared we might have a preallocation for it in the COW fork. If so we
+ * use it that rather than trigger a new allocation.
+ */
+static int
+xfs_find_trim_cow_extent(
+       struct xfs_inode        *ip,
+       struct xfs_bmbt_irec    *imap,
+       bool                    *shared,
+       bool                    *found)
+{
+       xfs_fileoff_t           offset_fsb = imap->br_startoff;
+       xfs_filblks_t           count_fsb = imap->br_blockcount;
+       struct xfs_iext_cursor  icur;
+       struct xfs_bmbt_irec    got;
+       bool                    trimmed;
+
+       *found = false;
+
+       /*
+        * If we don't find an overlapping extent, trim the range we need to
+        * allocate to fit the hole we found.
+        */
+       if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) ||
+           got.br_startoff > offset_fsb)
+               return xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
+
+       *shared = true;
+       if (isnullstartblock(got.br_startblock)) {
+               xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
+               return 0;
+       }
+
+       /* real extent found - no need to allocate */
+       xfs_trim_extent(&got, offset_fsb, count_fsb);
+       *imap = got;
+       *found = true;
+       return 0;
+}
+
 /* Allocate all CoW reservations covering a range of blocks in a file. */
 int
 xfs_reflink_allocate_cow(
@@ -363,78 +404,64 @@ xfs_reflink_allocate_cow(
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           offset_fsb = imap->br_startoff;
        xfs_filblks_t           count_fsb = imap->br_blockcount;
-       struct xfs_bmbt_irec    got;
-       struct xfs_trans        *tp = NULL;
+       struct xfs_trans        *tp;
        int                     nimaps, error = 0;
-       bool                    trimmed;
+       bool                    found;
        xfs_filblks_t           resaligned;
        xfs_extlen_t            resblks = 0;
-       struct xfs_iext_cursor  icur;
 
-retry:
-       ASSERT(xfs_is_reflink_inode(ip));
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+       ASSERT(xfs_is_reflink_inode(ip));
 
-       /*
-        * Even if the extent is not shared we might have a preallocation for
-        * it in the COW fork.  If so use it.
-        */
-       if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) &&
-           got.br_startoff <= offset_fsb) {
-               *shared = true;
-
-               /* If we have a real allocation in the COW fork we're done. */
-               if (!isnullstartblock(got.br_startblock)) {
-                       xfs_trim_extent(&got, offset_fsb, count_fsb);
-                       *imap = got;
-                       goto convert;
-               }
+       error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
+       if (error || !*shared)
+               return error;
+       if (found)
+               goto convert;
 
-               xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
-       } else {
-               error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
-               if (error || !*shared)
-                       goto out;
-       }
+       resaligned = xfs_aligned_fsb_count(imap->br_startoff,
+               imap->br_blockcount, xfs_get_cowextsz_hint(ip));
+       resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
 
-       if (!tp) {
-               resaligned = xfs_aligned_fsb_count(imap->br_startoff,
-                       imap->br_blockcount, xfs_get_cowextsz_hint(ip));
-               resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
+       xfs_iunlock(ip, *lockmode);
+       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
+       *lockmode = XFS_ILOCK_EXCL;
+       xfs_ilock(ip, *lockmode);
 
-               xfs_iunlock(ip, *lockmode);
-               error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
-               *lockmode = XFS_ILOCK_EXCL;
-               xfs_ilock(ip, *lockmode);
+       if (error)
+               return error;
 
-               if (error)
-                       return error;
+       error = xfs_qm_dqattach_locked(ip, false);
+       if (error)
+               goto out_trans_cancel;
 
-               error = xfs_qm_dqattach_locked(ip, false);
-               if (error)
-                       goto out;
-               goto retry;
+       /*
+        * Check for an overlapping extent again now that we dropped the ilock.
+        */
+       error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
+       if (error || !*shared)
+               goto out_trans_cancel;
+       if (found) {
+               xfs_trans_cancel(tp);
+               goto convert;
        }
 
        error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
                        XFS_QMOPT_RES_REGBLKS);
        if (error)
-               goto out;
+               goto out_trans_cancel;
 
        xfs_trans_ijoin(tp, ip, 0);
 
-       nimaps = 1;
-
        /* Allocate the entire reservation as unwritten blocks. */
+       nimaps = 1;
        error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
                        XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC,
                        resblks, imap, &nimaps);
        if (error)
-               goto out_trans_cancel;
+               goto out_unreserve;
 
        xfs_inode_set_cowblocks_tag(ip);
-
-       /* Finish up. */
        error = xfs_trans_commit(tp);
        if (error)
                return error;
@@ -447,12 +474,12 @@ xfs_reflink_allocate_cow(
                return -ENOSPC;
 convert:
        return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb);
-out_trans_cancel:
+
+out_unreserve:
        xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
                        XFS_QMOPT_RES_REGBLKS);
-out:
-       if (tp)
-               xfs_trans_cancel(tp);
+out_trans_cancel:
+       xfs_trans_cancel(tp);
        return error;
 }
 
@@ -666,14 +693,12 @@ xfs_reflink_end_cow(
                if (!del.br_blockcount)
                        goto prev_extent;
 
-               ASSERT(!isnullstartblock(got.br_startblock));
-
                /*
-                * Don't remap unwritten extents; these are
-                * speculatively preallocated CoW extents that have been
-                * allocated but have not yet been involved in a write.
+                * Only remap real extent that contain data.  With AIO
+                * speculatively preallocations can leak into the range we
+                * are called upon, and we need to skip them.
                 */
-               if (got.br_state == XFS_EXT_UNWRITTEN)
+               if (!xfs_bmap_is_real_extent(&got))
                        goto prev_extent;
 
                /* Unmap the old blocks in the data fork. */
index ad315e83bc02cfdaf986390e1aa1a91915afae9e..3043e5ed6495580de11de6932117addca0e85aec 100644 (file)
@@ -473,7 +473,6 @@ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
-DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
 DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
index bedc5a5133a56d40ebbf569e9bc02b120f4e3557..912b42f5fe4ac61ed79bbc729f5ccf094de66c32 100644 (file)
@@ -259,6 +259,14 @@ xfs_trans_alloc(
        struct xfs_trans        *tp;
        int                     error;
 
+       /*
+        * Allocate the handle before we do our freeze accounting and setting up
+        * GFP_NOFS allocation context so that we avoid lockdep false positives
+        * by doing GFP_KERNEL allocations inside sb_start_intwrite().
+        */
+       tp = kmem_zone_zalloc(xfs_trans_zone,
+               (flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
+
        if (!(flags & XFS_TRANS_NO_WRITECOUNT))
                sb_start_intwrite(mp->m_super);
 
@@ -270,8 +278,6 @@ xfs_trans_alloc(
                mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
        atomic_inc(&mp->m_active_trans);
 
-       tp = kmem_zone_zalloc(xfs_trans_zone,
-               (flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
        tp->t_magic = XFS_TRANS_HEADER_MAGIC;
        tp->t_flags = flags;
        tp->t_mountp = mp;
index 15919f67a88f57aeab504f471c9088d066d08493..286a287ac57acc5c5abcbe51a881af09026ecf0e 100644 (file)
@@ -322,49 +322,38 @@ xfs_trans_read_buf_map(
 }
 
 /*
- * Release the buffer bp which was previously acquired with one of the
- * xfs_trans_... buffer allocation routines if the buffer has not
- * been modified within this transaction.  If the buffer is modified
- * within this transaction, do decrement the recursion count but do
- * not release the buffer even if the count goes to 0.  If the buffer is not
- * modified within the transaction, decrement the recursion count and
- * release the buffer if the recursion count goes to 0.
+ * Release a buffer previously joined to the transaction. If the buffer is
+ * modified within this transaction, decrement the recursion count but do not
+ * release the buffer even if the count goes to 0. If the buffer is not modified
+ * within the transaction, decrement the recursion count and release the buffer
+ * if the recursion count goes to 0.
  *
- * If the buffer is to be released and it was not modified before
- * this transaction began, then free the buf_log_item associated with it.
+ * If the buffer is to be released and it was not already dirty before this
+ * transaction began, then also free the buf_log_item associated with it.
  *
- * If the transaction pointer is NULL, make this just a normal
- * brelse() call.
+ * If the transaction pointer is NULL, this is a normal xfs_buf_relse() call.
  */
 void
 xfs_trans_brelse(
-       xfs_trans_t             *tp,
-       xfs_buf_t               *bp)
+       struct xfs_trans        *tp,
+       struct xfs_buf          *bp)
 {
-       struct xfs_buf_log_item *bip;
-       int                     freed;
+       struct xfs_buf_log_item *bip = bp->b_log_item;
 
-       /*
-        * Default to a normal brelse() call if the tp is NULL.
-        */
-       if (tp == NULL) {
-               ASSERT(bp->b_transp == NULL);
+       ASSERT(bp->b_transp == tp);
+
+       if (!tp) {
                xfs_buf_relse(bp);
                return;
        }
 
-       ASSERT(bp->b_transp == tp);
-       bip = bp->b_log_item;
+       trace_xfs_trans_brelse(bip);
        ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
-       ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-       ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 
-       trace_xfs_trans_brelse(bip);
-
        /*
-        * If the release is just for a recursive lock,
-        * then decrement the count and return.
+        * If the release is for a recursive lookup, then decrement the count
+        * and return.
         */
        if (bip->bli_recur > 0) {
                bip->bli_recur--;
@@ -372,64 +361,24 @@ xfs_trans_brelse(
        }
 
        /*
-        * If the buffer is dirty within this transaction, we can't
+        * If the buffer is invalidated or dirty in this transaction, we can't
         * release it until we commit.
         */
        if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
                return;
-
-       /*
-        * If the buffer has been invalidated, then we can't release
-        * it until the transaction commits to disk unless it is re-dirtied
-        * as part of this transaction.  This prevents us from pulling
-        * the item from the AIL before we should.
-        */
        if (bip->bli_flags & XFS_BLI_STALE)
                return;
 
-       ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
-
        /*
-        * Free up the log item descriptor tracking the released item.
+        * Unlink the log item from the transaction and clear the hold flag, if
+        * set. We wouldn't want the next user of the buffer to get confused.
         */
+       ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
        xfs_trans_del_item(&bip->bli_item);
+       bip->bli_flags &= ~XFS_BLI_HOLD;
 
-       /*
-        * Clear the hold flag in the buf log item if it is set.
-        * We wouldn't want the next user of the buffer to
-        * get confused.
-        */
-       if (bip->bli_flags & XFS_BLI_HOLD) {
-               bip->bli_flags &= ~XFS_BLI_HOLD;
-       }
-
-       /*
-        * Drop our reference to the buf log item.
-        */
-       freed = atomic_dec_and_test(&bip->bli_refcount);
-
-       /*
-        * If the buf item is not tracking data in the log, then we must free it
-        * before releasing the buffer back to the free pool.
-        *
-        * If the fs has shutdown and we dropped the last reference, it may fall
-        * on us to release a (possibly dirty) bli if it never made it to the
-        * AIL (e.g., the aborted unpin already happened and didn't release it
-        * due to our reference). Since we're already shutdown and need
-        * ail_lock, just force remove from the AIL and release the bli here.
-        */
-       if (XFS_FORCED_SHUTDOWN(tp->t_mountp) && freed) {
-               xfs_trans_ail_remove(&bip->bli_item, SHUTDOWN_LOG_IO_ERROR);
-               xfs_buf_item_relse(bp);
-       } else if (!(bip->bli_flags & XFS_BLI_DIRTY)) {
-/***
-               ASSERT(bp->b_pincount == 0);
-***/
-               ASSERT(atomic_read(&bip->bli_refcount) == 0);
-               ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));
-               ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
-               xfs_buf_item_relse(bp);
-       }
+       /* drop the reference to the bli */
+       xfs_buf_item_put(bip);
 
        bp->b_transp = NULL;
        xfs_buf_relse(bp);
index 989f8e52864dadc895f799fdf677eed375d5a816..971bb7853776072a2ac9109ddac83c204ab6f67c 100644 (file)
@@ -87,9 +87,10 @@ struct drm_client_dev {
        struct drm_file *file;
 };
 
-int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
-                  const char *name, const struct drm_client_funcs *funcs);
+int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
+                   const char *name, const struct drm_client_funcs *funcs);
 void drm_client_release(struct drm_client_dev *client);
+void drm_client_add(struct drm_client_dev *client);
 
 void drm_client_dev_unregister(struct drm_device *dev);
 void drm_client_dev_hotplug(struct drm_device *dev);
index 582a0ec0aa70448e07b39f5de7f49868e5787953..777814755fa62baff930d0f2cd46e0cec1525500 100644 (file)
@@ -89,7 +89,6 @@ struct drm_panel {
        struct drm_device *drm;
        struct drm_connector *connector;
        struct device *dev;
-       struct device_link *link;
 
        const struct drm_panel_funcs *funcs;
 
index 8942e61f0028c33ef2eb46595f4bd9eebc900fac..8ab5df76992395276bf35ea4ee7437ef5a00cf3a 100644 (file)
@@ -53,12 +53,20 @@ enum fpga_mgr_states {
        FPGA_MGR_STATE_OPERATING,
 };
 
-/*
- * FPGA Manager flags
- * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
- * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
- * FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
- * FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
+/**
+ * DOC: FPGA Manager flags
+ *
+ * Flags used in the &fpga_image_info->flags field
+ *
+ * %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ *
+ * %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
+ *
+ * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted
+ *
+ * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
+ *
+ * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
  */
 #define FPGA_MGR_PARTIAL_RECONFIG      BIT(0)
 #define FPGA_MGR_EXTERNAL_CONFIG       BIT(1)
index 6c0b4a1c22ff5bd84c5c4e29ce07b0366b7bc5b1..897eae8faee1b04f12fe8cbebbaae2b4505a771d 100644 (file)
@@ -1828,8 +1828,10 @@ extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
 extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
                                      struct inode *inode_out, loff_t pos_out,
                                      u64 *len, bool is_dedupe);
+extern int do_clone_file_range(struct file *file_in, loff_t pos_in,
+                              struct file *file_out, loff_t pos_out, u64 len);
 extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
-               struct file *file_out, loff_t pos_out, u64 len);
+                               struct file *file_out, loff_t pos_out, u64 len);
 extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
                                         struct inode *dest, loff_t destoff,
                                         loff_t len, bool *is_same);
@@ -2773,19 +2775,6 @@ static inline void file_end_write(struct file *file)
        __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
 }
 
-static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
-                                     struct file *file_out, loff_t pos_out,
-                                     u64 len)
-{
-       int ret;
-
-       file_start_write(file_out);
-       ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
-       file_end_write(file_out);
-
-       return ret;
-}
-
 /*
  * get_write_access() gets write permission for a file.
  * put_write_access() releases this write permission.
index 6b68e345f0ca64da6590817f719796471d0c5c2d..087fd5f48c9128752cf7ff8a872f30afab057381 100644 (file)
@@ -140,6 +140,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
 struct page *follow_huge_pd(struct vm_area_struct *vma,
@@ -170,6 +172,18 @@ static inline unsigned long hugetlb_total_pages(void)
        return 0;
 }
 
+static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
+                                       pte_t *ptep)
+{
+       return 0;
+}
+
+static inline void adjust_range_if_pmd_sharing_possible(
+                               struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end)
+{
+}
+
 #define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)        ({ BUG(); 0; })
 #define follow_huge_addr(mm, addr, write)      ERR_PTR(-EINVAL)
 #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
index a528747f8aedb048b873353d40426d76058cffd1..e8338e5dc10bfd5e6bb415337ff3667c39e008f1 100644 (file)
@@ -78,9 +78,9 @@ enum {
        BD71837_REG_TRANS_COND0        = 0x1F,
        BD71837_REG_TRANS_COND1        = 0x20,
        BD71837_REG_VRFAULTEN          = 0x21,
-       BD71837_REG_MVRFLTMASK0        = 0x22,
-       BD71837_REG_MVRFLTMASK1        = 0x23,
-       BD71837_REG_MVRFLTMASK2        = 0x24,
+       BD718XX_REG_MVRFLTMASK0        = 0x22,
+       BD718XX_REG_MVRFLTMASK1        = 0x23,
+       BD718XX_REG_MVRFLTMASK2        = 0x24,
        BD71837_REG_RCVCFG             = 0x25,
        BD71837_REG_RCVNUM             = 0x26,
        BD71837_REG_PWRONCONFIG0       = 0x27,
@@ -159,6 +159,33 @@ enum {
 #define BUCK8_MASK             0x3F
 #define BUCK8_DEFAULT          0x1E
 
+/* BD718XX Voltage monitoring masks */
+#define BD718XX_BUCK1_VRMON80           0x1
+#define BD718XX_BUCK1_VRMON130          0x2
+#define BD718XX_BUCK2_VRMON80           0x4
+#define BD718XX_BUCK2_VRMON130          0x8
+#define BD718XX_1ST_NODVS_BUCK_VRMON80  0x1
+#define BD718XX_1ST_NODVS_BUCK_VRMON130 0x2
+#define BD718XX_2ND_NODVS_BUCK_VRMON80  0x4
+#define BD718XX_2ND_NODVS_BUCK_VRMON130 0x8
+#define BD718XX_3RD_NODVS_BUCK_VRMON80  0x10
+#define BD718XX_3RD_NODVS_BUCK_VRMON130 0x20
+#define BD718XX_4TH_NODVS_BUCK_VRMON80  0x40
+#define BD718XX_4TH_NODVS_BUCK_VRMON130 0x80
+#define BD718XX_LDO1_VRMON80            0x1
+#define BD718XX_LDO2_VRMON80            0x2
+#define BD718XX_LDO3_VRMON80            0x4
+#define BD718XX_LDO4_VRMON80            0x8
+#define BD718XX_LDO5_VRMON80            0x10
+#define BD718XX_LDO6_VRMON80            0x20
+
+/* BD71837 specific voltage monitoring masks */
+#define BD71837_BUCK3_VRMON80           0x10
+#define BD71837_BUCK3_VRMON130          0x20
+#define BD71837_BUCK4_VRMON80           0x40
+#define BD71837_BUCK4_VRMON130          0x80
+#define BD71837_LDO7_VRMON80            0x40
+
 /* BD71837_REG_IRQ bits */
 #define IRQ_SWRST              0x40
 #define IRQ_PWRON_S            0x20
index 83a33a1873a6823be1a033dec3f0743a08732f88..7f5ca2cd3a32f7438f3f1ab39ad47422a701b53b 100644 (file)
@@ -90,6 +90,8 @@ struct mlx5_hairpin {
 
        u32 *rqn;
        u32 *sqn;
+
+       bool peer_gone;
 };
 
 struct mlx5_hairpin *
index a61ebe8ad4ca92e72e23855c17f8e7c9ad059a54..0416a7204be37b331a506efedc5c4c1333633a6a 100644 (file)
@@ -2455,6 +2455,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
        return vma;
 }
 
+static inline bool range_in_vma(struct vm_area_struct *vma,
+                               unsigned long start, unsigned long end)
+{
+       return (vma && vma->vm_start <= start && end <= vma->vm_end);
+}
+
 #ifdef CONFIG_MMU
 pgprot_t vm_get_page_prot(unsigned long vm_flags);
 void vma_set_page_prot(struct vm_area_struct *vma);
index 1e22d96734e0099476b18d14ca7c11a991d3a470..3f4c0b167333a37ca1a5b53cc3259904d5fc91ad 100644 (file)
@@ -671,12 +671,6 @@ typedef struct pglist_data {
 #ifdef CONFIG_NUMA_BALANCING
        /* Lock serializing the migrate rate limiting window */
        spinlock_t numabalancing_migrate_lock;
-
-       /* Rate limiting time interval */
-       unsigned long numabalancing_migrate_next_window;
-
-       /* Number of pages migrated during the rate limiting time interval */
-       unsigned long numabalancing_migrate_nr_pages;
 #endif
        /*
         * This is a per-node reserve of pages that are not available
index ca5ab98053c8d48312d9f479a861c6c426b33e54..c7861e4b402c131cfb548f7d0ed863c4ec3ee3e5 100644 (file)
@@ -1730,6 +1730,8 @@ enum netdev_priv_flags {
  *                     switch driver and used to set the phys state of the
  *                     switch port.
  *
+ *     @wol_enabled:   Wake-on-LAN is enabled
+ *
  *     FIXME: cleanup struct net_device such that network protocol info
  *     moves out.
  */
@@ -2014,6 +2016,7 @@ struct net_device {
        struct lock_class_key   *qdisc_tx_busylock;
        struct lock_class_key   *qdisc_running_key;
        bool                    proto_down;
+       unsigned                wol_enabled:1;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
index 07efffd0c759d0b509dec19acb6b718cbab06031..bbe99d2b28b4c62063450b7c4dadc4013c377897 100644 (file)
@@ -215,6 +215,8 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                break;
        case NFPROTO_ARP:
 #ifdef CONFIG_NETFILTER_FAMILY_ARP
+               if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
+                       break;
                hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
 #endif
                break;
index 3468703d663af6d94bbf6fd07343d8bd9418b81a..a459a5e973a7294f171e192dd4dacf98d5b27c16 100644 (file)
@@ -48,9 +48,9 @@ struct regulator;
  * DISABLE_IN_SUSPEND  - turn off regulator in suspend states
  * ENABLE_IN_SUSPEND   - keep regulator on in suspend states
  */
-#define DO_NOTHING_IN_SUSPEND  (-1)
-#define DISABLE_IN_SUSPEND     0
-#define ENABLE_IN_SUSPEND      1
+#define DO_NOTHING_IN_SUSPEND  0
+#define DISABLE_IN_SUSPEND     1
+#define ENABLE_IN_SUSPEND      2
 
 /* Regulator active discharge flags */
 enum regulator_active_discharge {
index c0e795d95477daea839ba98f762e09195e327184..1c89611e0e0634aec1f64c62d99637f4ee709bc0 100644 (file)
@@ -36,6 +36,7 @@ enum {
        SCIx_SH4_SCIF_FIFODATA_REGTYPE,
        SCIx_SH7705_SCIF_REGTYPE,
        SCIx_HSCIF_REGTYPE,
+       SCIx_RZ_SCIFA_REGTYPE,
 
        SCIx_NR_REGTYPES,
 };
index b2bd4b4127c46a2e8f2eb103d989b629d7169570..69ee30456864a05ceb76bac1d0dbe8df3a0e3448 100644 (file)
@@ -81,8 +81,10 @@ enum spi_mem_data_dir {
  * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
  * @data.buswidth: number of IO lanes used to send/receive the data
  * @data.dir: direction of the transfer
- * @data.buf.in: input buffer
- * @data.buf.out: output buffer
+ * @data.nbytes: number of data bytes to send/receive. Can be zero if the
+ *              operation does not involve transferring data
+ * @data.buf.in: input buffer (must be DMA-able)
+ * @data.buf.out: output buffer (must be DMA-able)
  */
 struct spi_mem_op {
        struct {
@@ -105,7 +107,6 @@ struct spi_mem_op {
                u8 buswidth;
                enum spi_mem_data_dir dir;
                unsigned int nbytes;
-               /* buf.{in,out} must be DMA-able. */
                union {
                        void *in;
                        const void *out;
index 9397628a196714dc2177552465fe91fd18b9627d..cb462f9ab7dd592bc1d613c86aefeb787cdd9321 100644 (file)
@@ -5,6 +5,24 @@
 #include <linux/if_vlan.h>
 #include <uapi/linux/virtio_net.h>
 
+static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
+                                          const struct virtio_net_hdr *hdr)
+{
+       switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+       case VIRTIO_NET_HDR_GSO_TCPV4:
+       case VIRTIO_NET_HDR_GSO_UDP:
+               skb->protocol = cpu_to_be16(ETH_P_IP);
+               break;
+       case VIRTIO_NET_HDR_GSO_TCPV6:
+               skb->protocol = cpu_to_be16(ETH_P_IPV6);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                                        const struct virtio_net_hdr *hdr,
                                        bool little_endian)
index ea73fef8bdc021b48e68b4b4ce8bb7fe7fc44b57..8586cfb498286ce4399487f4f4627ff2a0b16e9f 100644 (file)
@@ -38,10 +38,13 @@ struct v4l2_ctrl_handler;
  * @prio: priority of the file handler, as defined by &enum v4l2_priority
  *
  * @wait: event' s wait queue
+ * @subscribe_lock: serialise changes to the subscribed list; guarantee that
+ *                 the add and del event callbacks are orderly called
  * @subscribed: list of subscribed events
  * @available: list of events waiting to be dequeued
  * @navailable: number of available events at @available list
  * @sequence: event sequence number
+ *
  * @m2m_ctx: pointer to &struct v4l2_m2m_ctx
  */
 struct v4l2_fh {
@@ -52,6 +55,7 @@ struct v4l2_fh {
 
        /* Events */
        wait_queue_head_t       wait;
+       struct mutex            subscribe_lock;
        struct list_head        subscribed;
        struct list_head        available;
        unsigned int            navailable;
index a2d058170ea3c38739263570bcf14f2a0935e16f..b46d68acf7011f39d86eb65ad2d9886f650f144e 100644 (file)
@@ -139,12 +139,6 @@ struct bond_parm_tbl {
        int mode;
 };
 
-struct netdev_notify_work {
-       struct delayed_work     work;
-       struct net_device       *dev;
-       struct netdev_bonding_info bonding_info;
-};
-
 struct slave {
        struct net_device *dev; /* first - useful for panic debug */
        struct bonding *bond; /* our master */
@@ -172,6 +166,7 @@ struct slave {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll *np;
 #endif
+       struct delayed_work notify_work;
        struct kobject kobj;
        struct rtnl_link_stats64 slave_stats;
 };
index 8ebabc9873d1593b46161697b53c8a8d14242000..4de121e24ce58fa6e19aa24ffec8cf2c79483132 100644 (file)
@@ -4852,8 +4852,6 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
  *
  * @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried.
  * @freq: the freqency(in MHz) to be queried.
- * @ptr: pointer where the regdb wmm data is to be stored (or %NULL if
- *     irrelevant). This can be used later for deduplication.
  * @rule: pointer to store the wmm rule from the regulatory db.
  *
  * Self-managed wireless drivers can use this function to  query
index e03b93360f332b3e3232873ac1cbd0ee7478fabb..a80fd0ac4563283246f4f53cea1ac0cd17b41dab 100644 (file)
@@ -130,12 +130,6 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
        return sk->sk_bound_dev_if;
 }
 
-static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
-{
-       return rcu_dereference_check(ireq->ireq_opt,
-                                    refcount_read(&ireq->req.rsk_refcnt) > 0);
-}
-
 struct inet_cork {
        unsigned int            flags;
        __be32                  addr;
index 0c154f98e987367256a40a1154eeb5ea8923a730..39e1d875d507780a08d5cb8b939083d9e58f1e22 100644 (file)
  *   nla_find()                                find attribute in stream of attributes
  *   nla_find_nested()                 find attribute in nested attributes
  *   nla_parse()                       parse and validate stream of attrs
- *   nla_parse_nested()                        parse nested attribuets
+ *   nla_parse_nested()                        parse nested attributes
  *   nla_for_each_attr()               loop over all attributes
  *   nla_for_each_nested()             loop over the nested attributes
  *=========================================================================
index 7113728459451d50f08e5db83680ddf21aca05f9..705b33d1e395e86cd8889d9dff3b2f93bc28a608 100644 (file)
@@ -70,33 +70,6 @@ TRACE_EVENT(mm_migrate_pages,
                __print_symbolic(__entry->mode, MIGRATE_MODE),
                __print_symbolic(__entry->reason, MIGRATE_REASON))
 );
-
-TRACE_EVENT(mm_numa_migrate_ratelimit,
-
-       TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages),
-
-       TP_ARGS(p, dst_nid, nr_pages),
-
-       TP_STRUCT__entry(
-               __array(        char,           comm,   TASK_COMM_LEN)
-               __field(        pid_t,          pid)
-               __field(        int,            dst_nid)
-               __field(        unsigned long,  nr_pages)
-       ),
-
-       TP_fast_assign(
-               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-               __entry->pid            = p->pid;
-               __entry->dst_nid        = dst_nid;
-               __entry->nr_pages       = nr_pages;
-       ),
-
-       TP_printk("comm=%s pid=%d dst_nid=%d nr_pages=%lu",
-               __entry->comm,
-               __entry->pid,
-               __entry->dst_nid,
-               __entry->nr_pages)
-);
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
index 196587b8f204de13da0529c3cce46b68df75b4ac..837393fa897bb764264741ec2051f163841f0a4d 100644 (file)
@@ -56,7 +56,6 @@ enum rxrpc_peer_trace {
        rxrpc_peer_new,
        rxrpc_peer_processing,
        rxrpc_peer_put,
-       rxrpc_peer_queued_error,
 };
 
 enum rxrpc_conn_trace {
@@ -257,8 +256,7 @@ enum rxrpc_tx_point {
        EM(rxrpc_peer_got,                      "GOT") \
        EM(rxrpc_peer_new,                      "NEW") \
        EM(rxrpc_peer_processing,               "PRO") \
-       EM(rxrpc_peer_put,                      "PUT") \
-       E_(rxrpc_peer_queued_error,             "QER")
+       E_(rxrpc_peer_put,                      "PUT")
 
 #define rxrpc_conn_traces \
        EM(rxrpc_conn_got,                      "GOT") \
index e4732d3c2998264857772faac64409c69ff8e43d..b0f8e87235bdf4b599b52895637d9bd6329887fa 100644 (file)
@@ -26,7 +26,9 @@
 #define HUGETLB_FLAG_ENCODE_2MB                (21 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_8MB                (23 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_16MB       (24 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_32MB       (25 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_256MB      (28 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512MB      (29 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_1GB                (30 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_2GB                (31 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_16GB       (34 << HUGETLB_FLAG_ENCODE_SHIFT)
index 015a4c0bbb47d6e9cabc6aac2c254e8f6c7ee960..7a8a26751c2317ee30bf5f84e2be9fc977c9d2f9 100644 (file)
@@ -25,7 +25,9 @@
 #define MFD_HUGE_2MB   HUGETLB_FLAG_ENCODE_2MB
 #define MFD_HUGE_8MB   HUGETLB_FLAG_ENCODE_8MB
 #define MFD_HUGE_16MB  HUGETLB_FLAG_ENCODE_16MB
+#define MFD_HUGE_32MB  HUGETLB_FLAG_ENCODE_32MB
 #define MFD_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define MFD_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
 #define MFD_HUGE_1GB   HUGETLB_FLAG_ENCODE_1GB
 #define MFD_HUGE_2GB   HUGETLB_FLAG_ENCODE_2GB
 #define MFD_HUGE_16GB  HUGETLB_FLAG_ENCODE_16GB
index bfd5938fede6c1ba3b71d096cd36127da2837042..d0f515d53299ea5784ffdb61dd1b829b04fd045c 100644 (file)
@@ -28,7 +28,9 @@
 #define MAP_HUGE_2MB   HUGETLB_FLAG_ENCODE_2MB
 #define MAP_HUGE_8MB   HUGETLB_FLAG_ENCODE_8MB
 #define MAP_HUGE_16MB  HUGETLB_FLAG_ENCODE_16MB
+#define MAP_HUGE_32MB  HUGETLB_FLAG_ENCODE_32MB
 #define MAP_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define MAP_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
 #define MAP_HUGE_1GB   HUGETLB_FLAG_ENCODE_1GB
 #define MAP_HUGE_2GB   HUGETLB_FLAG_ENCODE_2GB
 #define MAP_HUGE_16GB  HUGETLB_FLAG_ENCODE_16GB
index dde1344f047cf04c541a21f4d983c9eb3c606ef5..6507ad0afc81d93713abee6f5790451dd4499d29 100644 (file)
@@ -65,7 +65,9 @@ struct shmid_ds {
 #define SHM_HUGE_2MB   HUGETLB_FLAG_ENCODE_2MB
 #define SHM_HUGE_8MB   HUGETLB_FLAG_ENCODE_8MB
 #define SHM_HUGE_16MB  HUGETLB_FLAG_ENCODE_16MB
+#define SHM_HUGE_32MB  HUGETLB_FLAG_ENCODE_32MB
 #define SHM_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define SHM_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
 #define SHM_HUGE_1GB   HUGETLB_FLAG_ENCODE_1GB
 #define SHM_HUGE_2GB   HUGETLB_FLAG_ENCODE_2GB
 #define SHM_HUGE_16GB  HUGETLB_FLAG_ENCODE_16GB
index 4cd402e4cfeb603e2417a3796c3d9b22f3022f89..1c65fb357395eace45f25a7c6a37d91ebe30a722 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -206,7 +206,7 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
         * Callers of shm_lock() must validate the status of the returned ipc
         * object pointer and error out as appropriate.
         */
-       return (void *)ipcp;
+       return ERR_CAST(ipcp);
 }
 
 static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
index 22ad967d1e5f14b9bafd41a99117b97700e041f6..830d7f095748fce8ba90adea2d536e25adce9fb3 100644 (file)
@@ -129,7 +129,7 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
        struct bpf_cgroup_storage *storage;
        struct bpf_storage_buffer *new;
 
-       if (flags & BPF_NOEXIST)
+       if (flags != BPF_ANY && flags != BPF_EXIST)
                return -EINVAL;
 
        storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
@@ -195,6 +195,9 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
        if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
                return ERR_PTR(-EINVAL);
 
+       if (attr->value_size == 0)
+               return ERR_PTR(-EINVAL);
+
        if (attr->value_size > PAGE_SIZE)
                return ERR_PTR(-E2BIG);
 
index bb07e74b34a225f1b21cc61ca0a43c848d3d532f..465952a8e4659ee3258884490e6c6f912d26e34d 100644 (file)
@@ -2896,6 +2896,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        u64 umin_val, umax_val;
        u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
 
+       if (insn_bitness == 32) {
+               /* Relevant for 32-bit RSH: Information can propagate towards
+                * LSB, so it isn't sufficient to only truncate the output to
+                * 32 bits.
+                */
+               coerce_reg_to_size(dst_reg, 4);
+               coerce_reg_to_size(&src_reg, 4);
+       }
+
        smin_val = src_reg.smin_value;
        smax_val = src_reg.smax_value;
        umin_val = src_reg.umin_value;
@@ -3131,7 +3140,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        if (BPF_CLASS(insn->code) != BPF_ALU64) {
                /* 32-bit ALU ops are (32,32)->32 */
                coerce_reg_to_size(dst_reg, 4);
-               coerce_reg_to_size(&src_reg, 4);
        }
 
        __reg_deduce_bounds(dst_reg);
index 9bd54304446f8b477a61a799a2d75136672548bd..1b1d63b3634b580cf6b29384e162f76b28e05102 100644 (file)
@@ -23,6 +23,9 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU
        bool
        select NEED_DMA_MAP_STATE
 
+config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+       bool
+
 config DMA_DIRECT_OPS
        bool
        depends on HAS_DMA
index dcb093e7b37770a2bbdb0e80f984e1cccae7350d..5a97f34bc14c8e2e31a452cb6fdcd38774a3b9a1 100644 (file)
@@ -8314,6 +8314,8 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
                        goto unlock;
 
                list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+                       if (event->cpu != smp_processor_id())
+                               continue;
                        if (event->attr.type != PERF_TYPE_TRACEPOINT)
                                continue;
                        if (event->attr.config != entry->type)
@@ -9431,9 +9433,7 @@ static void free_pmu_context(struct pmu *pmu)
        if (pmu->task_ctx_nr > perf_invalid_context)
                return;
 
-       mutex_lock(&pmus_lock);
        free_percpu(pmu->pmu_cpu_context);
-       mutex_unlock(&pmus_lock);
 }
 
 /*
@@ -9689,12 +9689,8 @@ EXPORT_SYMBOL_GPL(perf_pmu_register);
 
 void perf_pmu_unregister(struct pmu *pmu)
 {
-       int remove_device;
-
        mutex_lock(&pmus_lock);
-       remove_device = pmu_bus_running;
        list_del_rcu(&pmu->entry);
-       mutex_unlock(&pmus_lock);
 
        /*
         * We dereference the pmu list under both SRCU and regular RCU, so
@@ -9706,13 +9702,14 @@ void perf_pmu_unregister(struct pmu *pmu)
        free_percpu(pmu->pmu_disable_count);
        if (pmu->type >= PERF_TYPE_MAX)
                idr_remove(&pmu_idr, pmu->type);
-       if (remove_device) {
+       if (pmu_bus_running) {
                if (pmu->nr_addr_filters)
                        device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
                device_del(pmu->dev);
                put_device(pmu->dev);
        }
        free_pmu_context(pmu);
+       mutex_unlock(&pmus_lock);
 }
 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
 
index 0be047dbd8971dcd4ee1282936d3b3056f6ebfea..65a3b7e55b9fcd2b289e09d194a179f1cc8accc5 100644 (file)
@@ -260,7 +260,7 @@ static void test_cycle_work(struct work_struct *work)
 {
        struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
        struct ww_acquire_ctx ctx;
-       int err;
+       int err, erra = 0;
 
        ww_acquire_init(&ctx, &ww_class);
        ww_mutex_lock(&cycle->a_mutex, &ctx);
@@ -270,17 +270,19 @@ static void test_cycle_work(struct work_struct *work)
 
        err = ww_mutex_lock(cycle->b_mutex, &ctx);
        if (err == -EDEADLK) {
+               err = 0;
                ww_mutex_unlock(&cycle->a_mutex);
                ww_mutex_lock_slow(cycle->b_mutex, &ctx);
-               err = ww_mutex_lock(&cycle->a_mutex, &ctx);
+               erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
        }
 
        if (!err)
                ww_mutex_unlock(cycle->b_mutex);
-       ww_mutex_unlock(&cycle->a_mutex);
+       if (!erra)
+               ww_mutex_unlock(&cycle->a_mutex);
        ww_acquire_fini(&ctx);
 
-       cycle->result = err;
+       cycle->result = err ?: erra;
 }
 
 static int __test_cycle(unsigned int nthreads)
index 625bc9897f628bec7abacd797a05bff4b4f2cd85..ad97f3ba5ec51c4a9379228b60416c72e6dc5b60 100644 (file)
@@ -1167,7 +1167,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
        if (task_cpu(p) != new_cpu) {
                if (p->sched_class->migrate_task_rq)
-                       p->sched_class->migrate_task_rq(p);
+                       p->sched_class->migrate_task_rq(p, new_cpu);
                p->se.nr_migrations++;
                rseq_migrate(p);
                perf_event_task_migrate(p);
index 997ea7b839fa048fece2738fa2a7a38472ca4f01..91e4202b0634569514659aa905fddf5b0ec6a6e7 100644 (file)
@@ -1607,7 +1607,7 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
        return cpu;
 }
 
-static void migrate_task_rq_dl(struct task_struct *p)
+static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
 {
        struct rq *rq;
 
index f808ddf2a868e7dbfbac8d7bcf5809b876dc51c9..7fc4a371bdd248ee94ffb7f5088bb86b5fa42262 100644 (file)
@@ -1392,6 +1392,17 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
        int last_cpupid, this_cpupid;
 
        this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
+       last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
+
+       /*
+        * Allow first faults or private faults to migrate immediately early in
+        * the lifetime of a task. The magic number 4 is based on waiting for
+        * two full passes of the "multi-stage node selection" test that is
+        * executed below.
+        */
+       if ((p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4) &&
+           (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
+               return true;
 
        /*
         * Multi-stage node selection is used in conjunction with a periodic
@@ -1410,7 +1421,6 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
         * This quadric squishes small probabilities, making it less likely we
         * act on an unlikely task<->page relation.
         */
-       last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
        if (!cpupid_pid_unset(last_cpupid) &&
                                cpupid_to_nid(last_cpupid) != dst_nid)
                return false;
@@ -1514,6 +1524,21 @@ struct task_numa_env {
 static void task_numa_assign(struct task_numa_env *env,
                             struct task_struct *p, long imp)
 {
+       struct rq *rq = cpu_rq(env->dst_cpu);
+
+       /* Bail out if run-queue part of active NUMA balance. */
+       if (xchg(&rq->numa_migrate_on, 1))
+               return;
+
+       /*
+        * Clear previous best_cpu/rq numa-migrate flag, since task now
+        * found a better CPU to move/swap.
+        */
+       if (env->best_cpu != -1) {
+               rq = cpu_rq(env->best_cpu);
+               WRITE_ONCE(rq->numa_migrate_on, 0);
+       }
+
        if (env->best_task)
                put_task_struct(env->best_task);
        if (p)
@@ -1552,6 +1577,13 @@ static bool load_too_imbalanced(long src_load, long dst_load,
        return (imb > old_imb);
 }
 
+/*
+ * Maximum NUMA importance can be 1998 (2*999);
+ * SMALLIMP @ 30 would be close to 1998/64.
+ * Used to deter task migration.
+ */
+#define SMALLIMP       30
+
 /*
  * This checks if the overall compute and NUMA accesses of the system would
  * be improved if the source tasks was migrated to the target dst_cpu taking
@@ -1569,6 +1601,9 @@ static void task_numa_compare(struct task_numa_env *env,
        long moveimp = imp;
        int dist = env->dist;
 
+       if (READ_ONCE(dst_rq->numa_migrate_on))
+               return;
+
        rcu_read_lock();
        cur = task_rcu_dereference(&dst_rq->curr);
        if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
@@ -1582,7 +1617,7 @@ static void task_numa_compare(struct task_numa_env *env,
                goto unlock;
 
        if (!cur) {
-               if (maymove || imp > env->best_imp)
+               if (maymove && moveimp >= env->best_imp)
                        goto assign;
                else
                        goto unlock;
@@ -1625,15 +1660,21 @@ static void task_numa_compare(struct task_numa_env *env,
                               task_weight(cur, env->dst_nid, dist);
        }
 
-       if (imp <= env->best_imp)
-               goto unlock;
-
        if (maymove && moveimp > imp && moveimp > env->best_imp) {
-               imp = moveimp - 1;
+               imp = moveimp;
                cur = NULL;
                goto assign;
        }
 
+       /*
+        * If the NUMA importance is less than SMALLIMP,
+        * task migration might only result in ping pong
+        * of tasks and also hurt performance due to cache
+        * misses.
+        */
+       if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
+               goto unlock;
+
        /*
         * In the overloaded case, try and keep the load balanced.
         */
@@ -1710,6 +1751,7 @@ static int task_numa_migrate(struct task_struct *p)
                .best_cpu = -1,
        };
        struct sched_domain *sd;
+       struct rq *best_rq;
        unsigned long taskweight, groupweight;
        int nid, ret, dist;
        long taskimp, groupimp;
@@ -1805,20 +1847,17 @@ static int task_numa_migrate(struct task_struct *p)
        if (env.best_cpu == -1)
                return -EAGAIN;
 
-       /*
-        * Reset the scan period if the task is being rescheduled on an
-        * alternative node to recheck if the tasks is now properly placed.
-        */
-       p->numa_scan_period = task_scan_start(p);
-
+       best_rq = cpu_rq(env.best_cpu);
        if (env.best_task == NULL) {
                ret = migrate_task_to(p, env.best_cpu);
+               WRITE_ONCE(best_rq->numa_migrate_on, 0);
                if (ret != 0)
                        trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
                return ret;
        }
 
        ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
+       WRITE_ONCE(best_rq->numa_migrate_on, 0);
 
        if (ret != 0)
                trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
@@ -2596,6 +2635,39 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr)
        }
 }
 
+static void update_scan_period(struct task_struct *p, int new_cpu)
+{
+       int src_nid = cpu_to_node(task_cpu(p));
+       int dst_nid = cpu_to_node(new_cpu);
+
+       if (!static_branch_likely(&sched_numa_balancing))
+               return;
+
+       if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
+               return;
+
+       if (src_nid == dst_nid)
+               return;
+
+       /*
+        * Allow resets if faults have been trapped before one scan
+        * has completed. This is most likely due to a new task that
+        * is pulled cross-node due to wakeups or load balancing.
+        */
+       if (p->numa_scan_seq) {
+               /*
+                * Avoid scan adjustments if moving to the preferred
+                * node or if the task was not previously running on
+                * the preferred node.
+                */
+               if (dst_nid == p->numa_preferred_nid ||
+                   (p->numa_preferred_nid != -1 && src_nid != p->numa_preferred_nid))
+                       return;
+       }
+
+       p->numa_scan_period = task_scan_start(p);
+}
+
 #else
 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
 {
@@ -2609,6 +2681,10 @@ static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
 {
 }
 
+static inline void update_scan_period(struct task_struct *p, int new_cpu)
+{
+}
+
 #endif /* CONFIG_NUMA_BALANCING */
 
 static void
@@ -6275,7 +6351,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
  * cfs_rq_of(p) references at time of call are still valid and identify the
  * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
  */
-static void migrate_task_rq_fair(struct task_struct *p)
+static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
 {
        /*
         * As blocked tasks retain absolute vruntime the migration needs to
@@ -6328,6 +6404,8 @@ static void migrate_task_rq_fair(struct task_struct *p)
 
        /* We have migrated, no longer consider this task hot */
        p->se.exec_start = 0;
+
+       update_scan_period(p, new_cpu);
 }
 
 static void task_dead_fair(struct task_struct *p)
index 4a2e8cae63c41111672a898d3955008cd345c69b..455fa330de0462db774f827a726478aa66abad3b 100644 (file)
@@ -783,6 +783,7 @@ struct rq {
 #ifdef CONFIG_NUMA_BALANCING
        unsigned int            nr_numa_running;
        unsigned int            nr_preferred_running;
+       unsigned int            numa_migrate_on;
 #endif
        #define CPU_LOAD_IDX_MAX 5
        unsigned long           cpu_load[CPU_LOAD_IDX_MAX];
@@ -1523,7 +1524,7 @@ struct sched_class {
 
 #ifdef CONFIG_SMP
        int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
-       void (*migrate_task_rq)(struct task_struct *p);
+       void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
 
        void (*task_woken)(struct rq *this_rq, struct task_struct *task);
 
index 25a5d87e2e4c6509ad19663fd52f3ffdab76ee7b..912aae5fa09e1763e00c7d1221243eb5c6d705e8 100644 (file)
@@ -15,7 +15,6 @@
  * but they are bigger and use more memory for the lookup table.
  */
 
-#include <linux/crc32poly.h>
 #include "xz_private.h"
 
 /*
index 482b90f363fe3e590c4dbec4d7564a3348ca1aa0..09360ebb510ef10bbc465b388215322a677381d2 100644 (file)
 #      endif
 #endif
 
+#ifndef CRC32_POLY_LE
+#define CRC32_POLY_LE 0xedb88320
+#endif
+
 /*
  * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
  * before calling xz_dec_lzma2_run().
index 6a473709e9b6b953393ea12215a38c9e47c9f849..7405c9d89d65134c003a2d3984e1a993fd9c9907 100644 (file)
@@ -19,7 +19,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
                struct gup_benchmark *gup)
 {
        ktime_t start_time, end_time;
-       unsigned long i, nr, nr_pages, addr, next;
+       unsigned long i, nr_pages, addr, next;
+       int nr;
        struct page **pages;
 
        nr_pages = gup->size / PAGE_SIZE;
index 533f9b00147d267644bcbf98da717329fb07c38f..00704060b7f79242d324af81592b0afc6384ae08 100644 (file)
@@ -2931,7 +2931,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
        else
                page_add_file_rmap(new, true);
        set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
-       if (vma->vm_flags & VM_LOCKED)
+       if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
                mlock_vma_page(new);
        update_mmu_cache_pmd(vma, address, pvmw->pmd);
 }
index 3c21775f196b2f38a25ed05687791a4c79a3d3d4..5c390f5a5207b5c0b4d1d55524d2f655c7edcae1 100644 (file)
@@ -3326,8 +3326,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
        struct page *page;
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
-       const unsigned long mmun_start = start; /* For mmu_notifiers */
-       const unsigned long mmun_end   = end;   /* For mmu_notifiers */
+       unsigned long mmun_start = start;       /* For mmu_notifiers */
+       unsigned long mmun_end   = end;         /* For mmu_notifiers */
 
        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON(start & ~huge_page_mask(h));
@@ -3339,6 +3339,11 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
         */
        tlb_remove_check_page_size_change(tlb, sz);
        tlb_start_vma(tlb, vma);
+
+       /*
+        * If sharing possible, alert mmu notifiers of worst case.
+        */
+       adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        address = start;
        for (; address < end; address += sz) {
@@ -3349,6 +3354,10 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                ptl = huge_pte_lock(h, mm, ptep);
                if (huge_pmd_unshare(mm, &address, ptep)) {
                        spin_unlock(ptl);
+                       /*
+                        * We just unmapped a page of PMDs by clearing a PUD.
+                        * The caller's TLB flush range should cover this area.
+                        */
                        continue;
                }
 
@@ -3431,12 +3440,23 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 {
        struct mm_struct *mm;
        struct mmu_gather tlb;
+       unsigned long tlb_start = start;
+       unsigned long tlb_end = end;
+
+       /*
+        * If shared PMDs were possibly used within this vma range, adjust
+        * start/end for worst case tlb flushing.
+        * Note that we can not be sure if PMDs are shared until we try to
+        * unmap pages.  However, we want to make sure TLB flushing covers
+        * the largest possible range.
+        */
+       adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
 
        mm = vma->vm_mm;
 
-       tlb_gather_mmu(&tlb, mm, start, end);
+       tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
        __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
-       tlb_finish_mmu(&tlb, start, end);
+       tlb_finish_mmu(&tlb, tlb_start, tlb_end);
 }
 
 /*
@@ -4298,11 +4318,21 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
        pte_t pte;
        struct hstate *h = hstate_vma(vma);
        unsigned long pages = 0;
+       unsigned long f_start = start;
+       unsigned long f_end = end;
+       bool shared_pmd = false;
+
+       /*
+        * In the case of shared PMDs, the area to flush could be beyond
+        * start/end.  Set f_start/f_end to cover the maximum possible
+        * range if PMD sharing is possible.
+        */
+       adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);
 
        BUG_ON(address >= end);
-       flush_cache_range(vma, address, end);
+       flush_cache_range(vma, f_start, f_end);
 
-       mmu_notifier_invalidate_range_start(mm, start, end);
+       mmu_notifier_invalidate_range_start(mm, f_start, f_end);
        i_mmap_lock_write(vma->vm_file->f_mapping);
        for (; address < end; address += huge_page_size(h)) {
                spinlock_t *ptl;
@@ -4313,6 +4343,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                if (huge_pmd_unshare(mm, &address, ptep)) {
                        pages++;
                        spin_unlock(ptl);
+                       shared_pmd = true;
                        continue;
                }
                pte = huge_ptep_get(ptep);
@@ -4348,9 +4379,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
         * may have cleared our pud entry and done put_page on the page table:
         * once we release i_mmap_rwsem, another task can do the final put_page
-        * and that page table be reused and filled with junk.
+        * and that page table be reused and filled with junk.  If we actually
+        * did unshare a page of pmds, flush the range corresponding to the pud.
         */
-       flush_hugetlb_tlb_range(vma, start, end);
+       if (shared_pmd)
+               flush_hugetlb_tlb_range(vma, f_start, f_end);
+       else
+               flush_hugetlb_tlb_range(vma, start, end);
        /*
         * No need to call mmu_notifier_invalidate_range() we are downgrading
         * page table protection not changing it to point to a new page.
@@ -4358,7 +4393,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         * See Documentation/vm/mmu_notifier.rst
         */
        i_mmap_unlock_write(vma->vm_file->f_mapping);
-       mmu_notifier_invalidate_range_end(mm, start, end);
+       mmu_notifier_invalidate_range_end(mm, f_start, f_end);
 
        return pages << h->order;
 }
@@ -4545,12 +4580,40 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
        /*
         * check on proper vm_flags and page table alignment
         */
-       if (vma->vm_flags & VM_MAYSHARE &&
-           vma->vm_start <= base && end <= vma->vm_end)
+       if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
                return true;
        return false;
 }
 
+/*
+ * Determine if start,end range within vma could be mapped by shared pmd.
+ * If yes, adjust start and end to cover range associated with possible
+ * shared pmd mappings.
+ */
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end)
+{
+       unsigned long check_addr = *start;
+
+       if (!(vma->vm_flags & VM_MAYSHARE))
+               return;
+
+       for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
+               unsigned long a_start = check_addr & PUD_MASK;
+               unsigned long a_end = a_start + PUD_SIZE;
+
+               /*
+                * If sharing is possible, adjust start/end if necessary.
+                */
+               if (range_in_vma(vma, a_start, a_end)) {
+                       if (a_start < *start)
+                               *start = a_start;
+                       if (a_end > *end)
+                               *end = a_end;
+               }
+       }
+}
+
 /*
  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
  * and returns the corresponding pte. While this is not necessary for the
@@ -4648,6 +4711,11 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 {
        return 0;
 }
+
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end)
+{
+}
 #define want_pmd_share()       (0)
 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
 
index 972a9eaa898b6ad889a4647ed207ad82bd5d0f4b..71d21df2a3f362cc370c8ff158fb03dfa9d2d03c 100644 (file)
@@ -96,7 +96,7 @@ static long madvise_behavior(struct vm_area_struct *vma,
                new_flags |= VM_DONTDUMP;
                break;
        case MADV_DODUMP:
-               if (new_flags & VM_SPECIAL) {
+               if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
                        error = -EINVAL;
                        goto out;
                }
index d6a2e89b086a43d77f155f6b525fc15326d9c035..84381b55b2bd5c535bd181b7670a69f37bb084a4 100644 (file)
@@ -275,6 +275,9 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
                if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
                        mlock_vma_page(new);
 
+               if (PageTransHuge(page) && PageMlocked(page))
+                       clear_page_mlock(page);
+
                /* No need to invalidate - it was non-present before */
                update_mmu_cache(vma, pvmw.address, pvmw.pte);
        }
@@ -1411,7 +1414,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
                                 * we encounter them after the rest of the list
                                 * is processed.
                                 */
-                               if (PageTransHuge(page)) {
+                               if (PageTransHuge(page) && !PageHuge(page)) {
                                        lock_page(page);
                                        rc = split_huge_page_to_list(page, from);
                                        unlock_page(page);
@@ -1855,46 +1858,6 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
        return newpage;
 }
 
-/*
- * page migration rate limiting control.
- * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
- * window of time. Default here says do not migrate more than 1280M per second.
- */
-static unsigned int migrate_interval_millisecs __read_mostly = 100;
-static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
-
-/* Returns true if the node is migrate rate-limited after the update */
-static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
-                                       unsigned long nr_pages)
-{
-       /*
-        * Rate-limit the amount of data that is being migrated to a node.
-        * Optimal placement is no good if the memory bus is saturated and
-        * all the time is being spent migrating!
-        */
-       if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
-               spin_lock(&pgdat->numabalancing_migrate_lock);
-               pgdat->numabalancing_migrate_nr_pages = 0;
-               pgdat->numabalancing_migrate_next_window = jiffies +
-                       msecs_to_jiffies(migrate_interval_millisecs);
-               spin_unlock(&pgdat->numabalancing_migrate_lock);
-       }
-       if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
-               trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
-                                                               nr_pages);
-               return true;
-       }
-
-       /*
-        * This is an unlocked non-atomic update so errors are possible.
-        * The consequences are failing to migrate when we potentiall should
-        * have which is not severe enough to warrant locking. If it is ever
-        * a problem, it can be converted to a per-cpu counter.
-        */
-       pgdat->numabalancing_migrate_nr_pages += nr_pages;
-       return false;
-}
-
 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 {
        int page_lru;
@@ -1967,14 +1930,6 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
        if (page_is_file_cache(page) && PageDirty(page))
                goto out;
 
-       /*
-        * Rate-limit the amount of data that is being migrated to a node.
-        * Optimal placement is no good if the memory bus is saturated and
-        * all the time is being spent migrating!
-        */
-       if (numamigrate_update_ratelimit(pgdat, 1))
-               goto out;
-
        isolated = numamigrate_isolate_page(pgdat, page);
        if (!isolated)
                goto out;
@@ -2021,14 +1976,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        unsigned long mmun_start = address & HPAGE_PMD_MASK;
        unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
 
-       /*
-        * Rate-limit the amount of data that is being migrated to a node.
-        * Optimal placement is no good if the memory bus is saturated and
-        * all the time is being spent migrating!
-        */
-       if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
-               goto out_dropref;
-
        new_page = alloc_pages_node(node,
                (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
                HPAGE_PMD_ORDER);
@@ -2125,7 +2072,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 
 out_fail:
        count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
-out_dropref:
        ptl = pmd_lock(mm, pmd);
        if (pmd_same(*pmd, entry)) {
                entry = pmd_modify(entry, vma->vm_page_prot);
index 89d2a2ab3fe68c3ae46104074c519c7500dd86cb..706a738c0aeed26a904e43a7a1d5d1a45453f721 100644 (file)
@@ -6197,8 +6197,6 @@ static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
 static void pgdat_init_numabalancing(struct pglist_data *pgdat)
 {
        spin_lock_init(&pgdat->numabalancing_migrate_lock);
-       pgdat->numabalancing_migrate_nr_pages = 0;
-       pgdat->numabalancing_migrate_next_window = jiffies;
 }
 #else
 static void pgdat_init_numabalancing(struct pglist_data *pgdat) {}
index eb477809a5c0a534e2977f6fd6c1df74a05bc170..1e79fac3186b63208cbe37a8c05597c44d2234c9 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1362,11 +1362,21 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        }
 
        /*
-        * We have to assume the worse case ie pmd for invalidation. Note that
-        * the page can not be free in this function as call of try_to_unmap()
-        * must hold a reference on the page.
+        * For THP, we have to assume the worse case ie pmd for invalidation.
+        * For hugetlb, it could be much worse if we need to do pud
+        * invalidation in the case of pmd sharing.
+        *
+        * Note that the page can not be free in this function as call of
+        * try_to_unmap() must hold a reference on the page.
         */
        end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+       if (PageHuge(page)) {
+               /*
+                * If sharing is possible, start and end will be adjusted
+                * accordingly.
+                */
+               adjust_range_if_pmd_sharing_possible(vma, &start, &end);
+       }
        mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
 
        while (page_vma_mapped_walk(&pvmw)) {
@@ -1409,6 +1419,32 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
                address = pvmw.address;
 
+               if (PageHuge(page)) {
+                       if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
+                               /*
+                                * huge_pmd_unshare unmapped an entire PMD
+                                * page.  There is no way of knowing exactly
+                                * which PMDs may be cached for this mm, so
+                                * we must flush them all.  start/end were
+                                * already adjusted above to cover this range.
+                                */
+                               flush_cache_range(vma, start, end);
+                               flush_tlb_range(vma, start, end);
+                               mmu_notifier_invalidate_range(mm, start, end);
+
+                               /*
+                                * The ref count of the PMD page was dropped
+                                * which is part of the way map counting
+                                * is done for shared PMDs.  Return 'true'
+                                * here.  When there is no other sharing,
+                                * huge_pmd_unshare returns false and we will
+                                * unmap the actual page and drop map count
+                                * to zero.
+                                */
+                               page_vma_mapped_walk_done(&pvmw);
+                               break;
+                       }
+               }
 
                if (IS_ENABLED(CONFIG_MIGRATION) &&
                    (flags & TTU_MIGRATION) &&
index c7ce2c1612259c45896bae9739a288966a28c970..c5ef7240cbcbba05b4ef759b3c05d1ffdb163369 100644 (file)
@@ -580,8 +580,8 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
                        struct mem_cgroup *memcg, int priority)
 {
        struct memcg_shrinker_map *map;
-       unsigned long freed = 0;
-       int ret, i;
+       unsigned long ret, freed = 0;
+       int i;
 
        if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
                return 0;
@@ -677,9 +677,8 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
                                 struct mem_cgroup *memcg,
                                 int priority)
 {
+       unsigned long ret, freed = 0;
        struct shrinker *shrinker;
-       unsigned long freed = 0;
-       int ret;
 
        if (!mem_cgroup_is_root(memcg))
                return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
index 8ba0870ecddd0fd592d16ee674b060db512b5b37..7878da76abf2d21992b322ba2a3e65386a215467 100644 (file)
@@ -1275,6 +1275,9 @@ const char * const vmstat_text[] = {
 #ifdef CONFIG_SMP
        "nr_tlb_remote_flush",
        "nr_tlb_remote_flush_received",
+#else
+       "", /* nr_tlb_remote_flush */
+       "", /* nr_tlb_remote_flush_received */
 #endif /* CONFIG_SMP */
        "nr_tlb_local_flush_all",
        "nr_tlb_local_flush_one",
@@ -1283,7 +1286,6 @@ const char * const vmstat_text[] = {
 #ifdef CONFIG_DEBUG_VM_VMACACHE
        "vmacache_find_calls",
        "vmacache_find_hits",
-       "vmacache_full_flushes",
 #endif
 #ifdef CONFIG_SWAP
        "swap_ra",
index 3bdc8f3ca259ed2d82bb9861033814d65591a51c..ccce954f814682a40ba5d8af0ab463d5b0bfda3b 100644 (file)
@@ -2434,9 +2434,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        /* LE address type */
        addr_type = le_addr_type(cp->addr.type);
 
-       hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
-
-       err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
+       /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
+       err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
        if (err < 0) {
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
                                        MGMT_STATUS_NOT_PAIRED, &rp,
@@ -2450,8 +2449,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                goto done;
        }
 
-       /* Abort any ongoing SMP pairing */
-       smp_cancel_pairing(conn);
 
        /* Defer clearing up the connection parameters until closing to
         * give a chance of keeping them if a repairing happens.
index 3a7b0773536b8e226546ebe417463fcba5c92ba4..73f7211d0431a0f766dfacd488077a081054b67c 100644 (file)
@@ -2422,30 +2422,51 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
        return ret;
 }
 
-void smp_cancel_pairing(struct hci_conn *hcon)
+int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                 u8 addr_type)
 {
-       struct l2cap_conn *conn = hcon->l2cap_data;
+       struct hci_conn *hcon;
+       struct l2cap_conn *conn;
        struct l2cap_chan *chan;
        struct smp_chan *smp;
+       int err;
+
+       err = hci_remove_ltk(hdev, bdaddr, addr_type);
+       hci_remove_irk(hdev, bdaddr, addr_type);
+
+       hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type);
+       if (!hcon)
+               goto done;
 
+       conn = hcon->l2cap_data;
        if (!conn)
-               return;
+               goto done;
 
        chan = conn->smp;
        if (!chan)
-               return;
+               goto done;
 
        l2cap_chan_lock(chan);
 
        smp = chan->data;
        if (smp) {
+               /* Set keys to NULL to make sure smp_failure() does not try to
+                * remove and free already invalidated rcu list entries. */
+               smp->ltk = NULL;
+               smp->slave_ltk = NULL;
+               smp->remote_irk = NULL;
+
                if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
                        smp_failure(conn, 0);
                else
                        smp_failure(conn, SMP_UNSPECIFIED);
+               err = 0;
        }
 
        l2cap_chan_unlock(chan);
+
+done:
+       return err;
 }
 
 static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
index 0ff6247eaa6c0e8c19223c014d11a98d8adaee8a..121edadd5f8da8761c7ef464c22e6455a25d2d27 100644 (file)
@@ -181,7 +181,8 @@ enum smp_key_pref {
 };
 
 /* SMP Commands */
-void smp_cancel_pairing(struct hci_conn *hcon);
+int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                 u8 addr_type);
 bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
                             enum smp_key_pref key_pref);
 int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
index f0fc182d3db77eb311d91f7faef4e8a6f85886b3..b64e1649993b78939c58394aee788b48b8cefffe 100644 (file)
@@ -59,7 +59,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
        req.is_set = is_set;
        req.pid = current->pid;
        req.cmd = optname;
-       req.addr = (long)optval;
+       req.addr = (long __force __user)optval;
        req.len = optlen;
        mutex_lock(&bpfilter_lock);
        if (!info.pid)
@@ -98,7 +98,7 @@ static int __init load_umh(void)
        pr_info("Loaded bpfilter_umh pid %d\n", info.pid);
 
        /* health check that usermode process started correctly */
-       if (__bpfilter_process_sockopt(NULL, 0, 0, 0, 0) != 0) {
+       if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) {
                stop_umh();
                return -EFAULT;
        }
index 6e0dc6bcd32af7e056e3006fdc0a483aaf4771d1..37278dc280eb3540b24ec22502f01bea2c124199 100644 (file)
@@ -835,7 +835,8 @@ static unsigned int ip_sabotage_in(void *priv,
                                   struct sk_buff *skb,
                                   const struct nf_hook_state *state)
 {
-       if (skb->nf_bridge && !skb->nf_bridge->in_prerouting) {
+       if (skb->nf_bridge && !skb->nf_bridge->in_prerouting &&
+           !netif_is_l3_master(skb->dev)) {
                state->okfn(state->net, state->sk, skb);
                return NF_STOLEN;
        }
index 234a0ec2e9327727e95a76f44a29e8cc93feff84..0762aaf8e964ec4c517984fdff8ddfdc4afef99e 100644 (file)
@@ -1483,6 +1483,7 @@ static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
 static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
 {
        struct ethtool_wolinfo wol;
+       int ret;
 
        if (!dev->ethtool_ops->set_wol)
                return -EOPNOTSUPP;
@@ -1490,7 +1491,13 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
        if (copy_from_user(&wol, useraddr, sizeof(wol)))
                return -EFAULT;
 
-       return dev->ethtool_ops->set_wol(dev, &wol);
+       ret = dev->ethtool_ops->set_wol(dev, &wol);
+       if (ret)
+               return ret;
+
+       dev->wol_enabled = !!wol.wolopts;
+
+       return 0;
 }
 
 static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
index 3219a2932463096566ce8ff336ecdf699422dd65..de1d1ba92f2de39292987e1408db0c2b821c4b6d 100644 (file)
@@ -135,27 +135,9 @@ static void queue_process(struct work_struct *work)
        }
 }
 
-/*
- * Check whether delayed processing was scheduled for our NIC. If so,
- * we attempt to grab the poll lock and use ->poll() to pump the card.
- * If this fails, either we've recursed in ->poll() or it's already
- * running on another CPU.
- *
- * Note: we don't mask interrupts with this lock because we're using
- * trylock here and interrupts are already disabled in the softirq
- * case. Further, we test the poll_owner to avoid recursion on UP
- * systems where the lock doesn't exist.
- */
 static void poll_one_napi(struct napi_struct *napi)
 {
-       int work = 0;
-
-       /* net_rx_action's ->poll() invocations and our's are
-        * synchronized by this test which is only made while
-        * holding the napi->poll_lock.
-        */
-       if (!test_bit(NAPI_STATE_SCHED, &napi->state))
-               return;
+       int work;
 
        /* If we set this bit but see that it has already been set,
         * that indicates that napi has been disabled and we need
@@ -330,6 +312,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
        /* It is up to the caller to keep npinfo alive. */
        struct netpoll_info *npinfo;
 
+       rcu_read_lock_bh();
        lockdep_assert_irqs_disabled();
 
        npinfo = rcu_dereference_bh(np->dev->npinfo);
@@ -374,6 +357,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work,0);
        }
+       rcu_read_unlock_bh();
 }
 EXPORT_SYMBOL(netpoll_send_skb_on_dev);
 
index 63ce2283a456a4aeabf3c10b681fe63195209101..37c7936124e618556c1b2a789383d6a4860d2c9f 100644 (file)
@@ -1898,10 +1898,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                if (tb[IFLA_IF_NETNSID]) {
                        netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
                        tgt_net = get_target_net(skb->sk, netnsid);
-                       if (IS_ERR(tgt_net)) {
-                               tgt_net = net;
-                               netnsid = -1;
-                       }
+                       if (IS_ERR(tgt_net))
+                               return PTR_ERR(tgt_net);
                }
 
                if (tb[IFLA_EXT_MASK])
@@ -2837,6 +2835,12 @@ struct net_device *rtnl_create_link(struct net *net,
        else if (ops->get_num_rx_queues)
                num_rx_queues = ops->get_num_rx_queues();
 
+       if (num_tx_queues < 1 || num_tx_queues > 4096)
+               return ERR_PTR(-EINVAL);
+
+       if (num_rx_queues < 1 || num_rx_queues > 4096)
+               return ERR_PTR(-EINVAL);
+
        dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
                               ops->setup, num_tx_queues, num_rx_queues);
        if (!dev)
@@ -3744,16 +3748,27 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
        int err = 0;
        int fidx = 0;
 
-       err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
-                         IFLA_MAX, ifla_policy, NULL);
-       if (err < 0) {
-               return -EINVAL;
-       } else if (err == 0) {
-               if (tb[IFLA_MASTER])
-                       br_idx = nla_get_u32(tb[IFLA_MASTER]);
-       }
+       /* A hack to preserve kernel<->userspace interface.
+        * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
+        * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
+        * So, check for ndmsg with an optional u32 attribute (not used here).
+        * Fortunately these sizes don't conflict with the size of ifinfomsg
+        * with an optional attribute.
+        */
+       if (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) &&
+           (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) +
+            nla_attr_size(sizeof(u32)))) {
+               err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
+                                 IFLA_MAX, ifla_policy, NULL);
+               if (err < 0) {
+                       return -EINVAL;
+               } else if (err == 0) {
+                       if (tb[IFLA_MASTER])
+                               br_idx = nla_get_u32(tb[IFLA_MASTER]);
+               }
 
-       brport_idx = ifm->ifi_index;
+               brport_idx = ifm->ifi_index;
+       }
 
        if (br_idx) {
                br_dev = __dev_get_by_index(net, br_idx);
index d28d46bff6ab43441f34284ec975c1e052a774d0..85d6c879383da8994c6b20cd1e49e0f667a07482 100644 (file)
@@ -606,11 +606,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        if (sk->sk_state == DCCP_LISTEN) {
                if (dh->dccph_type == DCCP_PKT_REQUEST) {
                        /* It is possible that we process SYN packets from backlog,
-                        * so we need to make sure to disable BH right there.
+                        * so we need to make sure to disable BH and RCU right there.
                         */
+                       rcu_read_lock();
                        local_bh_disable();
                        acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
                        local_bh_enable();
+                       rcu_read_unlock();
                        if (!acceptable)
                                return 1;
                        consume_skb(skb);
index b08feb219b44b67eadf408a33649d8c7ec9db2d0..8e08cea6f17866b5fb1619f570de747c6a837cbd 100644 (file)
@@ -493,9 +493,11 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
 
                dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
                                                              ireq->ir_rmt_addr);
+               rcu_read_lock();
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
-                                           ireq_opt_deref(ireq));
+                                           rcu_dereference(ireq->ireq_opt));
+               rcu_read_unlock();
                err = net_xmit_eval(err);
        }
 
index dfd5009f96ef7111a593651a48f73c4a92c3ed15..15e7f7915a21e0fbce09d5d2c17d877eae499e03 100644 (file)
@@ -544,7 +544,8 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
        struct ip_options_rcu *opt;
        struct rtable *rt;
 
-       opt = ireq_opt_deref(ireq);
+       rcu_read_lock();
+       opt = rcu_dereference(ireq->ireq_opt);
 
        flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
@@ -558,11 +559,13 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
                goto no_route;
        if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto route_err;
+       rcu_read_unlock();
        return &rt->dst;
 
 route_err:
        ip_rt_put(rt);
 no_route:
+       rcu_read_unlock();
        __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
 }
index c0fe5ad996f238091f5b9585adb586a571f653f0..26c36cccabdc2c8cc95cfd609672d412c493fc42 100644 (file)
@@ -149,7 +149,6 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
        struct sockaddr_in sin;
-       const struct iphdr *iph = ip_hdr(skb);
        __be16 *ports;
        int end;
 
@@ -164,7 +163,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
        ports = (__be16 *)skb_transport_header(skb);
 
        sin.sin_family = AF_INET;
-       sin.sin_addr.s_addr = iph->daddr;
+       sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
        sin.sin_port = ports[1];
        memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
 
index b92f422f2fa805cd5cca8264fe9ae5aa6d6a65b8..891ed2f91467b9345743682a3dd6e818acb48fbd 100644 (file)
@@ -48,6 +48,7 @@ static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 static int comp_sack_nr_max = 255;
+static u32 u32_max_div_HZ = UINT_MAX / HZ;
 
 /* obsolete */
 static int sysctl_tcp_low_latency __read_mostly;
@@ -745,9 +746,10 @@ static struct ctl_table ipv4_net_table[] = {
        {
                .procname       = "tcp_probe_interval",
                .data           = &init_net.ipv4.sysctl_tcp_probe_interval,
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(u32),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_douintvec_minmax,
+               .extra2         = &u32_max_div_HZ,
        },
        {
                .procname       = "igmp_link_local_mcast_reports",
index 4cf2f7bb2802ad4ae968b5a6dfb9d005ed619c76..47e08c1b5bc3e14e6ae2851b7ec8de91a3eb4a35 100644 (file)
@@ -6009,11 +6009,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
                        if (th->fin)
                                goto discard;
                        /* It is possible that we process SYN packets from backlog,
-                        * so we need to make sure to disable BH right there.
+                        * so we need to make sure to disable BH and RCU right there.
                         */
+                       rcu_read_lock();
                        local_bh_disable();
                        acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
                        local_bh_enable();
+                       rcu_read_unlock();
 
                        if (!acceptable)
                                return 1;
index 44c09eddbb781c03da2417aaa925e360de01a6e9..cd426313a29819b34648086b551fe9390d8a0b0a 100644 (file)
@@ -943,9 +943,11 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
+               rcu_read_lock();
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
-                                           ireq_opt_deref(ireq));
+                                           rcu_dereference(ireq->ireq_opt));
+               rcu_read_unlock();
                err = net_xmit_eval(err);
        }
 
index bcfc00e88756dabb1f491d3d41137ccbc7ab1cbc..f8de2482a52923709ed58c401785a6ac60771932 100644 (file)
@@ -67,6 +67,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
 
        if (xo && (xo->flags & XFRM_GRO)) {
                skb_mac_header_rebuild(skb);
+               skb_reset_transport_header(skb);
                return 0;
        }
 
index 3d36644890bb6d3b0a755c811c60e920ad5cd8b8..1ad2c2c4e250f84b1ad73020c727ea8b68b3e0d3 100644 (file)
@@ -46,7 +46,6 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
 {
        int ihl = skb->data - skb_transport_header(skb);
-       struct xfrm_offload *xo = xfrm_offload(skb);
 
        if (skb->transport_header != skb->network_header) {
                memmove(skb_transport_header(skb),
@@ -54,8 +53,7 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
                skb->network_header = skb->transport_header;
        }
        ip_hdr(skb)->tot_len = htons(skb->len + ihl);
-       if (!xo || !(xo->flags & XFRM_GRO))
-               skb_reset_transport_header(skb);
+       skb_reset_transport_header(skb);
        return 0;
 }
 
index 413d98bf24f4c9f9644b79590369b9188713926e..5e0efd3954e90ade89eb4da17cd5ecef1894a1a3 100644 (file)
@@ -651,8 +651,6 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
        skb->tstamp = sockc->transmit_time;
-       skb_dst_set(skb, &rt->dst);
-       *dstp = NULL;
 
        skb_put(skb, length);
        skb_reset_network_header(skb);
@@ -665,8 +663,14 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
 
        skb->transport_header = skb->network_header;
        err = memcpy_from_msg(iph, msg, length);
-       if (err)
-               goto error_fault;
+       if (err) {
+               err = -EFAULT;
+               kfree_skb(skb);
+               goto error;
+       }
+
+       skb_dst_set(skb, &rt->dst);
+       *dstp = NULL;
 
        /* if egress device is enslaved to an L3 master device pass the
         * skb to its handler for processing
@@ -675,21 +679,28 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
        if (unlikely(!skb))
                return 0;
 
+       /* Acquire rcu_read_lock() in case we need to use rt->rt6i_idev
+        * in the error path. Since skb has been freed, the dst could
+        * have been queued for deletion.
+        */
+       rcu_read_lock();
        IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
        err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
                      NULL, rt->dst.dev, dst_output);
        if (err > 0)
                err = net_xmit_errno(err);
-       if (err)
-               goto error;
+       if (err) {
+               IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+               rcu_read_unlock();
+               goto error_check;
+       }
+       rcu_read_unlock();
 out:
        return 0;
 
-error_fault:
-       err = -EFAULT;
-       kfree_skb(skb);
 error:
        IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+error_check:
        if (err == -ENOBUFS && !np->recverr)
                err = 0;
        return err;
index 826b14de7dbbc8d1e2100820374654e5722c32b6..a366c05a239da50e98ced776b66d34f923900701 100644 (file)
@@ -4321,11 +4321,6 @@ static int ip6_route_info_append(struct net *net,
        if (!nh)
                return -ENOMEM;
        nh->fib6_info = rt;
-       err = ip6_convert_metrics(net, rt, r_cfg);
-       if (err) {
-               kfree(nh);
-               return err;
-       }
        memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
        list_add_tail(&nh->next, rt6_nh_list);
 
index 841f4a07438e83502eadd6ec6c16a16d1de6aa55..9ef490dddcea23b82bd703217bfdde49dce41069 100644 (file)
@@ -59,6 +59,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
 
        if (xo && (xo->flags & XFRM_GRO)) {
                skb_mac_header_rebuild(skb);
+               skb_reset_transport_header(skb);
                return -1;
        }
 
index 9ad07a91708ef7a1008d469766ab39b9b882883f..3c29da5defe6c357ff04ca4adead1a9fee208f08 100644 (file)
@@ -51,7 +51,6 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
 {
        int ihl = skb->data - skb_transport_header(skb);
-       struct xfrm_offload *xo = xfrm_offload(skb);
 
        if (skb->transport_header != skb->network_header) {
                memmove(skb_transport_header(skb),
@@ -60,8 +59,7 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
        }
        ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
                                           sizeof(struct ipv6hdr));
-       if (!xo || !(xo->flags & XFRM_GRO))
-               skb_reset_transport_header(skb);
+       skb_reset_transport_header(skb);
        return 0;
 }
 
index 5959ce9620eb92ece2830d6a59ed21d562a3a1cf..6a74080005cf6acf15fa59d6d3dd14cbf01a1781 100644 (file)
@@ -170,9 +170,11 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 
        if (toobig && xfrm6_local_dontfrag(skb)) {
                xfrm6_local_rxpmtu(skb, mtu);
+               kfree_skb(skb);
                return -EMSGSIZE;
        } else if (!skb->ignore_df && toobig && skb->sk) {
                xfrm_local_error(skb, mtu);
+               kfree_skb(skb);
                return -EMSGSIZE;
        }
 
index d25da0e66da16218c340e4c7f8a9aaf663985b9c..5d22eda8a6b1e9d4f8d6f45c1c8c633a892041d3 100644 (file)
@@ -427,7 +427,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_AP_VLAN:
                /* Keys without a station are used for TX only */
-               if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
+               if (sta && test_sta_flag(sta, WLAN_STA_MFP))
                        key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
                break;
        case NL80211_IFTYPE_ADHOC:
index 5e6cf2cee965264dd45cda775b370b6dcb022413..5836ddeac9e34ecd2aa6e51363679d2cd11f266d 100644 (file)
@@ -1756,7 +1756,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
                if (local->ops->wake_tx_queue &&
                    type != NL80211_IFTYPE_AP_VLAN &&
-                   type != NL80211_IFTYPE_MONITOR)
+                   (type != NL80211_IFTYPE_MONITOR ||
+                    (params->flags & MONITOR_FLAG_ACTIVE)))
                        txq_size += sizeof(struct txq_info) +
                                    local->hw.txq_data_size;
 
index ee56f18cad3f7e89e1c60fe4829dab7bfa1ef340..21526630bf6559fed1ecd1894a4796db5216fd56 100644 (file)
@@ -217,7 +217,8 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_init(void);
 void ieee80211s_update_metric(struct ieee80211_local *local,
-                             struct sta_info *sta, struct sk_buff *skb);
+                             struct sta_info *sta,
+                             struct ieee80211_tx_status *st);
 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
 void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata);
 int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
index daf9db3c8f24f389df84d95ae973c969d65622f1..6950cd0bf5940a0bc76ea0f3bc283c4a1cac7963 100644 (file)
@@ -295,15 +295,12 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
 }
 
 void ieee80211s_update_metric(struct ieee80211_local *local,
-               struct sta_info *sta, struct sk_buff *skb)
+                             struct sta_info *sta,
+                             struct ieee80211_tx_status *st)
 {
-       struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ieee80211_tx_info *txinfo = st->info;
        int failed;
 
-       if (!ieee80211_is_data(hdr->frame_control))
-               return;
-
        failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
 
        /* moving average, scaled to 100.
index 9a6d7208bf4f809b8cb78856e688c19730ee097e..91d7c0cd18824042044a861cfc1bcb4308c803de 100644 (file)
@@ -479,11 +479,6 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
        if (!skb)
                return;
 
-       if (dropped) {
-               dev_kfree_skb_any(skb);
-               return;
-       }
-
        if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
                u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
                struct ieee80211_sub_if_data *sdata;
@@ -506,6 +501,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
                }
                rcu_read_unlock();
 
+               dev_kfree_skb_any(skb);
+       } else if (dropped) {
                dev_kfree_skb_any(skb);
        } else {
                /* consumes skb */
@@ -811,7 +808,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
 
                rate_control_tx_status(local, sband, status);
                if (ieee80211_vif_is_mesh(&sta->sdata->vif))
-                       ieee80211s_update_metric(local, sta, skb);
+                       ieee80211s_update_metric(local, sta, status);
 
                if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
                        ieee80211_frame_acked(sta, skb);
@@ -972,6 +969,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
                }
 
                rate_control_tx_status(local, sband, status);
+               if (ieee80211_vif_is_mesh(&sta->sdata->vif))
+                       ieee80211s_update_metric(local, sta, status);
        }
 
        if (acked || noack_success) {
index 5cd5e6e5834efc820c94d299dadfd9164e217253..6c647f425e057d6d3c56acc3ceeb108868541967 100644 (file)
@@ -16,6 +16,7 @@
 #include "ieee80211_i.h"
 #include "driver-ops.h"
 #include "rate.h"
+#include "wme.h"
 
 /* give usermode some time for retries in setting up the TDLS session */
 #define TDLS_PEER_SETUP_TIMEOUT        (15 * HZ)
@@ -1010,14 +1011,13 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
        switch (action_code) {
        case WLAN_TDLS_SETUP_REQUEST:
        case WLAN_TDLS_SETUP_RESPONSE:
-               skb_set_queue_mapping(skb, IEEE80211_AC_BK);
-               skb->priority = 2;
+               skb->priority = 256 + 2;
                break;
        default:
-               skb_set_queue_mapping(skb, IEEE80211_AC_VI);
-               skb->priority = 5;
+               skb->priority = 256 + 5;
                break;
        }
+       skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
 
        /*
         * Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress.
index f353d9db54bc1f049e14b20713af88bde1da3c62..25ba24bef8f51f669ccbcc95cfcb5f1d235e39a9 100644 (file)
@@ -214,6 +214,7 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
 {
        struct ieee80211_local *local = tx->local;
        struct ieee80211_if_managed *ifmgd;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 
        /* driver doesn't support power save */
        if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
@@ -242,6 +243,9 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
        if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
                return TX_CONTINUE;
 
+       if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK))
+               return TX_CONTINUE;
+
        ifmgd = &tx->sdata->u.mgd;
 
        /*
@@ -1890,7 +1894,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
                        sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
 
        if (invoke_tx_handlers_early(&tx))
-               return false;
+               return true;
 
        if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
                return true;
index b4bdf9eda7b740dccb6501c5b0155c32d17d7e5b..247b89784a6fb41141bb20cc4d4e5987b33e17a7 100644 (file)
@@ -1213,8 +1213,8 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
 #define TCP_NLATTR_SIZE        ( \
        NLA_ALIGN(NLA_HDRLEN + 1) + \
        NLA_ALIGN(NLA_HDRLEN + 1) + \
-       NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))) + \
-       NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))))
+       NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
+       NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
 
 static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
 {
index 5af74b37f4236ec0402ac41360f04a5000ae3126..a35fb59ace7326324811a21704eb2932e2ca5a4c 100644 (file)
@@ -49,7 +49,7 @@ static int nft_osf_init(const struct nft_ctx *ctx,
 
        priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]);
        err = nft_validate_register_store(ctx, priv->dreg, NULL,
-                                         NFTA_DATA_VALUE, NFT_OSF_MAXGENRELEN);
+                                         NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN);
        if (err < 0)
                return err;
 
index 55e2d9215c0d4fe488c0d78c5ce41979e9098852..0e5ec126f6ad0516acf0576f01c4430dec43aec8 100644 (file)
@@ -355,12 +355,11 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
 
 static void nft_rbtree_gc(struct work_struct *work)
 {
+       struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
        struct nft_set_gc_batch *gcb = NULL;
-       struct rb_node *node, *prev = NULL;
-       struct nft_rbtree_elem *rbe;
        struct nft_rbtree *priv;
+       struct rb_node *node;
        struct nft_set *set;
-       int i;
 
        priv = container_of(work, struct nft_rbtree, gc_work.work);
        set  = nft_set_container_of(priv);
@@ -371,7 +370,7 @@ static void nft_rbtree_gc(struct work_struct *work)
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
                if (nft_rbtree_interval_end(rbe)) {
-                       prev = node;
+                       rbe_end = rbe;
                        continue;
                }
                if (!nft_set_elem_expired(&rbe->ext))
@@ -379,29 +378,30 @@ static void nft_rbtree_gc(struct work_struct *work)
                if (nft_set_elem_mark_busy(&rbe->ext))
                        continue;
 
+               if (rbe_prev) {
+                       rb_erase(&rbe_prev->node, &priv->root);
+                       rbe_prev = NULL;
+               }
                gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
                if (!gcb)
                        break;
 
                atomic_dec(&set->nelems);
                nft_set_gc_batch_add(gcb, rbe);
+               rbe_prev = rbe;
 
-               if (prev) {
-                       rbe = rb_entry(prev, struct nft_rbtree_elem, node);
+               if (rbe_end) {
                        atomic_dec(&set->nelems);
-                       nft_set_gc_batch_add(gcb, rbe);
-                       prev = NULL;
+                       nft_set_gc_batch_add(gcb, rbe_end);
+                       rb_erase(&rbe_end->node, &priv->root);
+                       rbe_end = NULL;
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
-       if (gcb) {
-               for (i = 0; i < gcb->head.cnt; i++) {
-                       rbe = gcb->elems[i];
-                       rb_erase(&rbe->node, &priv->root);
-               }
-       }
+       if (rbe_prev)
+               rb_erase(&rbe_prev->node, &priv->root);
        write_seqcount_end(&priv->count);
        write_unlock_bh(&priv->lock);
 
index 0472f34728423ac1a3ba839a72e4aab167df1091..ada144e5645bb3075b36b5c4fd23a1bb9020c874 100644 (file)
@@ -56,7 +56,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
        struct sk_buff *pskb = (struct sk_buff *)skb;
        struct sock *sk = skb->sk;
 
-       if (!net_eq(xt_net(par), sock_net(sk)))
+       if (sk && !net_eq(xt_net(par), sock_net(sk)))
                sk = NULL;
 
        if (!sk)
@@ -117,7 +117,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
        struct sk_buff *pskb = (struct sk_buff *)skb;
        struct sock *sk = skb->sk;
 
-       if (!net_eq(xt_net(par), sock_net(sk)))
+       if (sk && !net_eq(xt_net(par), sock_net(sk)))
                sk = NULL;
 
        if (!sk)
index 86a75105af1a2726bc52e44e6c3ac691d719999f..35ae64cbef33fa733c3ffb98bffcdbae78c2273a 100644 (file)
@@ -1312,6 +1312,10 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
 
        rcu_assign_pointer(help->helper, helper);
        info->helper = helper;
+
+       if (info->nat)
+               request_module("ip_nat_%s", name);
+
        return 0;
 }
 
@@ -1624,10 +1628,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
                OVS_NLERR(log, "Failed to allocate conntrack template");
                return -ENOMEM;
        }
-
-       __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
-       nf_conntrack_get(&ct_info.ct->ct_general);
-
        if (helper) {
                err = ovs_ct_add_helper(&ct_info, helper, key, log);
                if (err)
@@ -1639,6 +1639,8 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
        if (err)
                goto err_free_ct;
 
+       __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
+       nf_conntrack_get(&ct_info.ct->ct_general);
        return 0;
 err_free_ct:
        __ovs_ct_free_action(&ct_info);
index 75c92a87e7b2481141161c8945f5e7eef8e0abf8..d6e94dc7e2900bdf91baf98ec0319586a9e06660 100644 (file)
@@ -2715,10 +2715,12 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
                        }
                }
 
-               if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr,
-                                                             vio_le())) {
-                       tp_len = -EINVAL;
-                       goto tpacket_error;
+               if (po->has_vnet_hdr) {
+                       if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
+                               tp_len = -EINVAL;
+                               goto tpacket_error;
+                       }
+                       virtio_net_hdr_set_proto(skb, vnet_hdr);
                }
 
                skb->destructor = tpacket_destruct_skb;
@@ -2915,6 +2917,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
                if (err)
                        goto out_free;
                len += sizeof(vnet_hdr);
+               virtio_net_hdr_set_proto(skb, &vnet_hdr);
        }
 
        skb_probe_transport_header(skb, reserve);
index c9755871042159bdf32a7bd980d8ee4d3a9a51bf..ef9554131434496ae02ab6c47418c13ebbc3b239 100644 (file)
@@ -40,17 +40,12 @@ struct rxrpc_crypt {
 struct rxrpc_connection;
 
 /*
- * Mark applied to socket buffers.
+ * Mark applied to socket buffers in skb->mark.  skb->priority is used
+ * to pass supplementary information.
  */
 enum rxrpc_skb_mark {
-       RXRPC_SKB_MARK_DATA,            /* data message */
-       RXRPC_SKB_MARK_FINAL_ACK,       /* final ACK received message */
-       RXRPC_SKB_MARK_BUSY,            /* server busy message */
-       RXRPC_SKB_MARK_REMOTE_ABORT,    /* remote abort message */
-       RXRPC_SKB_MARK_LOCAL_ABORT,     /* local abort message */
-       RXRPC_SKB_MARK_NET_ERROR,       /* network error message */
-       RXRPC_SKB_MARK_LOCAL_ERROR,     /* local error message */
-       RXRPC_SKB_MARK_NEW_CALL,        /* local error message */
+       RXRPC_SKB_MARK_REJECT_BUSY,     /* Reject with BUSY */
+       RXRPC_SKB_MARK_REJECT_ABORT,    /* Reject with ABORT (code in skb->priority) */
 };
 
 /*
@@ -293,7 +288,6 @@ struct rxrpc_peer {
        struct hlist_node       hash_link;
        struct rxrpc_local      *local;
        struct hlist_head       error_targets;  /* targets for net error distribution */
-       struct work_struct      error_distributor;
        struct rb_root          service_conns;  /* Service connections */
        struct list_head        keepalive_link; /* Link in net->peer_keepalive[] */
        time64_t                last_tx_at;     /* Last time packet sent here */
@@ -304,8 +298,6 @@ struct rxrpc_peer {
        unsigned int            maxdata;        /* data size (MTU - hdrsize) */
        unsigned short          hdrsize;        /* header size (IP + UDP + RxRPC) */
        int                     debug_id;       /* debug ID for printks */
-       int                     error_report;   /* Net (+0) or local (+1000000) to distribute */
-#define RXRPC_LOCAL_ERROR_OFFSET 1000000
        struct sockaddr_rxrpc   srx;            /* remote address */
 
        /* calculated RTT cache */
@@ -463,6 +455,16 @@ struct rxrpc_connection {
        u8                      out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
 };
 
+static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
+{
+       return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
+}
+
+static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
+{
+       return !rxrpc_to_server(sp);
+}
+
 /*
  * Flags in call->flags.
  */
@@ -717,6 +719,8 @@ extern struct workqueue_struct *rxrpc_workqueue;
 int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
 void rxrpc_discard_prealloc(struct rxrpc_sock *);
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
+                                          struct rxrpc_sock *,
+                                          struct rxrpc_peer *,
                                           struct rxrpc_connection *,
                                           struct sk_buff *);
 void rxrpc_accept_incoming_calls(struct rxrpc_local *);
@@ -908,7 +912,8 @@ extern unsigned int rxrpc_closed_conn_expiry;
 
 struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
-                                                  struct sk_buff *);
+                                                  struct sk_buff *,
+                                                  struct rxrpc_peer **);
 void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
 void rxrpc_disconnect_call(struct rxrpc_call *);
 void rxrpc_kill_connection(struct rxrpc_connection *);
@@ -1031,7 +1036,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
  * peer_event.c
  */
 void rxrpc_error_report(struct sock *);
-void rxrpc_peer_error_distributor(struct work_struct *);
 void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
                        rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
 void rxrpc_peer_keepalive_worker(struct work_struct *);
@@ -1044,13 +1048,11 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
 struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
                                     struct sockaddr_rxrpc *, gfp_t);
 struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
-                                             struct rxrpc_peer *);
+void rxrpc_new_incoming_peer(struct rxrpc_local *, struct rxrpc_peer *);
 void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
 void rxrpc_put_peer(struct rxrpc_peer *);
-void __rxrpc_queue_peer_error(struct rxrpc_peer *);
 
 /*
  * proc.c
index 9d1e298b784c8b595626ec0b8f5af0f14e7e03a4..9c7f26d06a52f36d98bc78df68e682f51f8eb9e6 100644 (file)
@@ -249,11 +249,11 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
  */
 static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                                                    struct rxrpc_local *local,
+                                                   struct rxrpc_peer *peer,
                                                    struct rxrpc_connection *conn,
                                                    struct sk_buff *skb)
 {
        struct rxrpc_backlog *b = rx->backlog;
-       struct rxrpc_peer *peer, *xpeer;
        struct rxrpc_call *call;
        unsigned short call_head, conn_head, peer_head;
        unsigned short call_tail, conn_tail, peer_tail;
@@ -276,21 +276,18 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                return NULL;
 
        if (!conn) {
-               /* No connection.  We're going to need a peer to start off
-                * with.  If one doesn't yet exist, use a spare from the
-                * preallocation set.  We dump the address into the spare in
-                * anticipation - and to save on stack space.
-                */
-               xpeer = b->peer_backlog[peer_tail];
-               if (rxrpc_extract_addr_from_skb(local, &xpeer->srx, skb) < 0)
-                       return NULL;
-
-               peer = rxrpc_lookup_incoming_peer(local, xpeer);
-               if (peer == xpeer) {
+               if (peer && !rxrpc_get_peer_maybe(peer))
+                       peer = NULL;
+               if (!peer) {
+                       peer = b->peer_backlog[peer_tail];
+                       if (rxrpc_extract_addr_from_skb(local, &peer->srx, skb) < 0)
+                               return NULL;
                        b->peer_backlog[peer_tail] = NULL;
                        smp_store_release(&b->peer_backlog_tail,
                                          (peer_tail + 1) &
                                          (RXRPC_BACKLOG_MAX - 1));
+
+                       rxrpc_new_incoming_peer(local, peer);
                }
 
                /* Now allocate and set up the connection */
@@ -335,45 +332,31 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
  * The call is returned with the user access mutex held.
  */
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+                                          struct rxrpc_sock *rx,
+                                          struct rxrpc_peer *peer,
                                           struct rxrpc_connection *conn,
                                           struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-       struct rxrpc_sock *rx;
        struct rxrpc_call *call;
-       u16 service_id = sp->hdr.serviceId;
 
        _enter("");
 
-       /* Get the socket providing the service */
-       rx = rcu_dereference(local->service);
-       if (rx && (service_id == rx->srx.srx_service ||
-                  service_id == rx->second_service))
-               goto found_service;
-
-       trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                         RX_INVALID_OPERATION, EOPNOTSUPP);
-       skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
-       skb->priority = RX_INVALID_OPERATION;
-       _leave(" = NULL [service]");
-       return NULL;
-
-found_service:
        spin_lock(&rx->incoming_lock);
        if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
            rx->sk.sk_state == RXRPC_CLOSE) {
                trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
                                  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
-               skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+               skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
                skb->priority = RX_INVALID_OPERATION;
                _leave(" = NULL [close]");
                call = NULL;
                goto out;
        }
 
-       call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
+       call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
        if (!call) {
-               skb->mark = RXRPC_SKB_MARK_BUSY;
+               skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
                _leave(" = NULL [busy]");
                call = NULL;
                goto out;
index 9486293fef5c6f98c96397fc90eb14eecf332196..799f75b6900ddc4a7a5aecf87325b355ebbbcecc 100644 (file)
@@ -400,7 +400,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
        rcu_assign_pointer(conn->channels[chan].call, call);
 
        spin_lock(&conn->params.peer->lock);
-       hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
+       hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
        spin_unlock(&conn->params.peer->lock);
 
        _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
index f8f37188a9322829b8f4277c09b7329d2f4c1da0..8acf74fe24c03646916c1b69cddf8c7be3f79d43 100644 (file)
@@ -710,8 +710,8 @@ int rxrpc_connect_call(struct rxrpc_call *call,
        }
 
        spin_lock_bh(&call->conn->params.peer->lock);
-       hlist_add_head(&call->error_link,
-                      &call->conn->params.peer->error_targets);
+       hlist_add_head_rcu(&call->error_link,
+                          &call->conn->params.peer->error_targets);
        spin_unlock_bh(&call->conn->params.peer->lock);
 
 out:
index 77440a356b14ae60e875fcd94a2613227fd899cf..885dae829f4a1a1690334f0a6a3375d34d79bffb 100644 (file)
@@ -69,10 +69,14 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
  * If successful, a pointer to the connection is returned, but no ref is taken.
  * NULL is returned if there is no match.
  *
+ * When searching for a service call, if we find a peer but no connection, we
+ * return that through *_peer in case we need to create a new service call.
+ *
  * The caller must be holding the RCU read lock.
  */
 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
-                                                  struct sk_buff *skb)
+                                                  struct sk_buff *skb,
+                                                  struct rxrpc_peer **_peer)
 {
        struct rxrpc_connection *conn;
        struct rxrpc_conn_proto k;
@@ -85,9 +89,6 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
        if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0)
                goto not_found;
 
-       k.epoch = sp->hdr.epoch;
-       k.cid   = sp->hdr.cid & RXRPC_CIDMASK;
-
        /* We may have to handle mixing IPv4 and IPv6 */
        if (srx.transport.family != local->srx.transport.family) {
                pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
@@ -99,7 +100,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
        k.epoch = sp->hdr.epoch;
        k.cid   = sp->hdr.cid & RXRPC_CIDMASK;
 
-       if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
+       if (rxrpc_to_server(sp)) {
                /* We need to look up service connections by the full protocol
                 * parameter set.  We look up the peer first as an intermediate
                 * step and then the connection from the peer's tree.
@@ -107,6 +108,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
                peer = rxrpc_lookup_peer_rcu(local, &srx);
                if (!peer)
                        goto not_found;
+               *_peer = peer;
                conn = rxrpc_find_service_conn_rcu(peer, skb);
                if (!conn || atomic_read(&conn->usage) == 0)
                        goto not_found;
@@ -214,7 +216,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
        call->peer->cong_cwnd = call->cong_cwnd;
 
        spin_lock_bh(&conn->params.peer->lock);
-       hlist_del_init(&call->error_link);
+       hlist_del_rcu(&call->error_link);
        spin_unlock_bh(&conn->params.peer->lock);
 
        if (rxrpc_is_client_call(call))
index cfdc199c63510255c1d8cd60baed3c5f66b93d28..800f5b8a1baa04ec2062a975cc501875ece9eb43 100644 (file)
@@ -622,13 +622,14 @@ static void rxrpc_input_requested_ack(struct rxrpc_call *call,
                if (!skb)
                        continue;
 
+               sent_at = skb->tstamp;
+               smp_rmb(); /* Read timestamp before serial. */
                sp = rxrpc_skb(skb);
                if (sp->hdr.serial != orig_serial)
                        continue;
-               smp_rmb();
-               sent_at = skb->tstamp;
                goto found;
        }
+
        return;
 
 found:
@@ -1124,12 +1125,14 @@ void rxrpc_data_ready(struct sock *udp_sk)
 {
        struct rxrpc_connection *conn;
        struct rxrpc_channel *chan;
-       struct rxrpc_call *call;
+       struct rxrpc_call *call = NULL;
        struct rxrpc_skb_priv *sp;
        struct rxrpc_local *local = udp_sk->sk_user_data;
+       struct rxrpc_peer *peer = NULL;
+       struct rxrpc_sock *rx = NULL;
        struct sk_buff *skb;
        unsigned int channel;
-       int ret, skew;
+       int ret, skew = 0;
 
        _enter("%p", udp_sk);
 
@@ -1143,6 +1146,9 @@ void rxrpc_data_ready(struct sock *udp_sk)
                return;
        }
 
+       if (skb->tstamp == 0)
+               skb->tstamp = ktime_get_real();
+
        rxrpc_new_skb(skb, rxrpc_skb_rx_received);
 
        _net("recv skb %p", skb);
@@ -1177,46 +1183,75 @@ void rxrpc_data_ready(struct sock *udp_sk)
 
        trace_rxrpc_rx_packet(sp);
 
-       _net("Rx RxRPC %s ep=%x call=%x:%x",
-            sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
-            sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);
-
-       if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
-           !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
-               _proto("Rx Bad Packet Type %u", sp->hdr.type);
-               goto bad_message;
-       }
-
        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_VERSION:
-               if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED))
+               if (rxrpc_to_client(sp))
                        goto discard;
                rxrpc_post_packet_to_local(local, skb);
                goto out;
 
        case RXRPC_PACKET_TYPE_BUSY:
-               if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
+               if (rxrpc_to_server(sp))
                        goto discard;
                /* Fall through */
+       case RXRPC_PACKET_TYPE_ACK:
+       case RXRPC_PACKET_TYPE_ACKALL:
+               if (sp->hdr.callNumber == 0)
+                       goto bad_message;
+               /* Fall through */
+       case RXRPC_PACKET_TYPE_ABORT:
+               break;
 
        case RXRPC_PACKET_TYPE_DATA:
-               if (sp->hdr.callNumber == 0)
+               if (sp->hdr.callNumber == 0 ||
+                   sp->hdr.seq == 0)
                        goto bad_message;
                if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
                    !rxrpc_validate_jumbo(skb))
                        goto bad_message;
                break;
 
+       case RXRPC_PACKET_TYPE_CHALLENGE:
+               if (rxrpc_to_server(sp))
+                       goto discard;
+               break;
+       case RXRPC_PACKET_TYPE_RESPONSE:
+               if (rxrpc_to_client(sp))
+                       goto discard;
+               break;
+
                /* Packet types 9-11 should just be ignored. */
        case RXRPC_PACKET_TYPE_PARAMS:
        case RXRPC_PACKET_TYPE_10:
        case RXRPC_PACKET_TYPE_11:
                goto discard;
+
+       default:
+               _proto("Rx Bad Packet Type %u", sp->hdr.type);
+               goto bad_message;
        }
 
+       if (sp->hdr.serviceId == 0)
+               goto bad_message;
+
        rcu_read_lock();
 
-       conn = rxrpc_find_connection_rcu(local, skb);
+       if (rxrpc_to_server(sp)) {
+               /* Weed out packets to services we're not offering.  Packets
+                * that would begin a call are explicitly rejected and the rest
+                * are just discarded.
+                */
+               rx = rcu_dereference(local->service);
+               if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
+                           sp->hdr.serviceId != rx->second_service)) {
+                       if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
+                           sp->hdr.seq == 1)
+                               goto unsupported_service;
+                       goto discard_unlock;
+               }
+       }
+
+       conn = rxrpc_find_connection_rcu(local, skb, &peer);
        if (conn) {
                if (sp->hdr.securityIndex != conn->security_ix)
                        goto wrong_security;
@@ -1280,7 +1315,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
                call = rcu_dereference(chan->call);
 
                if (sp->hdr.callNumber > chan->call_id) {
-                       if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) {
+                       if (rxrpc_to_client(sp)) {
                                rcu_read_unlock();
                                goto reject_packet;
                        }
@@ -1297,19 +1332,15 @@ void rxrpc_data_ready(struct sock *udp_sk)
                        if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
                                set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
                }
-       } else {
-               skew = 0;
-               call = NULL;
        }
 
        if (!call || atomic_read(&call->usage) == 0) {
-               if (!(sp->hdr.type & RXRPC_CLIENT_INITIATED) ||
-                   sp->hdr.callNumber == 0 ||
+               if (rxrpc_to_client(sp) ||
                    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
                        goto bad_message_unlock;
                if (sp->hdr.seq != 1)
                        goto discard_unlock;
-               call = rxrpc_new_incoming_call(local, conn, skb);
+               call = rxrpc_new_incoming_call(local, rx, peer, conn, skb);
                if (!call) {
                        rcu_read_unlock();
                        goto reject_packet;
@@ -1340,6 +1371,13 @@ void rxrpc_data_ready(struct sock *udp_sk)
        skb->priority = RXKADINCONSISTENCY;
        goto post_abort;
 
+unsupported_service:
+       rcu_read_unlock();
+       trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+                         RX_INVALID_OPERATION, EOPNOTSUPP);
+       skb->priority = RX_INVALID_OPERATION;
+       goto post_abort;
+
 reupgrade:
        rcu_read_unlock();
        trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
@@ -1354,7 +1392,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
 protocol_error:
        skb->priority = RX_PROTOCOL_ERROR;
 post_abort:
-       skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+       skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
 reject_packet:
        trace_rxrpc_rx_done(skb->mark, skb->priority);
        rxrpc_reject_packet(local, skb);
index 777c3ed4cfc03d3923e052d95597926a1893a163..94d234e9c685fbe4324726df73800ed0f873e01b 100644 (file)
@@ -135,10 +135,10 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
        }
 
        switch (local->srx.transport.family) {
-       case AF_INET:
-               /* we want to receive ICMP errors */
+       case AF_INET6:
+               /* we want to receive ICMPv6 errors */
                opt = 1;
-               ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
+               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
                                        (char *) &opt, sizeof(opt));
                if (ret < 0) {
                        _debug("setsockopt failed");
@@ -146,19 +146,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
                }
 
                /* we want to set the don't fragment bit */
-               opt = IP_PMTUDISC_DO;
-               ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
+               opt = IPV6_PMTUDISC_DO;
+               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
                                        (char *) &opt, sizeof(opt));
                if (ret < 0) {
                        _debug("setsockopt failed");
                        goto error;
                }
-               break;
 
-       case AF_INET6:
+               /* Fall through and set IPv4 options too otherwise we don't get
+                * errors from IPv4 packets sent through the IPv6 socket.
+                */
+
+       case AF_INET:
                /* we want to receive ICMP errors */
                opt = 1;
-               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
+               ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
                                        (char *) &opt, sizeof(opt));
                if (ret < 0) {
                        _debug("setsockopt failed");
@@ -166,13 +169,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
                }
 
                /* we want to set the don't fragment bit */
-               opt = IPV6_PMTUDISC_DO;
-               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
+               opt = IP_PMTUDISC_DO;
+               ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
                                        (char *) &opt, sizeof(opt));
                if (ret < 0) {
                        _debug("setsockopt failed");
                        goto error;
                }
+
+               /* We want receive timestamps. */
+               opt = 1;
+               ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS,
+                                       (char *)&opt, sizeof(opt));
+               if (ret < 0) {
+                       _debug("setsockopt failed");
+                       goto error;
+               }
                break;
 
        default:
index ccf5de160444f4f08fa44310dbf4db29bfb8846d..e8fb8922bca838d145ca2c83a145ad5050aae6ea 100644 (file)
@@ -124,7 +124,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
        struct kvec iov[2];
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top;
-       ktime_t now;
        size_t len, n;
        int ret;
        u8 reason;
@@ -196,9 +195,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
                /* We need to stick a time in before we send the packet in case
                 * the reply gets back before kernel_sendmsg() completes - but
                 * asking UDP to send the packet can take a relatively long
-                * time, so we update the time after, on the assumption that
-                * the packet transmission is more likely to happen towards the
-                * end of the kernel_sendmsg() call.
+                * time.
                 */
                call->ping_time = ktime_get_real();
                set_bit(RXRPC_CALL_PINGING, &call->flags);
@@ -206,9 +203,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
        }
 
        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
-       now = ktime_get_real();
-       if (ping)
-               call->ping_time = now;
        conn->params.peer->last_tx_at = ktime_get_seconds();
        if (ret < 0)
                trace_rxrpc_tx_fail(call->debug_id, serial, ret,
@@ -363,8 +357,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 
        /* If our RTT cache needs working on, request an ACK.  Also request
         * ACKs if a DATA packet appears to have been lost.
+        *
+        * However, we mustn't request an ACK on the last reply packet of a
+        * service call, lest OpenAFS incorrectly send us an ACK with some
+        * soft-ACKs in it and then never follow up with a proper hard ACK.
         */
-       if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
+       if ((!(sp->hdr.flags & RXRPC_LAST_PACKET) ||
+            rxrpc_to_server(sp)
+            ) &&
            (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
             retrans ||
             call->cong_mode == RXRPC_CALL_SLOW_START ||
@@ -390,6 +390,11 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
                goto send_fragmentable;
 
        down_read(&conn->params.local->defrag_sem);
+
+       sp->hdr.serial = serial;
+       smp_wmb(); /* Set serial before timestamp */
+       skb->tstamp = ktime_get_real();
+
        /* send the packet by UDP
         * - returns -EMSGSIZE if UDP would have to fragment the packet
         *   to go out of the interface
@@ -413,12 +418,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
        trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
                            retrans, lost);
        if (ret >= 0) {
-               ktime_t now = ktime_get_real();
-               skb->tstamp = now;
-               smp_wmb();
-               sp->hdr.serial = serial;
                if (whdr.flags & RXRPC_REQUEST_ACK) {
-                       call->peer->rtt_last_req = now;
+                       call->peer->rtt_last_req = skb->tstamp;
                        trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
                        if (call->peer->rtt_usage > 1) {
                                unsigned long nowj = jiffies, ack_lost_at;
@@ -457,6 +458,10 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 
        down_write(&conn->params.local->defrag_sem);
 
+       sp->hdr.serial = serial;
+       smp_wmb(); /* Set serial before timestamp */
+       skb->tstamp = ktime_get_real();
+
        switch (conn->params.local->srx.transport.family) {
        case AF_INET:
                opt = IP_PMTUDISC_DONT;
@@ -519,7 +524,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
        struct kvec iov[2];
        size_t size;
        __be32 code;
-       int ret;
+       int ret, ioc;
 
        _enter("%d", local->debug_id);
 
@@ -527,7 +532,6 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
        iov[0].iov_len = sizeof(whdr);
        iov[1].iov_base = &code;
        iov[1].iov_len = sizeof(code);
-       size = sizeof(whdr) + sizeof(code);
 
        msg.msg_name = &srx.transport;
        msg.msg_control = NULL;
@@ -535,17 +539,31 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
        msg.msg_flags = 0;
 
        memset(&whdr, 0, sizeof(whdr));
-       whdr.type = RXRPC_PACKET_TYPE_ABORT;
 
        while ((skb = skb_dequeue(&local->reject_queue))) {
                rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
                sp = rxrpc_skb(skb);
 
+               switch (skb->mark) {
+               case RXRPC_SKB_MARK_REJECT_BUSY:
+                       whdr.type = RXRPC_PACKET_TYPE_BUSY;
+                       size = sizeof(whdr);
+                       ioc = 1;
+                       break;
+               case RXRPC_SKB_MARK_REJECT_ABORT:
+                       whdr.type = RXRPC_PACKET_TYPE_ABORT;
+                       code = htonl(skb->priority);
+                       size = sizeof(whdr) + sizeof(code);
+                       ioc = 2;
+                       break;
+               default:
+                       rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+                       continue;
+               }
+
                if (rxrpc_extract_addr_from_skb(local, &srx, skb) == 0) {
                        msg.msg_namelen = srx.transport_len;
 
-                       code = htonl(skb->priority);
-
                        whdr.epoch      = htonl(sp->hdr.epoch);
                        whdr.cid        = htonl(sp->hdr.cid);
                        whdr.callNumber = htonl(sp->hdr.callNumber);
index 4f9da2f51c694c3f93d3883476057377664b80e7..f3e6fc670da2339998992f0f0904e1f0b767ddd1 100644 (file)
@@ -23,6 +23,8 @@
 #include "ar-internal.h"
 
 static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
+static void rxrpc_distribute_error(struct rxrpc_peer *, int,
+                                  enum rxrpc_call_completion);
 
 /*
  * Find the peer associated with an ICMP packet.
@@ -194,8 +196,6 @@ void rxrpc_error_report(struct sock *sk)
        rcu_read_unlock();
        rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
 
-       /* The ref we obtained is passed off to the work item */
-       __rxrpc_queue_peer_error(peer);
        _leave("");
 }
 
@@ -205,6 +205,7 @@ void rxrpc_error_report(struct sock *sk)
 static void rxrpc_store_error(struct rxrpc_peer *peer,
                              struct sock_exterr_skb *serr)
 {
+       enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
        struct sock_extended_err *ee;
        int err;
 
@@ -255,7 +256,7 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
        case SO_EE_ORIGIN_NONE:
        case SO_EE_ORIGIN_LOCAL:
                _proto("Rx Received local error { error=%d }", err);
-               err += RXRPC_LOCAL_ERROR_OFFSET;
+               compl = RXRPC_CALL_LOCAL_ERROR;
                break;
 
        case SO_EE_ORIGIN_ICMP6:
@@ -264,48 +265,23 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
                break;
        }
 
-       peer->error_report = err;
+       rxrpc_distribute_error(peer, err, compl);
 }
 
 /*
- * Distribute an error that occurred on a peer
+ * Distribute an error that occurred on a peer.
  */
-void rxrpc_peer_error_distributor(struct work_struct *work)
+static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
+                                  enum rxrpc_call_completion compl)
 {
-       struct rxrpc_peer *peer =
-               container_of(work, struct rxrpc_peer, error_distributor);
        struct rxrpc_call *call;
-       enum rxrpc_call_completion compl;
-       int error;
-
-       _enter("");
-
-       error = READ_ONCE(peer->error_report);
-       if (error < RXRPC_LOCAL_ERROR_OFFSET) {
-               compl = RXRPC_CALL_NETWORK_ERROR;
-       } else {
-               compl = RXRPC_CALL_LOCAL_ERROR;
-               error -= RXRPC_LOCAL_ERROR_OFFSET;
-       }
 
-       _debug("ISSUE ERROR %s %d", rxrpc_call_completions[compl], error);
-
-       spin_lock_bh(&peer->lock);
-
-       while (!hlist_empty(&peer->error_targets)) {
-               call = hlist_entry(peer->error_targets.first,
-                                  struct rxrpc_call, error_link);
-               hlist_del_init(&call->error_link);
+       hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
                rxrpc_see_call(call);
-
-               if (rxrpc_set_call_completion(call, compl, 0, -error))
+               if (call->state < RXRPC_CALL_COMPLETE &&
+                   rxrpc_set_call_completion(call, compl, 0, -error))
                        rxrpc_notify_socket(call);
        }
-
-       spin_unlock_bh(&peer->lock);
-
-       rxrpc_put_peer(peer);
-       _leave("");
 }
 
 /*
index 1dc7648e3eff34f25ceea7b0edbd74b5f8cd02b3..01a9febfa36714da7293c1b9b5a5235d0947f8d0 100644 (file)
@@ -124,11 +124,9 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
        struct rxrpc_net *rxnet = local->rxnet;
 
        hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
-               if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) {
-                       if (atomic_read(&peer->usage) == 0)
-                               return NULL;
+               if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
+                   atomic_read(&peer->usage) > 0)
                        return peer;
-               }
        }
 
        return NULL;
@@ -222,8 +220,6 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
                atomic_set(&peer->usage, 1);
                peer->local = local;
                INIT_HLIST_HEAD(&peer->error_targets);
-               INIT_WORK(&peer->error_distributor,
-                         &rxrpc_peer_error_distributor);
                peer->service_conns = RB_ROOT;
                seqlock_init(&peer->service_conn_lock);
                spin_lock_init(&peer->lock);
@@ -299,34 +295,23 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
 }
 
 /*
- * Set up a new incoming peer.  The address is prestored in the preallocated
- * peer.
+ * Set up a new incoming peer.  There shouldn't be any other matching peers
+ * since we've already done a search in the list from the non-reentrant context
+ * (the data_ready handler) that is the only place we can add new peers.
  */
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
-                                             struct rxrpc_peer *prealloc)
+void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
 {
-       struct rxrpc_peer *peer;
        struct rxrpc_net *rxnet = local->rxnet;
        unsigned long hash_key;
 
-       hash_key = rxrpc_peer_hash_key(local, &prealloc->srx);
-       prealloc->local = local;
-       rxrpc_init_peer(prealloc, hash_key);
+       hash_key = rxrpc_peer_hash_key(local, &peer->srx);
+       peer->local = local;
+       rxrpc_init_peer(peer, hash_key);
 
        spin_lock(&rxnet->peer_hash_lock);
-
-       /* Need to check that we aren't racing with someone else */
-       peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key);
-       if (peer && !rxrpc_get_peer_maybe(peer))
-               peer = NULL;
-       if (!peer) {
-               peer = prealloc;
-               hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
-               list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
-       }
-
+       hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
+       list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
        spin_unlock(&rxnet->peer_hash_lock);
-       return peer;
 }
 
 /*
@@ -415,21 +400,6 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
        return peer;
 }
 
-/*
- * Queue a peer record.  This passes the caller's ref to the workqueue.
- */
-void __rxrpc_queue_peer_error(struct rxrpc_peer *peer)
-{
-       const void *here = __builtin_return_address(0);
-       int n;
-
-       n = atomic_read(&peer->usage);
-       if (rxrpc_queue_work(&peer->error_distributor))
-               trace_rxrpc_peer(peer, rxrpc_peer_queued_error, n, here);
-       else
-               rxrpc_put_peer(peer);
-}
-
 /*
  * Discard a peer record.
  */
index 93da73bf709857bbd48b2859092175bf43df8dfd..f9cb83c938f35d4ad8e381658da53e8f2494ff6a 100644 (file)
@@ -50,7 +50,6 @@ struct rxrpc_wire_header {
 #define RXRPC_PACKET_TYPE_10           10      /* Ignored */
 #define RXRPC_PACKET_TYPE_11           11      /* Ignored */
 #define RXRPC_PACKET_TYPE_VERSION      13      /* version string request */
-#define RXRPC_N_PACKET_TYPES           14      /* number of packet types (incl type 0) */
 
        uint8_t         flags;          /* packet flags */
 #define RXRPC_CLIENT_INITIATED 0x01            /* signifies a packet generated by a client */
@@ -72,20 +71,6 @@ struct rxrpc_wire_header {
 
 } __packed;
 
-#define RXRPC_SUPPORTED_PACKET_TYPES (                 \
-               (1 << RXRPC_PACKET_TYPE_DATA) |         \
-               (1 << RXRPC_PACKET_TYPE_ACK) |          \
-               (1 << RXRPC_PACKET_TYPE_BUSY) |         \
-               (1 << RXRPC_PACKET_TYPE_ABORT) |        \
-               (1 << RXRPC_PACKET_TYPE_ACKALL) |       \
-               (1 << RXRPC_PACKET_TYPE_CHALLENGE) |    \
-               (1 << RXRPC_PACKET_TYPE_RESPONSE) |     \
-               /*(1 << RXRPC_PACKET_TYPE_DEBUG) | */   \
-               (1 << RXRPC_PACKET_TYPE_PARAMS) |       \
-               (1 << RXRPC_PACKET_TYPE_10) |           \
-               (1 << RXRPC_PACKET_TYPE_11) |           \
-               (1 << RXRPC_PACKET_TYPE_VERSION))
-
 /*****************************************************************************/
 /*
  * jumbo packet secondary header
index 23273b5303fd9dcc68cf09ee6f701defe50580b5..8525de8116163a05257753aa00117e20a8b2cc42 100644 (file)
@@ -135,7 +135,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
        }
 
        td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
-       if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
+       if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
                if (exists)
                        tcf_idr_release(*a, bind);
                else
index 98541c6399db53f5d8ae46aee0f17cad7e0a127e..85e73f48e48ff89ba0e29bd2bddd2ba1c96df271 100644 (file)
@@ -1311,6 +1311,18 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
  * Delete/get qdisc.
  */
 
+const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
+       [TCA_KIND]              = { .type = NLA_STRING },
+       [TCA_OPTIONS]           = { .type = NLA_NESTED },
+       [TCA_RATE]              = { .type = NLA_BINARY,
+                                   .len = sizeof(struct tc_estimator) },
+       [TCA_STAB]              = { .type = NLA_NESTED },
+       [TCA_DUMP_INVISIBLE]    = { .type = NLA_FLAG },
+       [TCA_CHAIN]             = { .type = NLA_U32 },
+       [TCA_INGRESS_BLOCK]     = { .type = NLA_U32 },
+       [TCA_EGRESS_BLOCK]      = { .type = NLA_U32 },
+};
+
 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
                        struct netlink_ext_ack *extack)
 {
@@ -1327,7 +1339,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
            !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
-       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+                         extack);
        if (err < 0)
                return err;
 
@@ -1411,7 +1424,8 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 
 replay:
        /* Reinit, just in case something touches this. */
-       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+                         extack);
        if (err < 0)
                return err;
 
@@ -1645,7 +1659,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
        idx = 0;
        ASSERT_RTNL();
 
-       err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX, NULL, NULL);
+       err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
+                         rtm_tca_policy, NULL);
        if (err < 0)
                return err;
 
@@ -1864,7 +1879,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
            !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
-       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+                         extack);
        if (err < 0)
                return err;
 
index d74d00b299421a940766f157bf8b48038b827cd1..42191ed9902b8dd38ad41b6221bd4210427b193b 100644 (file)
@@ -1048,7 +1048,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
                if (!ctx->packet || !ctx->packet->has_cookie_echo)
                        return;
 
-               /* fallthru */
+               /* fall through */
        case SCTP_STATE_ESTABLISHED:
        case SCTP_STATE_SHUTDOWN_PENDING:
        case SCTP_STATE_SHUTDOWN_RECEIVED:
index 418f03d0be90f076cf34b1ee96495ceb3a3d68de..645c160520529271cc00e9df50ba086a4feb7fbc 100644 (file)
@@ -609,16 +609,18 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
 
        switch (evt) {
        case NETDEV_CHANGE:
-               if (netif_carrier_ok(dev))
+               if (netif_carrier_ok(dev) && netif_oper_up(dev)) {
+                       test_and_set_bit_lock(0, &b->up);
                        break;
-               /* else: fall through */
-       case NETDEV_UP:
-               test_and_set_bit_lock(0, &b->up);
-               break;
+               }
+               /* fall through */
        case NETDEV_GOING_DOWN:
                clear_bit_unlock(0, &b->up);
                tipc_reset_bearer(net, b);
                break;
+       case NETDEV_UP:
+               test_and_set_bit_lock(0, &b->up);
+               break;
        case NETDEV_CHANGEMTU:
                if (tipc_mtu_bad(dev, 0)) {
                        bearer_disable(net, b);
index b1f0bee54eacc9eb1974169853abf1ace4df2733..fb886b525d950e18f7ef517bac408272d17e8d4e 100644 (file)
@@ -410,6 +410,11 @@ char *tipc_link_name(struct tipc_link *l)
        return l->name;
 }
 
+u32 tipc_link_state(struct tipc_link *l)
+{
+       return l->state;
+}
+
 /**
  * tipc_link_create - create a new link
  * @n: pointer to associated node
@@ -841,9 +846,14 @@ void tipc_link_reset(struct tipc_link *l)
        l->in_session = false;
        l->session++;
        l->mtu = l->advertised_mtu;
+       spin_lock_bh(&l->wakeupq.lock);
+       spin_lock_bh(&l->inputq->lock);
+       skb_queue_splice_init(&l->wakeupq, l->inputq);
+       spin_unlock_bh(&l->inputq->lock);
+       spin_unlock_bh(&l->wakeupq.lock);
+
        __skb_queue_purge(&l->transmq);
        __skb_queue_purge(&l->deferdq);
-       skb_queue_splice_init(&l->wakeupq, l->inputq);
        __skb_queue_purge(&l->backlogq);
        l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
        l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
@@ -1380,6 +1390,36 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
        __skb_queue_tail(xmitq, skb);
 }
 
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
+                                   struct sk_buff_head *xmitq)
+{
+       u32 onode = tipc_own_addr(l->net);
+       struct tipc_msg *hdr, *ihdr;
+       struct sk_buff_head tnlq;
+       struct sk_buff *skb;
+       u32 dnode = l->addr;
+
+       skb_queue_head_init(&tnlq);
+       skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
+                             INT_H_SIZE, BASIC_H_SIZE,
+                             dnode, onode, 0, 0, 0);
+       if (!skb) {
+               pr_warn("%sunable to create tunnel packet\n", link_co_err);
+               return;
+       }
+
+       hdr = buf_msg(skb);
+       msg_set_msgcnt(hdr, 1);
+       msg_set_bearer_id(hdr, l->peer_bearer_id);
+
+       ihdr = (struct tipc_msg *)msg_data(hdr);
+       tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+                     BASIC_H_SIZE, dnode);
+       msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
+       __skb_queue_tail(&tnlq, skb);
+       tipc_link_xmit(l, &tnlq, xmitq);
+}
+
 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
  * with contents of the link's transmit and backlog queues.
  */
@@ -1476,6 +1516,9 @@ bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
                        return false;
                if (session != curr_session)
                        return false;
+               /* Extra sanity check */
+               if (!link_is_up(l) && msg_ack(hdr))
+                       return false;
                if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
                        return true;
                /* Accept only STATE with new sequence number */
index 7bc494a33fdf1c3cdf8feb04b44db7e6e04a349c..90488c538a4e4edaddfaf441a517f8b29c1d2ebc 100644 (file)
@@ -88,6 +88,8 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
                         struct tipc_link **link);
 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
                           int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *tnl,
+                                   struct sk_buff_head *xmitq);
 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
 int tipc_link_fsm_evt(struct tipc_link *l, int evt);
 bool tipc_link_is_up(struct tipc_link *l);
@@ -107,6 +109,7 @@ u16 tipc_link_rcv_nxt(struct tipc_link *l);
 u16 tipc_link_acked(struct tipc_link *l);
 u32 tipc_link_id(struct tipc_link *l);
 char *tipc_link_name(struct tipc_link *l);
+u32 tipc_link_state(struct tipc_link *l);
 char tipc_link_plane(struct tipc_link *l);
 int tipc_link_prio(struct tipc_link *l);
 int tipc_link_window(struct tipc_link *l);
index 68014f1b69765269236ac0a6d839ca754d1235da..2afc4f8c37a74db4896508283f434909a0151732 100644 (file)
@@ -111,6 +111,7 @@ struct tipc_node {
        int action_flags;
        struct list_head list;
        int state;
+       bool failover_sent;
        u16 sync_point;
        int link_cnt;
        u16 working_links;
@@ -680,6 +681,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
                *slot0 = bearer_id;
                *slot1 = bearer_id;
                tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
+               n->failover_sent = false;
                n->action_flags |= TIPC_NOTIFY_NODE_UP;
                tipc_link_set_active(nl, true);
                tipc_bcast_add_peer(n->net, nl, xmitq);
@@ -911,6 +913,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
        bool reset = true;
        char *if_name;
        unsigned long intv;
+       u16 session;
 
        *dupl_addr = false;
        *respond = false;
@@ -997,9 +1000,10 @@ void tipc_node_check_dest(struct net *net, u32 addr,
                        goto exit;
 
                if_name = strchr(b->name, ':') + 1;
+               get_random_bytes(&session, sizeof(u16));
                if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
                                      b->net_plane, b->mtu, b->priority,
-                                     b->window, mod(tipc_net(net)->random),
+                                     b->window, session,
                                      tipc_own_addr(net), addr, peer_id,
                                      n->capabilities,
                                      tipc_bc_sndlink(n->net), n->bc_entry.link,
@@ -1615,6 +1619,14 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
                        tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
                                                        tipc_link_inputq(l));
                }
+               /* If parallel link was already down, and this happened before
+                * the tunnel link came up, FAILOVER was never sent. Ensure that
+                * FAILOVER is sent to get peer out of NODE_FAILINGOVER state.
+                */
+               if (n->state != NODE_FAILINGOVER && !n->failover_sent) {
+                       tipc_link_create_dummy_tnl_msg(l, xmitq);
+                       n->failover_sent = true;
+               }
                /* If pkts arrive out of order, use lowest calculated syncpt */
                if (less(syncpt, n->sync_point))
                        n->sync_point = syncpt;
index 3f03ddd0e35b2f1b6acad1c788faee1976924b3b..b6f99b021d09b19a6fe0d6776d2c824a913cdacb 100644 (file)
@@ -1419,8 +1419,10 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
        /* Handle implicit connection setup */
        if (unlikely(dest)) {
                rc = __tipc_sendmsg(sock, m, dlen);
-               if (dlen && (dlen == rc))
+               if (dlen && dlen == rc) {
+                       tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
                        tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
+               }
                return rc;
        }
 
index 4b8ec659e797ff743267773e315c6220b90993d0..176edfefcbaa89d826c322eb52cfc4e12129dbd8 100644 (file)
@@ -3756,6 +3756,7 @@ static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
                        return false;
 
                /* check availability */
+               ridx = array_index_nospec(ridx, IEEE80211_HT_MCS_MASK_LEN);
                if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
                        mcs[ridx] |= rbit;
                else
@@ -10230,7 +10231,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        s32 last, low, high;
        u32 hyst;
-       int i, n;
+       int i, n, low_index;
        int err;
 
        /* RSSI reporting disabled? */
@@ -10267,10 +10268,19 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
                if (last < wdev->cqm_config->rssi_thresholds[i])
                        break;
 
-       low = i > 0 ?
-               (wdev->cqm_config->rssi_thresholds[i - 1] - hyst) : S32_MIN;
-       high = i < n ?
-               (wdev->cqm_config->rssi_thresholds[i] + hyst - 1) : S32_MAX;
+       low_index = i - 1;
+       if (low_index >= 0) {
+               low_index = array_index_nospec(low_index, n);
+               low = wdev->cqm_config->rssi_thresholds[low_index] - hyst;
+       } else {
+               low = S32_MIN;
+       }
+       if (i < n) {
+               i = array_index_nospec(i, n);
+               high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1;
+       } else {
+               high = S32_MAX;
+       }
 
        return rdev_set_cqm_rssi_range_config(rdev, dev, low, high);
 }
index 2f702adf2912105947560d07d79ff86119333bbf..24cfa2776f50b4b53220e23fe7e6a29b330f0028 100644 (file)
@@ -2661,11 +2661,12 @@ static void reg_process_hint(struct regulatory_request *reg_request)
 {
        struct wiphy *wiphy = NULL;
        enum reg_request_treatment treatment;
+       enum nl80211_reg_initiator initiator = reg_request->initiator;
 
        if (reg_request->wiphy_idx != WIPHY_IDX_INVALID)
                wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
 
-       switch (reg_request->initiator) {
+       switch (initiator) {
        case NL80211_REGDOM_SET_BY_CORE:
                treatment = reg_process_hint_core(reg_request);
                break;
@@ -2683,7 +2684,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
                treatment = reg_process_hint_country_ie(wiphy, reg_request);
                break;
        default:
-               WARN(1, "invalid initiator %d\n", reg_request->initiator);
+               WARN(1, "invalid initiator %d\n", initiator);
                goto out_free;
        }
 
@@ -2698,7 +2699,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
         */
        if (treatment == REG_REQ_ALREADY_SET && wiphy &&
            wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
-               wiphy_update_regulatory(wiphy, reg_request->initiator);
+               wiphy_update_regulatory(wiphy, initiator);
                wiphy_all_share_dfs_chan_state(wiphy);
                reg_check_channels();
        }
@@ -2867,6 +2868,7 @@ static int regulatory_hint_core(const char *alpha2)
        request->alpha2[0] = alpha2[0];
        request->alpha2[1] = alpha2[1];
        request->initiator = NL80211_REGDOM_SET_BY_CORE;
+       request->wiphy_idx = WIPHY_IDX_INVALID;
 
        queue_regulatory_request(request);
 
index d36c3eb7b9311fc75bdaa020aa0318546efd4128..d0e7472dd9fd4b2a8938334129f24a60ea3fb421 100644 (file)
@@ -1058,13 +1058,23 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
        return NULL;
 }
 
+/*
+ * Update RX channel information based on the available frame payload
+ * information. This is mainly for the 2.4 GHz band where frames can be received
+ * from neighboring channels and the Beacon frames use the DSSS Parameter Set
+ * element to indicate the current (transmitting) channel, but this might also
+ * be needed on other bands if RX frequency does not match with the actual
+ * operating channel of a BSS.
+ */
 static struct ieee80211_channel *
 cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
-                        struct ieee80211_channel *channel)
+                        struct ieee80211_channel *channel,
+                        enum nl80211_bss_scan_width scan_width)
 {
        const u8 *tmp;
        u32 freq;
        int channel_number = -1;
+       struct ieee80211_channel *alt_channel;
 
        tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
        if (tmp && tmp[1] == 1) {
@@ -1078,16 +1088,45 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
                }
        }
 
-       if (channel_number < 0)
+       if (channel_number < 0) {
+               /* No channel information in frame payload */
                return channel;
+       }
 
        freq = ieee80211_channel_to_frequency(channel_number, channel->band);
-       channel = ieee80211_get_channel(wiphy, freq);
-       if (!channel)
-               return NULL;
-       if (channel->flags & IEEE80211_CHAN_DISABLED)
+       alt_channel = ieee80211_get_channel(wiphy, freq);
+       if (!alt_channel) {
+               if (channel->band == NL80211_BAND_2GHZ) {
+                       /*
+                        * Better not allow unexpected channels when that could
+                        * be going beyond the 1-11 range (e.g., discovering
+                        * BSS on channel 12 when radio is configured for
+                        * channel 11.
+                        */
+                       return NULL;
+               }
+
+               /* No match for the payload channel number - ignore it */
+               return channel;
+       }
+
+       if (scan_width == NL80211_BSS_CHAN_WIDTH_10 ||
+           scan_width == NL80211_BSS_CHAN_WIDTH_5) {
+               /*
+                * Ignore channel number in 5 and 10 MHz channels where there
+                * may not be an n:1 or 1:n mapping between frequencies and
+                * channel numbers.
+                */
+               return channel;
+       }
+
+       /*
+        * Use the channel determined through the payload channel number
+        * instead of the RX channel reported by the driver.
+        */
+       if (alt_channel->flags & IEEE80211_CHAN_DISABLED)
                return NULL;
-       return channel;
+       return alt_channel;
 }
 
 /* Returned bss is reference counted and must be cleaned up appropriately. */
@@ -1112,7 +1151,8 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
                    (data->signal < 0 || data->signal > 100)))
                return NULL;
 
-       channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan);
+       channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan,
+                                          data->scan_width);
        if (!channel)
                return NULL;
 
@@ -1210,7 +1250,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
                return NULL;
 
        channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
-                                          ielen, data->chan);
+                                          ielen, data->chan, data->scan_width);
        if (!channel)
                return NULL;
 
index 167f7025ac98288acbd57cd4627b1eb7fb2f6520..06943d9c983522d499395f733f000d8630aa04d0 100644 (file)
@@ -1278,12 +1278,16 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
        if (err)
                return err;
 
-       if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)))
-               return -EOPNOTSUPP;
+       if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) {
+               err = -EOPNOTSUPP;
+               goto free;
+       }
 
        rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
 
-       return 0;
+free:
+       cfg80211_sinfo_release_content(&sinfo);
+       return err;
 }
 
 /* Get wireless statistics.  Called by /proc/net/wireless and by SIOCGIWSTATS */
@@ -1293,7 +1297,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        /* we are under RTNL - globally locked - so can use static structs */
        static struct iw_statistics wstats;
-       static struct station_info sinfo;
+       static struct station_info sinfo = {};
        u8 bssid[ETH_ALEN];
 
        if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)
@@ -1352,6 +1356,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
        if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))
                wstats.discard.retries = sinfo.tx_failed;
 
+       cfg80211_sinfo_release_content(&sinfo);
+
        return &wstats;
 }
 
index b89c9c7f8c5c12a13772ae4d838ce9f121bd51f5..be3520e429c9f989a712f7bf32874bed7d3aa667 100644 (file)
@@ -458,6 +458,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
                        goto drop;
                }
+               crypto_done = false;
        } while (!err);
 
        err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
index 45ba07ab3e4f8d322e564c902774706ec09bcf9c..261995d37ced3a8f0c3ae443a4fca7f511e1a3c4 100644 (file)
@@ -100,6 +100,10 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
                spin_unlock_bh(&x->lock);
 
                skb_dst_force(skb);
+               if (!skb_dst(skb)) {
+                       XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+                       goto error_nolock;
+               }
 
                if (xfrm_offload(skb)) {
                        x->type_offload->encap(x, skb);
index 3110c3fbee2099e7a4563a99c988e5ad66d0658c..f094d4b3520d97773b87baf5700df79fc8ca4666 100644 (file)
@@ -2491,6 +2491,10 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
        }
 
        skb_dst_force(skb);
+       if (!skb_dst(skb)) {
+               XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
+               return 0;
+       }
 
        dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
        if (IS_ERR(dst)) {
index 4791aa8b818583b5fcb5812fd561342fbab2edfa..df7ca2dabc48881eb607089bb17a19b1bc6028b1 100644 (file)
@@ -151,10 +151,16 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
        err = -EINVAL;
        switch (p->family) {
        case AF_INET:
+               if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+                       goto out;
+
                break;
 
        case AF_INET6:
 #if IS_ENABLED(CONFIG_IPV6)
+               if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+                       goto out;
+
                break;
 #else
                err = -EAFNOSUPPORT;
@@ -1396,10 +1402,16 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
 
        switch (p->sel.family) {
        case AF_INET:
+               if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+                       return -EINVAL;
+
                break;
 
        case AF_INET6:
 #if IS_ENABLED(CONFIG_IPV6)
+               if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+                       return -EINVAL;
+
                break;
 #else
                return  -EAFNOSUPPORT;
@@ -1480,6 +1492,9 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
                    (ut[i].family != prev_family))
                        return -EINVAL;
 
+               if (ut[i].mode >= XFRM_MODE_MAX)
+                       return -EINVAL;
+
                prev_family = ut[i].family;
 
                switch (ut[i].family) {
index b5282cbbe48981d350f396525013fc571cdf62fa..617ff1aa818f991a01091a9e03aa7952978c0b92 100644 (file)
@@ -145,9 +145,11 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
        if (!acomp->ops) {
                request_module("i915");
                /* 10s timeout */
-               wait_for_completion_timeout(&bind_complete, 10 * 1000);
+               wait_for_completion_timeout(&bind_complete,
+                                           msecs_to_jiffies(10 * 1000));
        }
        if (!acomp->ops) {
+               dev_info(bus->dev, "couldn't bind with audio component\n");
                snd_hdac_acomp_exit(bus);
                return -ENODEV;
        }
index 1d117f00d04d5620e767500a00f478fa9fc2b94a..3ac7ba9b342d24dd105cd828456715a18b7a8709 100644 (file)
@@ -6409,6 +6409,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
        SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
        SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
        SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
        SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
        SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
index d78aed86af09b61ab27d476f850df1da52669ac4..8ff8cb1a11f449c11f7483180d8492c66344d996 100644 (file)
@@ -234,6 +234,7 @@ int main(int argc, char *argv[])
                        break;
 
                default:
+                       error = HV_E_FAIL;
                        syslog(LOG_ERR, "Unknown operation: %d",
                                buffer.hdr.operation);
 
index 439b8a27488d371fe323f879ba225650df23a359..195ba486640f9b8797981958bc221dd2102306e8 100755 (executable)
@@ -1325,7 +1325,7 @@ class Tui(object):
         msg = ''
         while True:
             self.screen.erase()
-            self.screen.addstr(0, 0, 'Set update interval (defaults to %fs).' %
+            self.screen.addstr(0, 0, 'Set update interval (defaults to %.1fs).' %
                                DELAY_DEFAULT, curses.A_BOLD)
             self.screen.addstr(4, 0, msg)
             self.screen.addstr(2, 0, 'Change delay from %.1fs to ' %
index 93baacab7693c48576e4872c8d31287931b73091..d056486f49de5eacad70b4813287094e5e597927 100644 (file)
@@ -1,5 +1,6 @@
 TEST_GEN_PROGS := copy_first_unaligned alignment_handler
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c ../utils.c
index b4d7432a0ecd1b4af5fa5fe2071276172a684dc5..d40300a65b42f79ba4c48705d909906ed9115dd0 100644 (file)
@@ -4,6 +4,7 @@ TEST_GEN_FILES := exec_target
 
 CFLAGS += -O2
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 1be547434a49c3feb8f7e40c03d4b5260a3b30d4..ede4d3dae7505ef31f822bac9b613aef464f6125 100644 (file)
@@ -5,6 +5,7 @@ all: $(TEST_PROGS)
 
 $(TEST_PROGS): ../harness.c ../utils.c
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 clean:
index 1cf89a34d97ca35299bf62b588b07d61ecbbb8a8..44574f3818b3d71d51a2021412ca70420b4543e2 100644 (file)
@@ -17,6 +17,7 @@ TEST_GEN_PROGS := copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 \
 
 EXTRA_SOURCES := validate.c ../harness.c stubs.S
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(OUTPUT)/copyuser_64_t%:      copyuser_64.S $(EXTRA_SOURCES)
index 55d7db7a616bcd7661fceeb470ecb6f153cd9eac..5df476364b4d46dd1563889bbd86864622c29ffd 100644 (file)
@@ -3,6 +3,7 @@ TEST_GEN_PROGS := dscr_default_test dscr_explicit_test dscr_user_test   \
              dscr_inherit_test dscr_inherit_exec_test dscr_sysfs_test  \
              dscr_sysfs_thread_test
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(OUTPUT)/dscr_default_test: LDLIBS += -lpthread
index 0dd3a01fdab92bc887ddc70cda06fd20c89c5154..11a10d7a2bbd9f1c93ccbf309ac5d0dc9d688f2b 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 8ebbe96d80a8452575fb234b6eba23a19e2c3a4e..33ced6e0ad25e07047e19699c90303ed97d157fa 100644 (file)
@@ -5,6 +5,7 @@ noarg:
 TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors
 TEST_GEN_FILES := tempfile
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 6e1629bf5b09dbfb850acdb0f0c60aafdc6e3ea6..19046db995fee387a77f9fd2f2d130078c37cb71 100644 (file)
@@ -5,6 +5,7 @@ noarg:
 TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes
 EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 all: $(TEST_GEN_PROGS) ebb
index c4e64bc2e2650a2e02fac4a6f891e973a126eced..bd5dfa509272a75b97b1dbf8ac2be00c0a1ab1cc 100644 (file)
@@ -17,6 +17,7 @@ TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test   \
         lost_exception_test no_handler_test                    \
         cycles_with_mmcr2_test
 
+top_srcdir = ../../../../../..
 include ../../../lib.mk
 
 $(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \
index 175366db7be8bc1261194ac80d009cd33e0c9c59..ea2b7bd09e369c4fb679d1baeab71e80712af186 100644 (file)
@@ -2,6 +2,7 @@ CFLAGS += -I$(CURDIR)
 
 TEST_GEN_PROGS := load_unaligned_zeropad
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 28f5b781a553f4899e004e5d1450ab4b92de3407..923d531265f8c22d3adf2442e0a48fc7c1fffb5d 100644 (file)
@@ -4,6 +4,7 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
               ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \
               perf-hwbreak
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 all: $(TEST_PROGS)
index a7cbd5082e27175822bb2d351b7ebdbf9c0e345d..1fca25c6ace067ffb7a913508b4e13059cb04770 100644 (file)
@@ -8,6 +8,7 @@ $(TEST_PROGS): ../harness.c ../utils.c signal.S
 CFLAGS += -maltivec
 signal_tm: CFLAGS += -mhtm
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 clean:
index 10b35c87a4f4e316315c977b09e2b3bc6ef2b73c..7fc0623d85c314636be8cd3d393fb6c35c643edb 100644 (file)
@@ -29,6 +29,7 @@ endif
 
 ASFLAGS = $(CFLAGS)
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): $(EXTRA_SOURCES)
index 30b8ff8fb82e7a161759bf68363e69dcbcde2304..fcd2dcb8972babf90209b699307bd086f08c5f90 100644 (file)
@@ -5,6 +5,7 @@ ASFLAGS += -O2 -Wall -g -nostdlib -m64
 
 EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S
index da22ca7c38c185a5bb4df90a81fa71a9fbccc6c1..161b8846336fdb324f97833ce821babd9c139eae 100644 (file)
@@ -2,6 +2,7 @@ TEST_GEN_PROGS := ipc_unmuxed
 
 CFLAGS += -I../../../../../usr/include
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index c0e45d2dde25d115b73ae2e14002a8b24167abbc..9fc2cf6fbc92c9214f9978dec423c23147ca359b 100644 (file)
@@ -6,6 +6,7 @@ TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack
        tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \
        $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c ../utils.c
index f8ced26748f84408d902cc4ce58012874c545a58..fb82068c9fda297e1c505c440d7cbe112119b581 100644 (file)
@@ -2,6 +2,7 @@ TEST_GEN_PROGS := test-vphn
 
 CFLAGS += -m64
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 642d4e12abea2e37d92e0acea3fdcf692a64d374..eec2663261f2ac8bfc09085049ea0d908f8851c5 100644 (file)
@@ -56,15 +56,13 @@ unsigned int yield_mod_cnt, nr_abort;
                        printf(fmt, ## __VA_ARGS__);    \
        } while (0)
 
-#if defined(__x86_64__) || defined(__i386__)
+#ifdef __i386__
 
 #define INJECT_ASM_REG "eax"
 
 #define RSEQ_INJECT_CLOBBER \
        , INJECT_ASM_REG
 
-#ifdef __i386__
-
 #define RSEQ_INJECT_ASM(n) \
        "mov asm_loop_cnt_" #n ", %%" INJECT_ASM_REG "\n\t" \
        "test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
@@ -76,9 +74,16 @@ unsigned int yield_mod_cnt, nr_abort;
 
 #elif defined(__x86_64__)
 
+#define INJECT_ASM_REG_P       "rax"
+#define INJECT_ASM_REG         "eax"
+
+#define RSEQ_INJECT_CLOBBER \
+       , INJECT_ASM_REG_P \
+       , INJECT_ASM_REG
+
 #define RSEQ_INJECT_ASM(n) \
-       "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG "\n\t" \
-       "mov (%%" INJECT_ASM_REG "), %%" INJECT_ASM_REG "\n\t" \
+       "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG_P "\n\t" \
+       "mov (%%" INJECT_ASM_REG_P "), %%" INJECT_ASM_REG "\n\t" \
        "test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
        "jz 333f\n\t" \
        "222:\n\t" \
@@ -86,10 +91,6 @@ unsigned int yield_mod_cnt, nr_abort;
        "jnz 222b\n\t" \
        "333:\n\t"
 
-#else
-#error "Unsupported architecture"
-#endif
-
 #elif defined(__s390__)
 
 #define RSEQ_INJECT_INPUT \
index 2352590117042ebf79ddd946a00d8c49d38d786e..35edd61d1663eb6fc0378f606638b7b2cc0e26cb 100644 (file)
@@ -17,6 +17,7 @@
 #include <errno.h>
 #include <sched.h>
 #include <stdbool.h>
+#include <limits.h>
 
 #ifndef SYS_getcpu
 # ifdef __x86_64__
 
 int nerrs = 0;
 
+typedef int (*vgettime_t)(clockid_t, struct timespec *);
+
+vgettime_t vdso_clock_gettime;
+
+typedef long (*vgtod_t)(struct timeval *tv, struct timezone *tz);
+
+vgtod_t vdso_gettimeofday;
+
 typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
 
 getcpu_t vgetcpu;
@@ -95,6 +104,15 @@ static void fill_function_pointers()
                printf("Warning: failed to find getcpu in vDSO\n");
 
        vgetcpu = (getcpu_t) vsyscall_getcpu();
+
+       vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
+       if (!vdso_clock_gettime)
+               printf("Warning: failed to find clock_gettime in vDSO\n");
+
+       vdso_gettimeofday = (vgtod_t)dlsym(vdso, "__vdso_gettimeofday");
+       if (!vdso_gettimeofday)
+               printf("Warning: failed to find gettimeofday in vDSO\n");
+
 }
 
 static long sys_getcpu(unsigned * cpu, unsigned * node,
@@ -103,6 +121,16 @@ static long sys_getcpu(unsigned * cpu, unsigned * node,
        return syscall(__NR_getcpu, cpu, node, cache);
 }
 
+static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
+{
+       return syscall(__NR_clock_gettime, id, ts);
+}
+
+static inline int sys_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+       return syscall(__NR_gettimeofday, tv, tz);
+}
+
 static void test_getcpu(void)
 {
        printf("[RUN]\tTesting getcpu...\n");
@@ -155,10 +183,154 @@ static void test_getcpu(void)
        }
 }
 
/* Return true iff timestamp *a is at or before timestamp *b. */
static bool ts_leq(const struct timespec *a, const struct timespec *b)
{
	/* Equal seconds: the nanosecond fields decide. */
	if (a->tv_sec == b->tv_sec)
		return a->tv_nsec <= b->tv_nsec;

	return a->tv_sec < b->tv_sec;
}
+
/* Return true iff timestamp *a is at or before timestamp *b. */
static bool tv_leq(const struct timeval *a, const struct timeval *b)
{
	/* Equal seconds: the microsecond fields decide. */
	if (a->tv_sec == b->tv_sec)
		return a->tv_usec <= b->tv_usec;

	return a->tv_sec < b->tv_sec;
}
+
/*
 * Human-readable names for the clock ids exercised below, indexed by
 * the numeric clockid value; the table's length also bounds the loop
 * in test_clock_gettime().
 */
static char const * const clocknames[] = {
       [0] = "CLOCK_REALTIME",
       [1] = "CLOCK_MONOTONIC",
       [2] = "CLOCK_PROCESS_CPUTIME_ID",
       [3] = "CLOCK_THREAD_CPUTIME_ID",
       [4] = "CLOCK_MONOTONIC_RAW",
       [5] = "CLOCK_REALTIME_COARSE",
       [6] = "CLOCK_MONOTONIC_COARSE",
       [7] = "CLOCK_BOOTTIME",
       [8] = "CLOCK_REALTIME_ALARM",
       [9] = "CLOCK_BOOTTIME_ALARM",
       [10] = "CLOCK_SGI_CYCLE",
       [11] = "CLOCK_TAI",
};
+
+static void test_one_clock_gettime(int clock, const char *name)
+{
+       struct timespec start, vdso, end;
+       int vdso_ret, end_ret;
+
+       printf("[RUN]\tTesting clock_gettime for clock %s (%d)...\n", name, clock);
+
+       if (sys_clock_gettime(clock, &start) < 0) {
+               if (errno == EINVAL) {
+                       vdso_ret = vdso_clock_gettime(clock, &vdso);
+                       if (vdso_ret == -EINVAL) {
+                               printf("[OK]\tNo such clock.\n");
+                       } else {
+                               printf("[FAIL]\tNo such clock, but __vdso_clock_gettime returned %d\n", vdso_ret);
+                               nerrs++;
+                       }
+               } else {
+                       printf("[WARN]\t clock_gettime(%d) syscall returned error %d\n", clock, errno);
+               }
+               return;
+       }
+
+       vdso_ret = vdso_clock_gettime(clock, &vdso);
+       end_ret = sys_clock_gettime(clock, &end);
+
+       if (vdso_ret != 0 || end_ret != 0) {
+               printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
+                      vdso_ret, errno);
+               nerrs++;
+               return;
+       }
+
+       printf("\t%llu.%09ld %llu.%09ld %llu.%09ld\n",
+              (unsigned long long)start.tv_sec, start.tv_nsec,
+              (unsigned long long)vdso.tv_sec, vdso.tv_nsec,
+              (unsigned long long)end.tv_sec, end.tv_nsec);
+
+       if (!ts_leq(&start, &vdso) || !ts_leq(&vdso, &end)) {
+               printf("[FAIL]\tTimes are out of sequence\n");
+               nerrs++;
+       }
+}
+
+static void test_clock_gettime(void)
+{
+       for (int clock = 0; clock < sizeof(clocknames) / sizeof(clocknames[0]);
+            clock++) {
+               test_one_clock_gettime(clock, clocknames[clock]);
+       }
+
+       /* Also test some invalid clock ids */
+       test_one_clock_gettime(-1, "invalid");
+       test_one_clock_gettime(INT_MIN, "invalid");
+       test_one_clock_gettime(INT_MAX, "invalid");
+}
+
/*
 * Compare gettimeofday() between the kernel syscall and the vDSO: the
 * vDSO timestamp must fall between two syscall samples, the reported
 * timezones must match, and passing a NULL tz pointer must not crash.
 */
static void test_gettimeofday(void)
{
       struct timeval start, vdso, end;
       struct timezone sys_tz, vdso_tz;
       int vdso_ret, end_ret;

       /* Skip quietly if the vDSO symbol lookup failed earlier. */
       if (!vdso_gettimeofday)
               return;

       printf("[RUN]\tTesting gettimeofday...\n");

       if (sys_gettimeofday(&start, &sys_tz) < 0) {
               printf("[FAIL]\tsys_gettimeofday failed (%d)\n", errno);
               nerrs++;
               return;
       }

       /* Sample the vDSO between two syscall samples to bound its result. */
       vdso_ret = vdso_gettimeofday(&vdso, &vdso_tz);
       end_ret = sys_gettimeofday(&end, NULL);

       if (vdso_ret != 0 || end_ret != 0) {
               printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
                      vdso_ret, errno);
               nerrs++;
               return;
       }

       /* Dump the three samples: syscall, vDSO, syscall. */
       printf("\t%llu.%06ld %llu.%06ld %llu.%06ld\n",
              (unsigned long long)start.tv_sec, start.tv_usec,
              (unsigned long long)vdso.tv_sec, vdso.tv_usec,
              (unsigned long long)end.tv_sec, end.tv_usec);

       /* The vDSO reading must fall between the two syscall readings. */
       if (!tv_leq(&start, &vdso) || !tv_leq(&vdso, &end)) {
               printf("[FAIL]\tTimes are out of sequence\n");
               nerrs++;
       }

       /* Both paths should report the same kernel timezone state. */
       if (sys_tz.tz_minuteswest == vdso_tz.tz_minuteswest &&
           sys_tz.tz_dsttime == vdso_tz.tz_dsttime) {
               printf("[OK]\ttimezones match: minuteswest=%d, dsttime=%d\n",
                      sys_tz.tz_minuteswest, sys_tz.tz_dsttime);
       } else {
               printf("[FAIL]\ttimezones do not match\n");
               nerrs++;
       }

       /* And make sure that passing NULL for tz doesn't crash. */
       vdso_gettimeofday(&vdso, NULL);
}
+
 int main(int argc, char **argv)
 {
        fill_function_pointers();
 
+       test_clock_gettime();
+       test_gettimeofday();
+
+       /*
+        * Test getcpu() last so that, if something goes wrong setting affinity,
+        * we still run the other tests.
+        */
        test_getcpu();
 
        return nerrs ? 1 : 0;