Merge branches 'pm-devfreq' and 'pm-domains'
author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
           Mon, 6 May 2019 08:55:49 +0000 (10:55 +0200)
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>
           Mon, 6 May 2019 08:55:49 +0000 (10:55 +0200)
* pm-devfreq:
  PM / devfreq: add tracing for scheduling work
  trace: events: add devfreq trace event file
  PM / devfreq: rk3399_dmc: Pass ODT and auto power down parameters to TF-A.
  PM / devfreq: rockchip-dfi: Move GRF definitions to a common place.
  PM / devfreq: exynos-bus: Suspend all devices on system shutdown
  PM / devfreq: Fix static checker warning in try_then_request_governor
  PM / devfreq: Restart previous governor if new governor fails to start
  PM / devfreq: tegra: remove unneeded variable
  PM / devfreq: rockchip-dfi: remove unneeded semicolon
  PM / devfreq: rk3399_dmc: remove unneeded semicolon
  PM / devfreq: consistent indentation
  PM / devfreq: fix missing check of return value in devfreq_add_device()
  PM / devfreq: fix mem leak in devfreq_add_device()
  PM / devfreq: Use of_node_name_eq for node name comparisons

* pm-domains:
  PM / Domains: Allow to attach a CPU via genpd_dev_pm_attach_by_id|name()
  PM / Domains: Search for the CPU device outside the genpd lock
  PM / Domains: Drop unused in-parameter to some genpd functions
  PM / Domains: Use the base device for driver_deferred_probe_check_state()
  PM / Domains: Enable genpd_dev_pm_attach_by_id|name() for single PM domain
  PM / Domains: Allow OF lookup for multi PM domain case from ->attach_dev()
  PM / Domains: Don't kfree() the virtual device in the error path
  PM / Domains: remove unnecessary unlikely()

524 files changed:
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/admin-guide/pm/cpufreq.rst
Documentation/admin-guide/pm/cpuidle.rst
Documentation/admin-guide/pm/index.rst
Documentation/admin-guide/pm/intel_epb.rst [new file with mode: 0644]
Documentation/admin-guide/pm/intel_pstate.rst
Documentation/admin-guide/pm/sleep-states.rst
Documentation/admin-guide/pm/strategies.rst
Documentation/admin-guide/pm/system-wide.rst
Documentation/admin-guide/pm/working-state.rst
Documentation/devicetree/bindings/net/davinci_emac.txt
Documentation/devicetree/bindings/net/ethernet.txt
Documentation/devicetree/bindings/net/macb.txt
Documentation/driver-api/pm/cpuidle.rst
Documentation/driver-api/pm/devices.rst
Documentation/driver-api/pm/index.rst
Documentation/driver-api/pm/notifiers.rst
Documentation/driver-api/pm/types.rst
Documentation/media/uapi/rc/rc-tables.rst
Documentation/networking/decnet.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/rxrpc.txt
Documentation/sysctl/vm.txt
MAINTAINERS
Makefile
arch/alpha/kernel/syscalls/syscall.tbl
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/boot/compressed/head.S
arch/arm/kernel/head-nommu.S
arch/arm/tools/syscall.tbl
arch/arm64/include/asm/futex.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/ftrace.c
arch/arm64/mm/init.c
arch/ia64/kernel/syscalls/syscall.tbl
arch/m68k/kernel/syscalls/syscall.tbl
arch/microblaze/kernel/syscalls/syscall.tbl
arch/mips/ath79/setup.c
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/syscalls/syscall_n32.tbl
arch/mips/kernel/syscalls/syscall_n64.tbl
arch/mips/kernel/syscalls/syscall_o32.tbl
arch/parisc/kernel/syscalls/syscall.tbl
arch/powerpc/configs/skiroot_defconfig
arch/powerpc/kernel/syscalls/syscall.tbl
arch/powerpc/mm/mmu_context_iommu.c
arch/powerpc/platforms/Kconfig.cputype
arch/riscv/configs/rv32_defconfig [new file with mode: 0644]
arch/riscv/mm/init.c
arch/s390/boot/mem_detect.c
arch/s390/kernel/fpu.c
arch/s390/kernel/syscalls/syscall.tbl
arch/s390/kernel/vtime.c
arch/sh/kernel/syscalls/syscall.tbl
arch/sparc/kernel/syscalls/syscall.tbl
arch/x86/Kconfig
arch/x86/boot/compressed/misc.c
arch/x86/crypto/poly1305-avx2-x86_64.S
arch/x86/crypto/poly1305-sse2-x86_64.S
arch/x86/events/amd/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/cstate.c
arch/x86/events/perf_event.h
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/uapi/asm/vmx.h
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_epb.c [new file with mode: 0644]
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/process.c
arch/x86/kernel/reboot.c
arch/x86/kernel/tsc.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kvm/emulate.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/pmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/trace.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/mm/dump_pagetables.c
arch/x86/mm/init.c
arch/x86/mm/ioremap.c
arch/x86/mm/kaslr.c
arch/x86/mm/tlb.c
arch/xtensa/kernel/syscalls/syscall.tbl
block/bfq-iosched.c
block/blk-mq.c
crypto/lrw.c
crypto/testmgr.h
crypto/xts.c
drivers/acpi/nfit/core.c
drivers/acpi/nfit/intel.c
drivers/acpi/processor_perflib.c
drivers/atm/firestream.c
drivers/base/memory.c
drivers/base/power/domain.c
drivers/base/power/domain_governor.c
drivers/base/power/main.c
drivers/base/power/wakeup.c
drivers/block/zram/zram_drv.c
drivers/char/ipmi/ipmi_dmi.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_hardcode.c
drivers/clocksource/Kconfig
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/timer-oxnas-rps.c
drivers/clocksource/timer-ti-dm.c
drivers/cpufreq/Kconfig
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/amd_freq_sensitivity.c
drivers/cpufreq/armada-37xx-cpufreq.c
drivers/cpufreq/armada-8k-cpufreq.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_stats.c
drivers/cpufreq/freq_table.c
drivers/cpufreq/imx6q-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/kirkwood-cpufreq.c
drivers/cpufreq/maple-cpufreq.c
drivers/cpufreq/pasemi-cpufreq.c
drivers/cpufreq/pmac32-cpufreq.c
drivers/cpufreq/powernow-k8.c
drivers/cpufreq/ppc_cbe_cpufreq.c
drivers/cpufreq/qoriq-cpufreq.c
drivers/cpufreq/speedstep-centrino.c
drivers/cpuidle/cpuidle-exynos.c
drivers/cpuidle/cpuidle.c
drivers/dma/bcm2835-dma.c
drivers/dma/mediatek/mtk-cqdma.c
drivers/dma/sh/rcar-dmac.c
drivers/extcon/Kconfig
drivers/firmware/Kconfig
drivers/firmware/Makefile
drivers/firmware/psci/Kconfig [new file with mode: 0644]
drivers/firmware/psci/Makefile [new file with mode: 0644]
drivers/firmware/psci/psci.c [moved from drivers/firmware/psci.c with 90% similarity]
drivers/firmware/psci/psci_checker.c [moved from drivers/firmware/psci_checker.c with 100% similarity]
drivers/gpio/gpio-eic-sprd.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/tegra/hdmi.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_memory.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/virtio/virtgpu_drv.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_prime.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/host1x/hw/channel_hw.c
drivers/gpu/ipu-v3/ipu-dp.c
drivers/hid/hid-input.c
drivers/i3c/master.c
drivers/i3c/master/dw-i3c-master.c
drivers/iio/accel/kxcjk-1013.c
drivers/iio/adc/ad_sigma_delta.c
drivers/iio/adc/at91_adc.c
drivers/iio/adc/xilinx-xadc-core.c
drivers/iio/chemical/Kconfig
drivers/iio/chemical/bme680.h
drivers/iio/chemical/bme680_core.c
drivers/iio/chemical/bme680_i2c.c
drivers/iio/chemical/bme680_spi.c
drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
drivers/iio/dac/mcp4725.c
drivers/iio/gyro/bmg160_core.c
drivers/iio/gyro/mpu3050-core.c
drivers/iio/industrialio-buffer.c
drivers/iio/industrialio-core.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/hns/hns_roce_qp.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/sw/rdmavt/mr.c
drivers/input/keyboard/Kconfig
drivers/input/keyboard/snvs_pwrkey.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/rmi4/rmi_driver.c
drivers/input/rmi4/rmi_f11.c
drivers/irqchip/irq-ath79-misc.c
drivers/isdn/mISDN/socket.c
drivers/misc/fastrpc.c
drivers/misc/habanalabs/goya/goya.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/atheros/atlx/atl1.c
drivers/net/ethernet/atheros/atlx/atl1.h
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/atheros/atlx/atl2.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/neterion/vxge/vxge-config.c
drivers/net/ethernet/netronome/nfp/abm/cls.c
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_int.c
drivers/net/ethernet/qlogic/qed/qed_int.h
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qede/qede_ptp.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/ethernet/stmicro/stmmac/norm_desc.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/phy/spi_ks8995.c
drivers/net/slip/slhc.c
drivers/net/team/team.c
drivers/net/vrf.c
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/init.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mediatek/mt76/mt7603/init.c
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
drivers/net/wireless/mediatek/mt76/mt7603/main.c
drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
drivers/net/wireless/ralink/rt2x00/rt2x00.h
drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
drivers/nfc/st95hf/core.c
drivers/nvdimm/btt_devs.c
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/pmem.c
drivers/nvdimm/security.c
drivers/of/of_net.c
drivers/opp/core.c
drivers/power/supply/goldfish_battery.c
drivers/s390/block/dasd_eckd.c
drivers/s390/char/con3270.c
drivers/s390/char/fs3270.c
drivers/s390/char/raw3270.c
drivers/s390/char/raw3270.h
drivers/s390/char/tty3270.c
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/pkey_api.c
drivers/s390/net/ctcm_main.c
drivers/scsi/aic7xxx/aic7770_osm.c
drivers/scsi/aic7xxx/aic7xxx.h
drivers/scsi/aic7xxx/aic7xxx_osm.c
drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/scsi_lib.c
drivers/staging/comedi/drivers/ni_usb6501.c
drivers/staging/comedi/drivers/vmk80xx.c
drivers/staging/erofs/data.c
drivers/staging/iio/adc/ad7192.c
drivers/staging/iio/meter/ade7854.c
drivers/staging/most/core.c
drivers/tty/serial/sc16is7xx.c
drivers/tty/serial/sh-sci.c
drivers/tty/vt/vt.c
drivers/vhost/vhost.c
fs/afs/callback.c
fs/afs/cmservice.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/rxrpc.c
fs/afs/server.c
fs/afs/write.c
fs/btrfs/file-item.c
fs/btrfs/ordered-data.c
fs/ceph/dir.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/snap.c
fs/cifs/cifsglob.h
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/dax.c
fs/io_uring.c
fs/nfsd/nfs3proc.c
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4state.c
fs/nfsd/state.h
fs/proc/proc_sysctl.c
fs/proc/task_mmu.c
fs/splice.c
fs/userfaultfd.c
include/drm/ttm/ttm_bo_driver.h
include/dt-bindings/clock/sifive-fu540-prci.h [new file with mode: 0644]
include/linux/blkdev.h
include/linux/bvec.h
include/linux/cpufreq.h
include/linux/cpuhotplug.h
include/linux/cpuidle.h
include/linux/efi.h
include/linux/elevator.h
include/linux/etherdevice.h
include/linux/kprobes.h
include/linux/kvm_host.h
include/linux/netdevice.h
include/linux/pipe_fs_i.h
include/linux/pm_domain.h
include/linux/pm_opp.h
include/linux/sched/mm.h
include/linux/shmem_fs.h
include/linux/suspend.h
include/linux/tick.h
include/net/af_rxrpc.h
include/net/cfg80211.h
include/net/mac80211.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_l4proto.h
include/net/netrom.h
include/net/sock.h
include/net/tls.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/psci.h
include/uapi/rdma/mlx5-abi.h
init/main.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/kprobes.c
kernel/locking/lockdep.c
kernel/power/hibernate.c
kernel/power/main.c
kernel/power/suspend.c
kernel/power/user.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/signal.c
kernel/time/sched_clock.c
kernel/time/tick-common.c
kernel/time/tick-sched.c
kernel/time/timekeeping.h
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/watchdog_hld.c
lib/Kconfig.debug
lib/test_vmalloc.c
mm/kmemleak.c
mm/memory_hotplug.c
mm/mmap.c
mm/page_alloc.c
mm/percpu.c
mm/shmem.c
mm/slab.c
mm/swapfile.c
mm/vmscan.c
mm/vmstat.c
net/atm/lec.c
net/bluetooth/sco.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/netfilter/ebtables.c
net/core/dev.c
net/core/failover.c
net/core/filter.c
net/core/net-sysfs.c
net/core/ptp_classifier.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/ipv4/fou.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_dctcp.c
net/ipv4/tcp_input.c
net/ipv6/addrlabel.c
net/ipv6/route.c
net/ipv6/udp.c
net/llc/af_llc.c
net/mac80211/driver-ops.h
net/mac80211/key.c
net/mac80211/mesh_pathtbl.c
net/mac80211/rx.c
net/mac80211/trace_msg.h
net/mac80211/tx.c
net/ncsi/ncsi-rsp.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_icmp.c
net/netfilter/nf_conntrack_proto_icmpv6.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c
net/netfilter/xt_time.c
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/netrom/nr_loopback.c
net/netrom/nr_route.c
net/netrom/sysctl_net_netrom.c
net/rds/af_rds.c
net/rds/bind.c
net/rds/ib_fmr.c
net/rds/ib_rdma.c
net/rose/rose_loopback.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/conn_event.c
net/rxrpc/input.c
net/rxrpc/local_object.c
net/rxrpc/peer_event.c
net/rxrpc/sendmsg.c
net/sctp/socket.c
net/smc/af_smc.c
net/smc/smc_close.c
net/smc/smc_close.h
net/smc/smc_ism.c
net/smc/smc_pnet.c
net/strparser/strparser.c
net/sunrpc/cache.c
net/sunrpc/clnt.c
net/tipc/link.c
net/tipc/name_table.c
net/tipc/sysctl.c
net/tls/tls_device.c
net/tls/tls_device_fallback.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/scan.c
net/wireless/util.c
scripts/atomic/gen-atomics.sh
security/device_cgroup.c
sound/core/info.c
sound/core/init.c
sound/pci/hda/hda_codec.c
sound/pci/hda/patch_realtek.c
tools/include/uapi/sound/asound.h
tools/lib/traceevent/event-parse.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/scripts/python/export-to-sqlite.py
tools/perf/util/env.c
tools/perf/util/evlist.c
tools/perf/util/evsel.c
tools/perf/util/header.c
tools/perf/util/map.c
tools/perf/util/map.h
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/x86_64/evmcs_test.c
tools/testing/selftests/kvm/x86_64/smm_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/state_test.c
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/run_afpackettests
tools/testing/selftests/net/run_netsocktests
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/conntrack_icmp_related.sh [new file with mode: 0755]
tools/testing/selftests/netfilter/nft_nat.sh
tools/testing/selftests/proc/proc-pid-vm.c
tools/testing/selftests/proc/proc-self-map-files-002.c
virt/kvm/irqchip.c
virt/kvm/kvm_main.c

diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 9605dbd4b5b59ecc251913b1b327e6031cfb875f..7f4af7da3fbcd34ceee251d3ad708f636c86003c 100644 (file)
@@ -518,3 +518,21 @@ Description:       Control Symetric Multi Threading (SMT)
 
                         If control status is "forceoff" or "notsupported" writes
                         are rejected.
+
+What:          /sys/devices/system/cpu/cpu#/power/energy_perf_bias
+Date:          March 2019
+Contact:       linux-pm@vger.kernel.org
+Description:   Intel Energy and Performance Bias Hint (EPB)
+
+               EPB for the given CPU in a sliding scale 0 - 15, where a value
+               of 0 corresponds to a hint preference for highest performance
+               and a value of 15 corresponds to the maximum energy savings.
+
+               In order to change the EPB value for the CPU, write either
+               a number in the 0 - 15 sliding scale above, or one of the
+               strings: "performance", "balance-performance", "normal",
+               "balance-power", "power" (that represent values reflected by
+               their meaning), to this attribute.
+
+               This attribute is present for all online CPUs supporting the
+               Intel EPB feature.
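[A minimal user-space sketch of the attribute described above, added for illustration only: the sysfs path follows the ABI text, writing requires root, and the file is absent on CPUs without Intel EPB support.]

	/* Read the current EPB value for cpu0, then request "balance-power". */
	#include <stdio.h>

	int main(void)
	{
		const char *path =
			"/sys/devices/system/cpu/cpu0/power/energy_perf_bias";
		char buf[32];
		FILE *f = fopen(path, "r");

		if (!f) {
			perror(path);	/* absent if the CPU lacks Intel EPB */
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("current EPB: %s", buf);
		fclose(f);

		f = fopen(path, "w");
		if (!f) {
			perror(path);	/* writing needs root */
			return 1;
		}
		fputs("balance-power", f);	/* or a number from 0 to 15 */
		return fclose(f) ? 1 : 0;
	}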
diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst
index 7eca9026a9ed2c3ed2a35b7e2184660e8caa9fdf..0c74a778496480ac9899b7bce415798cc274c362 100644 (file)
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
 .. |struct cpufreq_policy| replace:: :c:type:`struct cpufreq_policy <cpufreq_policy>`
 .. |intel_pstate| replace:: :doc:`intel_pstate <intel_pstate>`
 
@@ -5,9 +8,10 @@
 CPU Performance Scaling
 =======================
 
-::
+:Copyright: |copy| 2017 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 
- Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 
 The Concept of CPU Performance Scaling
 ======================================
@@ -396,8 +400,8 @@ RT or deadline scheduling classes, the governor will increase the frequency to
 the allowed maximum (that is, the ``scaling_max_freq`` policy limit).  In turn,
 if it is invoked by the CFS scheduling class, the governor will use the
 Per-Entity Load Tracking (PELT) metric for the root control group of the
-given CPU as the CPU utilization estimate (see the `Per-entity load tracking`_
-LWN.net article for a description of the PELT mechanism).  Then, the new
+given CPU as the CPU utilization estimate (see the *Per-entity load tracking*
+LWN.net article [1]_ for a description of the PELT mechanism).  Then, the new
 CPU frequency to apply is computed in accordance with the formula
 
        f = 1.25 * ``f_0`` * ``util`` / ``max``
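[A quick worked instance of the formula above, added for illustration: in the case where ``f_0`` is the current CPU frequency, ``util``/``max`` = 0.8 gives f = 1.25 * ``f_0`` * 0.8 = ``f_0``, so the current frequency is retained; utilization above 80% yields f > ``f_0`` and prompts the governor to ask for a higher frequency.]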
@@ -698,4 +702,8 @@ hardware feature (e.g. all Intel ones), even if the
 :c:macro:`CONFIG_X86_ACPI_CPUFREQ_CPB` configuration option is set.
 
 
-.. _Per-entity load tracking: https://lwn.net/Articles/531853/
+References
+==========
+
+.. [1] Jonathan Corbet, *Per-entity load tracking*,
+       https://lwn.net/Articles/531853/
diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst
index 9c58b35a81cbcf38eab2db94a3f3e01f60601e6f..e70b365dbc6030e758c20f6aa6165498393dd42d 100644 (file)
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
 .. |struct cpuidle_state| replace:: :c:type:`struct cpuidle_state <cpuidle_state>`
 .. |cpufreq| replace:: :doc:`CPU Performance Scaling <cpufreq>`
 
@@ -5,9 +8,10 @@
 CPU Idle Time Management
 ========================
 
-::
+:Copyright: |copy| 2018 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 
- Copyright (c) 2018 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 
 Concepts
 ========
diff --git a/Documentation/admin-guide/pm/index.rst b/Documentation/admin-guide/pm/index.rst
index 49237ac734428b63951837ad40fd110f3b346dfb..39f8f9f81e7a3a3e5537e9927845d627f0f44b36 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ================
 Power Management
 ================
diff --git a/Documentation/admin-guide/pm/intel_epb.rst b/Documentation/admin-guide/pm/intel_epb.rst
new file mode 100644 (file)
index 0000000..0051211
--- /dev/null
@@ -0,0 +1,41 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+======================================
+Intel Performance and Energy Bias Hint
+======================================
+
+:Copyright: |copy| 2019 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+
+.. kernel-doc:: arch/x86/kernel/cpu/intel_epb.c
+   :doc: overview
+
+Intel Performance and Energy Bias Attribute in ``sysfs``
+========================================================
+
+The Intel Performance and Energy Bias Hint (EPB) value for a given (logical) CPU
+can be checked or updated through a ``sysfs`` attribute (file) under
+:file:`/sys/devices/system/cpu/cpu<N>/power/`, where the CPU number ``<N>``
+is allocated at the system initialization time:
+
+``energy_perf_bias``
+       Shows the current EPB value for the CPU in a sliding scale 0 - 15, where
+       a value of 0 corresponds to a hint preference for highest performance
+       and a value of 15 corresponds to the maximum energy savings.
+
+       In order to update the EPB value for the CPU, this attribute can be
+       written to, either with a number in the 0 - 15 sliding scale above, or
+       with one of the strings: "performance", "balance-performance", "normal",
+       "balance-power", "power" that represent values reflected by their
+       meaning.
+
+       This attribute is present for all online CPUs supporting the EPB
+       feature.
+
+Note that while the EPB interface to the processor is defined at the logical CPU
+level, the physical register backing it may be shared by multiple CPUs (for
+example, SMT siblings or cores in one package).  For this reason, updating the
+EPB value for one CPU may cause the EPB values for other CPUs to change.
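[Illustration of the shared-register note above, with a hypothetical topology: on a system where cpu0 and cpu4 are SMT siblings backed by the same register, writing "power" to cpu0/power/energy_perf_bias may cause cpu4/power/energy_perf_bias to read back the corresponding value (15) as well.]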
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index ec0f7c111f65b14c4328d7a9e0cd3314a8fba36f..67e414e34f37997e8bc620e904797cb3f3aa13c4 100644 (file)
@@ -1,10 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
 ===============================================
 ``intel_pstate`` CPU Performance Scaling Driver
 ===============================================
 
-::
+:Copyright: |copy| 2017 Intel Corporation
 
- Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 
 
 General Information
@@ -20,11 +23,10 @@ you have not done that yet.]
 
 For the processors supported by ``intel_pstate``, the P-state concept is broader
 than just an operating frequency or an operating performance point (see the
-`LinuxCon Europe 2015 presentation by Kristen Accardi <LCEU2015_>`_ for more
+LinuxCon Europe 2015 presentation by Kristen Accardi [1]_ for more
 information about that).  For this reason, the representation of P-states used
 by ``intel_pstate`` internally follows the hardware specification (for details
-refer to `Intel® 64 and IA-32 Architectures Software Developer’s Manual
-Volume 3: System Programming Guide <SDM_>`_).  However, the ``CPUFreq`` core
+refer to Intel Software Developer’s Manual [2]_).  However, the ``CPUFreq`` core
 uses frequencies for identifying operating performance points of CPUs and
 frequencies are involved in the user space interface exposed by it, so
 ``intel_pstate`` maps its internal representation of P-states to frequencies too
@@ -561,9 +563,9 @@ or to pin every task potentially sensitive to them to a specific CPU.]
 
 On the majority of systems supported by ``intel_pstate``, the ACPI tables
 provided by the platform firmware contain ``_PSS`` objects returning information
-that can be used for CPU performance scaling (refer to the `ACPI specification`_
-for details on the ``_PSS`` objects and the format of the information returned
-by them).
+that can be used for CPU performance scaling (refer to the ACPI specification
+[3]_ for details on the ``_PSS`` objects and the format of the information
+returned by them).
 
 The information returned by the ACPI ``_PSS`` objects is used by the
 ``acpi-cpufreq`` scaling driver.  On systems supported by ``intel_pstate``
@@ -728,6 +730,14 @@ P-state is called, the ``ftrace`` filter can be set to to
            <idle>-0     [000] ..s.  2537.654843: intel_pstate_set_pstate <-intel_pstate_timer_func
 
 
-.. _LCEU2015: http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf
-.. _SDM: http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-system-programming-manual-325384.html
-.. _ACPI specification: http://www.uefi.org/sites/default/files/resources/ACPI_6_1.pdf
+References
+==========
+
+.. [1] Kristen Accardi, *Balancing Power and Performance in the Linux Kernel*,
+       http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf
+
+.. [2] *Intel® 64 and IA-32 Architectures Software Developer’s Manual Volume 3: System Programming Guide*,
+       http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-system-programming-manual-325384.html
+
+.. [3] *Advanced Configuration and Power Interface Specification*,
+       https://uefi.org/sites/default/files/resources/ACPI_6_3_final_Jan30.pdf
diff --git a/Documentation/admin-guide/pm/sleep-states.rst b/Documentation/admin-guide/pm/sleep-states.rst
index dbf5acd49f350de349bf228e9b7c824e2cfd04b1..cd3a28cb81f424d0962046ebb69292c214041150 100644 (file)
@@ -1,10 +1,14 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
 ===================
 System Sleep States
 ===================
 
-::
+:Copyright: |copy| 2017 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 
- Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 
 Sleep states are global low-power states of the entire system in which user
 space code cannot be executed and the overall system activity is significantly
diff --git a/Documentation/admin-guide/pm/strategies.rst b/Documentation/admin-guide/pm/strategies.rst
index afe4d3f831fe06fb7e04989cf317f1d5caaaebd8..dd0362e32fa55e90cd13784e1402310717416b39 100644 (file)
@@ -1,10 +1,14 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
 ===========================
 Power Management Strategies
 ===========================
 
-::
+:Copyright: |copy| 2017 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 
- Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 
 The Linux kernel supports two major high-level power management strategies.
 
diff --git a/Documentation/admin-guide/pm/system-wide.rst b/Documentation/admin-guide/pm/system-wide.rst
index 0c81e4c5de398db435e85d486cd821bebea65a33..2b1f987b34f007a3d2cffd56a4b64fee3f2aa4c4 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ============================
 System-Wide Power Management
 ============================
diff --git a/Documentation/admin-guide/pm/working-state.rst b/Documentation/admin-guide/pm/working-state.rst
index b6cef9b5e961e6bdec653707e1457e0ddb87e013..fc298eb1234b07ba70e35ae26d1dd750834419d7 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ==============================
 Working-State Power Management
 ==============================
@@ -8,3 +10,4 @@ Working-State Power Management
    cpuidle
    cpufreq
    intel_pstate
+   intel_epb
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
index 24c5cdaba8d279a4b132fbd2f964ae1460b3fd0f..ca83dcc84fb8ee5cfd876cf0bb3d8af5fd85ba6b 100644 (file)
@@ -20,6 +20,8 @@ Required properties:
 Optional properties:
 - phy-handle: See ethernet.txt file in the same directory.
               If absent, davinci_emac driver defaults to 100/FULL.
+- nvmem-cells: phandle, reference to an nvmem node for the MAC address
+- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used
 - ti,davinci-rmii-en: 1 byte, 1 means use RMII
 - ti,davinci-no-bd-ram: boolean, does EMAC have BD RAM?
 
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index cfc376bc977aa0a25e64d4e1ef617a1a326fe634..a6862158058461f5af428498ea14c98aed1f7775 100644 (file)
@@ -10,15 +10,14 @@ Documentation/devicetree/bindings/phy/phy-bindings.txt.
   the boot program; should be used in cases where the MAC address assigned to
   the device by the boot program is different from the "local-mac-address"
   property;
-- nvmem-cells: phandle, reference to an nvmem node for the MAC address;
-- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used;
 - max-speed: number, specifies maximum speed in Mbit/s supported by the device;
 - max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than
   the maximum frame size (there's contradiction in the Devicetree
   Specification).
 - phy-mode: string, operation mode of the PHY interface. This is now a de-facto
   standard property; supported values are:
-  * "internal"
+  * "internal" (Internal means there is not a standard bus between the MAC and
+     the PHY, something proprietary is being used to embed the PHY in the MAC.)
   * "mii"
   * "gmii"
   * "sgmii"
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 174f292d8a3e8c14cf7d5d1105380b5f3d358544..8b80515729d7145cc05c9293857212ba914e0607 100644 (file)
@@ -26,6 +26,10 @@ Required properties:
        Optional elements: 'tsu_clk'
 - clocks: Phandles to input clocks.
 
+Optional properties:
+- nvmem-cells: phandle, reference to an nvmem node for the MAC address
+- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used
+
 Optional properties for PHY child node:
 - reset-gpios : Should specify the gpio for phy reset
 - magic-packet : If present, indicates that the hardware supports waking
diff --git a/Documentation/driver-api/pm/cpuidle.rst b/Documentation/driver-api/pm/cpuidle.rst
index 5842ab621a58c9c61fbfb13fb6b933a72bc889e6..006cf6db40c6d32c8a13b45930b1cdbc1d8f4c3e 100644 (file)
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
 .. |struct cpuidle_governor| replace:: :c:type:`struct cpuidle_governor <cpuidle_governor>`
 .. |struct cpuidle_device| replace:: :c:type:`struct cpuidle_device <cpuidle_device>`
 .. |struct cpuidle_driver| replace:: :c:type:`struct cpuidle_driver <cpuidle_driver>`
@@ -7,9 +10,9 @@
 CPU Idle Time Management
 ========================
 
-::
+:Copyright: |copy| 2019 Intel Corporation
 
- Copyright (c) 2019 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 
 
 CPU Idle Time Management Subsystem
diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst
index 090c151aa86bd4f7728c7e72b2368c3c4cb0aeb6..30835683616a68b008eaa49968329d60fe368fe0 100644 (file)
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
 .. |struct dev_pm_ops| replace:: :c:type:`struct dev_pm_ops <dev_pm_ops>`
 .. |struct dev_pm_domain| replace:: :c:type:`struct dev_pm_domain <dev_pm_domain>`
 .. |struct bus_type| replace:: :c:type:`struct bus_type <bus_type>`
 Device Power Management Basics
 ==============================
 
-::
+:Copyright: |copy| 2010-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+:Copyright: |copy| 2010 Alan Stern <stern@rowland.harvard.edu>
+:Copyright: |copy| 2016 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 
- Copyright (c) 2010-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- Copyright (c) 2010 Alan Stern <stern@rowland.harvard.edu>
- Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 
 Most of the code in Linux is device drivers, so most of the Linux power
 management (PM) code is also driver-specific.  Most drivers will do very
diff --git a/Documentation/driver-api/pm/index.rst b/Documentation/driver-api/pm/index.rst
index 56975c6bc78952d2e250a55542954a965df36413..c2a9ef8d115ceb91a646c6b9e4f41966b22116ce 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ===============================
 CPU and Device Power Management
 ===============================
diff --git a/Documentation/driver-api/pm/notifiers.rst b/Documentation/driver-api/pm/notifiers.rst
index 62f860026992dc523f9e6e0c4a85fe85abc6b2d5..186435c43b77e59fb1d9c0676682885833e67cb8 100644 (file)
@@ -1,10 +1,14 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
 =============================
 Suspend/Hibernation Notifiers
 =============================
 
-::
+:Copyright: |copy| 2016 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 
- Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 
 There are some operations that subsystems or drivers may want to carry out
 before hibernation/suspend or after restore/resume, but they require the system
diff --git a/Documentation/driver-api/pm/types.rst b/Documentation/driver-api/pm/types.rst
index 3ebdecc5410433bfffc1d17614e69600fd47e7f6..73a231caf764386d73c42073894e010b745a927b 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ==================================
 Device Power Management Data Types
 ==================================
diff --git a/Documentation/media/uapi/rc/rc-tables.rst b/Documentation/media/uapi/rc/rc-tables.rst
index f460031d85313821ac91d940b915d26994d1ed00..177ac44fa0fac33363d34f840c663d4699039a79 100644 (file)
@@ -623,7 +623,7 @@ the remote via /dev/input/event devices.
 
     -  .. row 78
 
-       -  ``KEY_SCREEN``
+       -  ``KEY_ASPECT_RATIO``
 
        -  Select screen aspect ratio
 
@@ -631,7 +631,7 @@ the remote via /dev/input/event devices.
 
     -  .. row 79
 
-       -  ``KEY_ZOOM``
+       -  ``KEY_FULL_SCREEN``
 
        -  Put device into zoom/full screen mode
 
diff --git a/Documentation/networking/decnet.txt b/Documentation/networking/decnet.txt
index e12a4900cf72cb00b1ade4c0257a23c93d2d8f21..d192f8b9948b5483c16b83f41a5b25c1e5cda846 100644 (file)
@@ -22,8 +22,6 @@ you'll need the following options as well...
     CONFIG_DECNET_ROUTER (to be able to add/delete routes)
     CONFIG_NETFILTER (will be required for the DECnet routing daemon)
 
-    CONFIG_DECNET_ROUTE_FWMARK is optional
-
 Don't turn on SIOCGIFCONF support for DECnet unless you are really sure
 that you need it, in general you won't and it can cause ifconfig to
 malfunction.
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index acdfb5d2bcaa44a8a0ecdcfcae14202d1ed75bc3..e2142fe40cdad09e80f43110a2f93dc59b5900f8 100644 (file)
@@ -422,6 +422,7 @@ tcp_min_rtt_wlen - INTEGER
        minimum RTT when it is moved to a longer path (e.g., due to traffic
        engineering). A longer window makes the filter more resistant to RTT
        inflations such as transient congestion. The unit is seconds.
+       Possible values: 0 - 86400 (1 day)
        Default: 300
 
 tcp_moderate_rcvbuf - BOOLEAN
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index 2df5894353d6954f5c0dd26d2c149c6e9ee6ee4c..cd7303d7fa25dac9ae38d0e73186f3687b7872a7 100644 (file)
@@ -1009,16 +1009,18 @@ The kernel interface functions are as follows:
 
  (*) Check call still alive.
 
-       u32 rxrpc_kernel_check_life(struct socket *sock,
-                                   struct rxrpc_call *call);
+       bool rxrpc_kernel_check_life(struct socket *sock,
+                                    struct rxrpc_call *call,
+                                    u32 *_life);
        void rxrpc_kernel_probe_life(struct socket *sock,
                                     struct rxrpc_call *call);
 
-     The first function returns a number that is updated when ACKs are received
-     from the peer (notably including PING RESPONSE ACKs which we can elicit by
-     sending PING ACKs to see if the call still exists on the server).  The
-     caller should compare the numbers of two calls to see if the call is still
-     alive after waiting for a suitable interval.
+     The first function passes back in *_life a number that is updated when
+     ACKs are received from the peer (notably including PING RESPONSE ACKs
+     which we can elicit by sending PING ACKs to see if the call still exists
+     on the server).  The caller should compare the numbers of two calls to see
+     if the call is still alive after waiting for a suitable interval.  It also
+     returns true as long as the call hasn't yet reached the completed state.
 
      This allows the caller to work out if the server is still contactable and
      if the call is still alive on the server while waiting for the server to
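     [A hedged kernel-side sketch of the polling pattern described above,
     added for illustration only: my_call_seems_alive() is a hypothetical
     helper, and "sock"/"call" are assumed to be a live AF_RXRPC socket
     and call.]

	#include <linux/delay.h>
	#include <linux/types.h>
	#include <net/af_rxrpc.h>

	/* Poll twice, a suitable interval apart, per the semantics above. */
	static bool my_call_seems_alive(struct socket *sock,
					struct rxrpc_call *call)
	{
		u32 life_before, life_after;

		if (!rxrpc_kernel_check_life(sock, call, &life_before))
			return false;		/* call already completed */
		msleep(1000);			/* wait a suitable interval */
		if (!rxrpc_kernel_check_life(sock, call, &life_after))
			return false;
		/* No ACK activity seen; elicit a PING RESPONSE ACK. */
		if (life_after == life_before)
			rxrpc_kernel_probe_life(sock, call);
		return true;
	}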
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 6af24cdb25ccb51a947d0bf50f3442b53a963d81..3f13d8599337ea8a010d3a33ae605201691a427e 100644 (file)
@@ -866,14 +866,14 @@ The intent is that compaction has less work to do in the future and to
 increase the success rate of future high-order allocations such as SLUB
 allocations, THP and hugetlbfs pages.
 
-To make it sensible with respect to the watermark_scale_factor parameter,
-the unit is in fractions of 10,000. The default value of 15,000 means
-that up to 150% of the high watermark will be reclaimed in the event of
-a pageblock being mixed due to fragmentation. The level of reclaim is
-determined by the number of fragmentation events that occurred in the
-recent past. If this value is smaller than a pageblock then a pageblocks
-worth of pages will be reclaimed (e.g.  2MB on 64-bit x86). A boost factor
-of 0 will disable the feature.
+To make it sensible with respect to the watermark_scale_factor
+parameter, the unit is in fractions of 10,000. The default value of
+15,000 on !DISCONTIGMEM configurations means that up to 150% of the high
+watermark will be reclaimed in the event of a pageblock being mixed due
+to fragmentation. The level of reclaim is determined by the number of
+fragmentation events that occurred in the recent past. If this value is
+smaller than a pageblock then a pageblocks worth of pages will be reclaimed
+(e.g.  2MB on 64-bit x86). A boost factor of 0 will disable the feature.
 
 =============================================================
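[Worked example of the boost arithmetic above, with illustrative figures: at the default watermark_boost_factor of 15,000 and a zone high watermark of 1,000 pages, fragmentation events can trigger reclaim of up to 15000/10000 * 1000 = 1,500 pages, subject to the pageblock minimum mentioned above.]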
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 27ed10966c8175a1ac677cdc08ee3f4166002ea4..dbb490f2b0a054ac6f332c2d4eb16cd500fa76bb 100644 (file)
@@ -3121,6 +3121,7 @@ F:        drivers/cpufreq/bmips-cpufreq.c
 BROADCOM BMIPS MIPS ARCHITECTURE
 M:     Kevin Cernekee <cernekee@gmail.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
+L:     bcm-kernel-feedback-list@broadcom.com
 L:     linux-mips@vger.kernel.org
 T:     git git://github.com/broadcom/stblinux.git
 S:     Maintained
@@ -7334,7 +7335,6 @@ F:        Documentation/devicetree/bindings/i3c/
 F:     Documentation/driver-api/i3c
 F:     drivers/i3c/
 F:     include/linux/i3c/
-F:     include/dt-bindings/i3c/
 
 I3C DRIVER FOR SYNOPSYS DESIGNWARE
 M:     Vitor Soares <vitor.soares@synopsys.com>
@@ -8709,6 +8709,7 @@ F:        scripts/leaking_addresses.pl
 LED SUBSYSTEM
 M:     Jacek Anaszewski <jacek.anaszewski@gmail.com>
 M:     Pavel Machek <pavel@ucw.cz>
+R:     Dan Murphy <dmurphy@ti.com>
 L:     linux-leds@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
 S:     Maintained
@@ -10146,7 +10147,7 @@ F:      drivers/spi/spi-at91-usart.c
 F:     Documentation/devicetree/bindings/mfd/atmel-usart.txt
 
 MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
-M:     Woojung Huh <Woojung.Huh@microchip.com>
+M:     Woojung Huh <woojung.huh@microchip.com>
 M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -12416,7 +12417,7 @@ M:      Mark Rutland <mark.rutland@arm.com>
 M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 L:     linux-arm-kernel@lists.infradead.org
 S:     Maintained
-F:     drivers/firmware/psci*.c
+F:     drivers/firmware/psci/
 F:     include/linux/psci.h
 F:     include/uapi/linux/psci.h
 
diff --git a/Makefile b/Makefile
index 71fd5c2ce06750c8f3dc19675c3e4a392f964d14..2b99679148dc74fa73f05149adb2c144c0988a5d 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index 63ed39cbd3bd13a40e98ec3a9aff3b3266b3e235..165f268beafc471e14eac4c8d6d08e52c5c89864 100644 (file)
 532    common  getppid                         sys_getppid
 # all other architectures have common numbers for new syscall, alpha
 # is the exception.
+534    common  pidfd_send_signal               sys_pidfd_send_signal
+535    common  io_uring_setup                  sys_io_uring_setup
+536    common  io_uring_enter                  sys_io_uring_enter
+537    common  io_uring_register               sys_io_uring_register
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 850b4805e2d171436e539b326867d6ce08a6f9d6..9aed25a6019bc991166294b0923121ba513509dc 100644 (file)
@@ -73,7 +73,7 @@ config ARM
        select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
        select HAVE_EXIT_THREAD
        select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
-       select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL
+       select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
        select HAVE_FUNCTION_TRACER if !XIP_KERNEL
        select HAVE_GCC_PLUGINS
        select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 6d6e0330930b52f7369a46536473fa7174fad2d9..e388af4594a6e5e42a860469e10a53b89522e7bf 100644 (file)
@@ -47,8 +47,8 @@ config DEBUG_WX
 
 choice
        prompt "Choose kernel unwinder"
-       default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
-       default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
+       default UNWINDER_ARM if AEABI
+       default UNWINDER_FRAME_POINTER if !AEABI
        help
          This determines which method will be used for unwinding kernel stack
          traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
@@ -65,7 +65,7 @@ config UNWINDER_FRAME_POINTER
 
 config UNWINDER_ARM
        bool "ARM EABI stack unwinder"
-       depends on AEABI
+       depends on AEABI && !FUNCTION_GRAPH_TRACER
        select ARM_UNWIND
        help
          This option enables stack unwinding support in the kernel
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6c7ccb428c079c3e43ef9cce2c344ec4b6809369..7135820f76d4f8b8d24374738332c0c4c0644bf7 100644 (file)
@@ -1438,7 +1438,21 @@ ENTRY(efi_stub_entry)
 
                @ Preserve return value of efi_entry() in r4
                mov     r4, r0
-               bl      cache_clean_flush
+
+               @ our cache maintenance code relies on CP15 barrier instructions
+               @ but since we arrived here with the MMU and caches configured
+               @ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
+               @ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
+               @ the enable path will be executed on v7+ only.
+               mrc     p15, 0, r1, c1, c0, 0   @ read SCTLR
+               tst     r1, #(1 << 5)           @ CP15BEN bit set?
+               bne     0f
+               orr     r1, r1, #(1 << 5)       @ CP15 barrier instructions
+               mcr     p15, 0, r1, c1, c0, 0   @ write SCTLR
+ ARM(          .inst   0xf57ff06f              @ v7+ isb       )
+ THUMB(                isb                                             )
+
+0:             bl      cache_clean_flush
                bl      cache_off
 
                @ Set parameters for booting zImage according to boot protocol
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index c08d2d890f7b918981c472c155c6df368a1b30b3..b38bbd011b358f433e3c9201fdb2015611286252 100644 (file)
@@ -133,9 +133,9 @@ __secondary_data:
  */
        .text
 __after_proc_init:
-#ifdef CONFIG_ARM_MPU
 M_CLASS(movw   r12, #:lower16:BASEADDR_V7M_SCB)
 M_CLASS(movt   r12, #:upper16:BASEADDR_V7M_SCB)
+#ifdef CONFIG_ARM_MPU
 M_CLASS(ldr    r3, [r12, 0x50])
 AR_CLASS(mrc   p15, 0, r3, c0, c1, 4)          @ Read ID_MMFR0
        and     r3, r3, #(MMFR0_PMSA)           @ PMSA field
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 9016f4081bb9cff33886860e9a1d48f0ee58e47c..0393917eaa57aaf8cda4548b9a34a705be6c73a0 100644 (file)
 421    common  rt_sigtimedwait_time64          sys_rt_sigtimedwait
 422    common  futex_time64                    sys_futex
 423    common  sched_rr_get_interval_time64    sys_sched_rr_get_interval
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index e1d95f08f8e127d2e7bf334f94b146669aab79cb..c7e1a7837706c17eeffd96edd17bcc4da0009af2 100644 (file)
@@ -50,7 +50,7 @@ do {                                                                  \
 static inline int
 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
 {
-       int oldval, ret, tmp;
+       int oldval = 0, ret, tmp;
        u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
 
        pagefault_disable();
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index d1dd93436e1eedad0ea3cf83ba1cdc6b3fd50c22..f2a83ff6b73c2414110c02dc14aa24686d6ada9c 100644 (file)
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE + 5)
 #define __ARM_NR_COMPAT_END            (__ARM_NR_COMPAT_BASE + 0x800)
 
-#define __NR_compat_syscalls           424
+#define __NR_compat_syscalls           428
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 5590f262369079bca3b66561a51e9b3f4705cdd7..23f1a44acada413fb4e2ad5411624d2925c71835 100644 (file)
@@ -866,6 +866,14 @@ __SYSCALL(__NR_rt_sigtimedwait_time64, compat_sys_rt_sigtimedwait_time64)
 __SYSCALL(__NR_futex_time64, sys_futex)
 #define __NR_sched_rr_get_interval_time64 423
 __SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
+#define __NR_pidfd_send_signal 424
+__SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal)
+#define __NR_io_uring_setup 425
+__SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
+#define __NR_io_uring_enter 426
+__SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
+#define __NR_io_uring_register 427
+__SYSCALL(__NR_io_uring_register, sys_io_uring_register)
 
 /*
  * Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 07b298120182042d2a1dea18160ef63e5a678b9d..65a51331088eb0afd0db70e52fb03335cc8151dc 100644 (file)
@@ -103,10 +103,15 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
                 * to be revisited if support for multiple ftrace entry points
                 * is added in the future, but for now, the pr_err() below
                 * deals with a theoretical issue only.
+                *
+                * Note that PLTs are place relative, and plt_entries_equal()
+                * checks whether they point to the same target. Here, we need
+                * to check if the actual opcodes are in fact identical,
+                * regardless of the offset in memory so use memcmp() instead.
                 */
                trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
-               if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-                                      &trampoline)) {
+               if (memcmp(mod->arch.ftrace_trampoline, &trampoline,
+                          sizeof(trampoline))) {
                        if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
                                pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
                                return -EINVAL;
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 6bc135042f5e4dc244dbf14e8ea953121931ad2b..7cae155e81a5fb71aa8148865e44d9482bfb5b9a 100644 (file)
@@ -363,7 +363,7 @@ void __init arm64_memblock_init(void)
                 * Otherwise, this is a no-op
                 */
                u64 base = phys_initrd_start & PAGE_MASK;
-               u64 size = PAGE_ALIGN(phys_initrd_size);
+               u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;
 
                /*
                 * We can only add back the initrd memory if we don't end up
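[Worked example of the sizing fix above, with illustrative addresses and 4 KiB pages assumed: for phys_initrd_start = 0x10000800 and phys_initrd_size = 0x2000, base = 0x10000000; the old size = PAGE_ALIGN(0x2000) = 0x2000 re-adds only [0x10000000, 0x10002000) and misses the initrd's final 0x800 bytes, whereas the new size = PAGE_ALIGN(0x10002800) - base = 0x3000 covers the whole initrd.]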
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index ab9cda5f6136ad60753de5e725f6a6271ad88e9c..56e3d0b685e19119afc0a3e244ca64c3752aca4e 100644 (file)
 332    common  pkey_free                       sys_pkey_free
 333    common  rseq                            sys_rseq
 # 334 through 423 are reserved to sync up with other architectures
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index 125c14178979c010648895bd7925f85e5e54b10d..df4ec3ec71d1518bfac752044f7a1eae9291535a 100644 (file)
 421    common  rt_sigtimedwait_time64          sys_rt_sigtimedwait
 422    common  futex_time64                    sys_futex
 423    common  sched_rr_get_interval_time64    sys_sched_rr_get_interval
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index 8ee3a8c18498eb591ab9d1fc2b2044d43afa1cd4..4964947732af3e37bd5d651aaad9a3f3ccd39056 100644 (file)
 421    common  rt_sigtimedwait_time64          sys_rt_sigtimedwait
 422    common  futex_time64                    sys_futex
 423    common  sched_rr_get_interval_time64    sys_sched_rr_get_interval
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 4a70c5de8c929bad778788db2f6c46b3cc2633a2..25a57895a3a359f6f7ded09785bf6e4cc15ea1db 100644 (file)
@@ -210,12 +210,6 @@ const char *get_system_type(void)
        return ath79_sys_type;
 }
 
-int get_c0_perfcount_int(void)
-{
-       return ATH79_MISC_IRQ(5);
-}
-EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
-
 unsigned int get_c0_compare_int(void)
 {
        return CP0_LEGACY_COMPARE_IRQ;
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index f158c5894a9a8760d3c1ec3430617bad976fac97..feb2653490dfe7a744b0eaa41d913297d9392ed5 100644 (file)
@@ -125,7 +125,7 @@ trace_a_syscall:
        subu    t1, v0,  __NR_O32_Linux
        move    a1, v0
        bnez    t1, 1f /* __NR_syscall at offset 0 */
-       lw      a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
+       ld      a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
        .set    pop
 
 1:     jal     syscall_trace_enter
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 15f4117900ee8d8c9285b61dd332b3c1832558ea..9392dfe33f97ec48a74014d3bc49940dafdc944d 100644 (file)
 421    n32     rt_sigtimedwait_time64          compat_sys_rt_sigtimedwait_time64
 422    n32     futex_time64                    sys_futex
 423    n32     sched_rr_get_interval_time64    sys_sched_rr_get_interval
+424    n32     pidfd_send_signal               sys_pidfd_send_signal
+425    n32     io_uring_setup                  sys_io_uring_setup
+426    n32     io_uring_enter                  sys_io_uring_enter
+427    n32     io_uring_register               sys_io_uring_register
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index c85502e67b44145420d6638489300aae4388cefe..cd0c8aa21fbacfb7563c39123f0880d2b753a7c2 100644 (file)
 327    n64     rseq                            sys_rseq
 328    n64     io_pgetevents                   sys_io_pgetevents
 # 329 through 423 are reserved to sync up with other architectures
+424    n64     pidfd_send_signal               sys_pidfd_send_signal
+425    n64     io_uring_setup                  sys_io_uring_setup
+426    n64     io_uring_enter                  sys_io_uring_enter
+427    n64     io_uring_register               sys_io_uring_register
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 2e063d0f837e78c3cb566d68374c33e7bcdd9a8e..e849e8ffe4a25b4516cdc748abfa96bb2c918ebe 100644 (file)
 421    o32     rt_sigtimedwait_time64          sys_rt_sigtimedwait             compat_sys_rt_sigtimedwait_time64
 422    o32     futex_time64                    sys_futex                       sys_futex
 423    o32     sched_rr_get_interval_time64    sys_sched_rr_get_interval       sys_sched_rr_get_interval
+424    o32     pidfd_send_signal               sys_pidfd_send_signal
+425    o32     io_uring_setup                  sys_io_uring_setup
+426    o32     io_uring_enter                  sys_io_uring_enter
+427    o32     io_uring_register               sys_io_uring_register
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index b26766c6647dc7a40fd3235460902112c20cd3d4..fe8ca623add89a627710b697f7886fc879589ac2 100644 (file)
 421    32      rt_sigtimedwait_time64          sys_rt_sigtimedwait             compat_sys_rt_sigtimedwait_time64
 422    32      futex_time64                    sys_futex                       sys_futex
 423    32      sched_rr_get_interval_time64    sys_sched_rr_get_interval       sys_sched_rr_get_interval
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 5ba131c30f6bcded4e65ccc40bb8aa2595e44ff1..1bcd468ab422dc100b120607b03d5d587850b453 100644 (file)
@@ -266,6 +266,7 @@ CONFIG_UDF_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_HUGETLBFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS=y
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index b18abb0c3dae6248cfd697b1b9ee2343c39e1ba3..00f5a63c8d9a65aefd60df95b75d9cfae1fe8493 100644 (file)
 421    32      rt_sigtimedwait_time64          sys_rt_sigtimedwait             compat_sys_rt_sigtimedwait_time64
 422    32      futex_time64                    sys_futex                       sys_futex
 423    32      sched_rr_get_interval_time64    sys_sched_rr_get_interval       sys_sched_rr_get_interval
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index e7a9c4f6bfca49585beffcb6fc3dc755eb054e8f..8330f135294f48ecfff9bb5d3555f6fa3e3514c3 100644 (file)
@@ -95,28 +95,15 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
                              unsigned long entries, unsigned long dev_hpa,
                              struct mm_iommu_table_group_mem_t **pmem)
 {
-       struct mm_iommu_table_group_mem_t *mem;
-       long i, ret, locked_entries = 0;
+       struct mm_iommu_table_group_mem_t *mem, *mem2;
+       long i, ret, locked_entries = 0, pinned = 0;
        unsigned int pageshift;
-
-       mutex_lock(&mem_list_mutex);
-
-       list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
-                       next) {
-               /* Overlap? */
-               if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
-                               (ua < (mem->ua +
-                                      (mem->entries << PAGE_SHIFT)))) {
-                       ret = -EINVAL;
-                       goto unlock_exit;
-               }
-
-       }
+       unsigned long entry, chunk;
 
        if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
                ret = mm_iommu_adjust_locked_vm(mm, entries, true);
                if (ret)
-                       goto unlock_exit;
+                       return ret;
 
                locked_entries = entries;
        }
@@ -148,17 +135,27 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
        }
 
        down_read(&mm->mmap_sem);
-       ret = get_user_pages_longterm(ua, entries, FOLL_WRITE, mem->hpages, NULL);
+       chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
+                       sizeof(struct vm_area_struct *);
+       chunk = min(chunk, entries);
+       for (entry = 0; entry < entries; entry += chunk) {
+               unsigned long n = min(entries - entry, chunk);
+
+               ret = get_user_pages_longterm(ua + (entry << PAGE_SHIFT), n,
+                               FOLL_WRITE, mem->hpages + entry, NULL);
+               if (ret == n) {
+                       pinned += n;
+                       continue;
+               }
+               if (ret > 0)
+                       pinned += ret;
+               break;
+       }
        up_read(&mm->mmap_sem);
-       if (ret != entries) {
-               /* free the reference taken */
-               for (i = 0; i < ret; i++)
-                       put_page(mem->hpages[i]);
-
-               vfree(mem->hpas);
-               kfree(mem);
-               ret = -EFAULT;
-               goto unlock_exit;
+       if (pinned != entries) {
+               if (!ret)
+                       ret = -EFAULT;
+               goto free_exit;
        }
 
        pageshift = PAGE_SHIFT;
@@ -183,21 +180,43 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
        }
 
 good_exit:
-       ret = 0;
        atomic64_set(&mem->mapped, 1);
        mem->used = 1;
        mem->ua = ua;
        mem->entries = entries;
-       *pmem = mem;
 
-       list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
+       mutex_lock(&mem_list_mutex);
 
-unlock_exit:
-       if (locked_entries && ret)
-               mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+       list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
+               /* Overlap? */
+               if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
+                               (ua < (mem2->ua +
+                                      (mem2->entries << PAGE_SHIFT)))) {
+                       ret = -EINVAL;
+                       mutex_unlock(&mem_list_mutex);
+                       goto free_exit;
+               }
+       }
+
+       list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
 
        mutex_unlock(&mem_list_mutex);
 
+       *pmem = mem;
+
+       return 0;
+
+free_exit:
+       /* free the reference taken */
+       for (i = 0; i < pinned; i++)
+               put_page(mem->hpages[i]);
+
+       vfree(mem->hpas);
+       kfree(mem);
+
+unlock_exit:
+       mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+
        return ret;
 }
 
@@ -266,7 +285,7 @@ static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
 long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 {
        long ret = 0;
-       unsigned long entries, dev_hpa;
+       unsigned long unlock_entries = 0;
 
        mutex_lock(&mem_list_mutex);
 
@@ -287,17 +306,17 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
                goto unlock_exit;
        }
 
+       if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
+               unlock_entries = mem->entries;
+
        /* @mapped became 0 so now mappings are disabled, release the region */
-       entries = mem->entries;
-       dev_hpa = mem->dev_hpa;
        mm_iommu_release(mem);
 
-       if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
-               mm_iommu_adjust_locked_vm(mm, entries, false);
-
 unlock_exit:
        mutex_unlock(&mem_list_mutex);
 
+       mm_iommu_adjust_locked_vm(mm, unlock_entries, false);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(mm_iommu_put);
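
The rework above pins memory in bounded chunks: each get_user_pages_longterm() call is capped so the slice of the page-pointer array it fills stays within a single maximum-order allocation. A minimal standalone sketch of the same chunking loop, with pin_pages() as a hypothetical stand-in for the pinning primitive:

/*
 * Sketch only: pin_pages() stands in for get_user_pages_longterm()
 * and returns the number of pages pinned or a negative error code.
 */
#define PAGE_SHIFT	12
#define MAX_ORDER	11

extern long pin_pages(unsigned long ua, unsigned long n, void **pages);

static long pin_all(unsigned long ua, unsigned long entries, void **pages)
{
	/* largest slice whose pointer array fits one max-order block */
	unsigned long chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
			sizeof(void *);
	unsigned long entry, n, pinned = 0;
	long ret = 0;

	if (chunk > entries)
		chunk = entries;
	for (entry = 0; entry < entries; entry += chunk) {
		n = entries - entry < chunk ? entries - entry : chunk;
		ret = pin_pages(ua + (entry << PAGE_SHIFT), n, pages + entry);
		if (ret != (long)n) {
			if (ret > 0)
				pinned += ret;
			break;
		}
		pinned += n;
	}
	if (pinned == entries)
		return (long)pinned;
	/* short pin: caller drops the 'pinned' references, as the patch does */
	return ret < 0 ? ret : -14;	/* -EFAULT */
}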
index 842b2c7e156aba4cb2a04d8897fb7aa6128c3b4d..50cd09b4e05d51a9d9c46722065f9ebf5e55295c 100644 (file)
@@ -324,7 +324,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
 
 config PPC_RADIX_MMU
        bool "Radix MMU Support"
-       depends on PPC_BOOK3S_64
+       depends on PPC_BOOK3S_64 && HUGETLB_PAGE
        select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
        default y
        help
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
new file mode 100644 (file)
index 0000000..1a911ed
--- /dev/null
@@ -0,0 +1,84 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_ARCH_RV32I=y
+CONFIG_SMP=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCIE_XILINX=y
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_MACB=y
+CONFIG_E1000E=y
+CONFIG_R8169=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
+CONFIG_HVC_RISCV_SBI=y
+# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_SIFIVE_PLIC=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_RCU_TRACE is not set
index 5fd8c922e1c225f504c5c81349b49492f8489d19..bc7b77e34d0920f2190c7e8c4edd18658c526703 100644 (file)
@@ -121,6 +121,14 @@ void __init setup_bootmem(void)
                         */
                        memblock_reserve(reg->base, vmlinux_end - reg->base);
                        mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
+
+                       /*
+                        * Remove the memory range from the end of the
+                        * usable area to the end of the region
+                        */
+                       if (reg->base + mem_size < end)
+                               memblock_remove(reg->base + mem_size,
+                                               end - reg->base - mem_size);
                }
        }
        BUG_ON(mem_size == 0);
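
The added block removes from memblock any tail of a region that lies beyond what the linear map can cover (mem_size is clamped to -PAGE_OFFSET). A small self-contained sketch of the trim arithmetic, with made-up region values:

#include <stdio.h>

int main(void)
{
	unsigned long long base     = 0x80000000ULL;  /* region start */
	unsigned long long size     = 0x100000000ULL; /* 4 GiB region */
	unsigned long long end      = base + size;
	unsigned long long max_size = 0x80000000ULL;  /* linear-map limit */
	unsigned long long mem_size = size < max_size ? size : max_size;

	if (base + mem_size < end)	/* unreachable tail exists */
		printf("remove [%#llx, %#llx)\n", base + mem_size, end);
	return 0;
}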
index 4cb771ba13fa7fb39da31ac6a8428744c8026d16..5d316fe40480446b9dd5f90fc0bb4f3bba6d3b55 100644 (file)
@@ -25,7 +25,7 @@ static void *mem_detect_alloc_extended(void)
 {
        unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
 
-       if (IS_ENABLED(BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
            INITRD_START < offset + ENTRIES_EXTENDED_MAX)
                offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));
 
index 594464f2129d4706fc4786d2de55d7c73974c97c..0da378e2eb25edcfee1f787b50eb900947b2ffc4 100644 (file)
@@ -23,7 +23,7 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
 
        if (flags & KERNEL_FPC)
                /* Save floating point control */
-               asm volatile("stfpc %0" : "=m" (state->fpc));
+               asm volatile("stfpc %0" : "=Q" (state->fpc));
 
        if (!MACHINE_HAS_VX) {
                if (flags & KERNEL_VXR_V0V7) {
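
This hunk and the s390 vtime.c hunks further down make the same change: the "m" operand constraint becomes "Q". Assuming the usual GCC s390 machine-constraint definitions, "Q" restricts the operand to a base-register-plus-short-displacement address with no index register, the only form instructions such as STFPC, STPT and STCK can encode, while plain "m" also allows addressing modes they cannot. A minimal sketch of the corrected pattern (s390 compilers only):

/*
 * Illustration only: "Q" limits the memory operand to base register +
 * short displacement, no index register, matching what STFPC accepts.
 */
static inline unsigned int read_fpc(void)
{
	unsigned int fpc;

	asm volatile("stfpc %0" : "=Q" (fpc));
	return fpc;
}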
index 02579f95f391b6524ddd28004cc6d6a511be974b..061418f787c3712f4091cfeb94b8dfb5d2b1eb03 100644 (file)
 421    32      rt_sigtimedwait_time64  -                               compat_sys_rt_sigtimedwait_time64
 422    32      futex_time64            -                               sys_futex
 423    32      sched_rr_get_interval_time64    -                       sys_sched_rr_get_interval
+424  common    pidfd_send_signal       sys_pidfd_send_signal           sys_pidfd_send_signal
+425  common    io_uring_setup          sys_io_uring_setup              sys_io_uring_setup
+426  common    io_uring_enter          sys_io_uring_enter              sys_io_uring_enter
+427  common    io_uring_register       sys_io_uring_register           sys_io_uring_register
index a69a0911ed0e82720b10b124d0153681f2c821ea..c475ca49cfc6b43c02ab924e218e541a92b677b8 100644 (file)
@@ -37,7 +37,7 @@ static inline u64 get_vtimer(void)
 {
        u64 timer;
 
-       asm volatile("stpt %0" : "=m" (timer));
+       asm volatile("stpt %0" : "=Q" (timer));
        return timer;
 }
 
@@ -48,7 +48,7 @@ static inline void set_vtimer(u64 expires)
        asm volatile(
                "       stpt    %0\n"   /* Store current cpu timer value */
                "       spt     %1"     /* Set new value imm. afterwards */
-               : "=m" (timer) : "m" (expires));
+               : "=Q" (timer) : "Q" (expires));
        S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
        S390_lowcore.last_update_timer = expires;
 }
@@ -135,8 +135,8 @@ static int do_account_vtime(struct task_struct *tsk)
 #else
                "       stck    %1"     /* Store current tod clock value */
 #endif
-               : "=m" (S390_lowcore.last_update_timer),
-                 "=m" (S390_lowcore.last_update_clock));
+               : "=Q" (S390_lowcore.last_update_timer),
+                 "=Q" (S390_lowcore.last_update_clock));
        clock = S390_lowcore.last_update_clock - clock;
        timer -= S390_lowcore.last_update_timer;
 
index bfda678576e4335788f844db6ec7632fda5faedf..480b057556ee45a3871485ce7301d2436cca8255 100644 (file)
 421    common  rt_sigtimedwait_time64          sys_rt_sigtimedwait
 422    common  futex_time64                    sys_futex
 423    common  sched_rr_get_interval_time64    sys_sched_rr_get_interval
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
index b9a5a04b2d2c543791088b69aae612ed56a97e5e..a1dd24307b001aa95801d3e24003ffd719711728 100644 (file)
 421    32      rt_sigtimedwait_time64          sys_rt_sigtimedwait             compat_sys_rt_sigtimedwait_time64
 422    32      futex_time64                    sys_futex                       sys_futex
 423    32      sched_rr_get_interval_time64    sys_sched_rr_get_interval       sys_sched_rr_get_interval
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
index 5ad92419be19c5d67fae37158bac2b93e105d1bc..62fc3fda1a058eed944d0a37dfe8565b80ac0fd8 100644 (file)
@@ -1499,7 +1499,7 @@ config X86_CPA_STATISTICS
        depends on DEBUG_FS
        ---help---
          Expose statistics about the Change Page Attribute mechanism, which
-         helps to determine the effectivness of preserving large and huge
+         helps to determine the effectiveness of preserving large and huge
          page mappings when mapping protections are changed.
 
 config ARCH_HAS_MEM_ENCRYPT
index c0d6c560df69e0e63941a34539660770304ff612..5a237e8dbf8d563504a6cfcb4a67a2b7350bed0a 100644 (file)
@@ -352,7 +352,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
        boot_params->hdr.loadflags &= ~KASLR_FLAG;
 
        /* Save RSDP address for later use. */
-       boot_params->acpi_rsdp_addr = get_rsdp_addr();
+       /* boot_params->acpi_rsdp_addr = get_rsdp_addr(); */
 
        sanitize_boot_params(boot_params);
 
index 3b6e70d085da89775317c8e2a560625ab4799e01..8457cdd47f751167a2321ebf063eb18bdb4ef8aa 100644 (file)
@@ -323,6 +323,12 @@ ENTRY(poly1305_4block_avx2)
        vpaddq          t2,t1,t1
        vmovq           t1x,d4
 
+       # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
+       # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
+       # amount.  Careful: we must not assume the carry bits 'd0 >> 26',
+       # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
+       # integers.  It's true in a single-block implementation, but not here.
+
        # d1 += d0 >> 26
        mov             d0,%rax
        shr             $26,%rax
@@ -361,16 +367,16 @@ ENTRY(poly1305_4block_avx2)
        # h0 += (d4 >> 26) * 5
        mov             d4,%rax
        shr             $26,%rax
-       lea             (%eax,%eax,4),%eax
-       add             %eax,%ebx
+       lea             (%rax,%rax,4),%rax
+       add             %rax,%rbx
        # h4 = d4 & 0x3ffffff
        mov             d4,%rax
        and             $0x3ffffff,%eax
        mov             %eax,h4
 
        # h1 += h0 >> 26
-       mov             %ebx,%eax
-       shr             $26,%eax
+       mov             %rbx,%rax
+       shr             $26,%rax
        add             %eax,h1
        # h0 = h0 & 0x3ffffff
        andl            $0x3ffffff,%ebx
index e6add74d78a595b63789d419100b7c30b024e0fc..6f0be7a869641c92c4993e378b53c07a7d385f29 100644 (file)
@@ -253,16 +253,16 @@ ENTRY(poly1305_block_sse2)
        # h0 += (d4 >> 26) * 5
        mov             d4,%rax
        shr             $26,%rax
-       lea             (%eax,%eax,4),%eax
-       add             %eax,%ebx
+       lea             (%rax,%rax,4),%rax
+       add             %rax,%rbx
        # h4 = d4 & 0x3ffffff
        mov             d4,%rax
        and             $0x3ffffff,%eax
        mov             %eax,h4
 
        # h1 += h0 >> 26
-       mov             %ebx,%eax
-       shr             $26,%eax
+       mov             %rbx,%rax
+       shr             $26,%rax
        add             %eax,h1
        # h0 = h0 & 0x3ffffff
        andl            $0x3ffffff,%ebx
@@ -524,6 +524,12 @@ ENTRY(poly1305_2block_sse2)
        paddq           t2,t1
        movq            t1,d4
 
+       # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
+       # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
+       # amount.  Careful: we must not assume the carry bits 'd0 >> 26',
+       # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
+       # integers.  It's true in a single-block implementation, but not here.
+
        # d1 += d0 >> 26
        mov             d0,%rax
        shr             $26,%rax
@@ -562,16 +568,16 @@ ENTRY(poly1305_2block_sse2)
        # h0 += (d4 >> 26) * 5
        mov             d4,%rax
        shr             $26,%rax
-       lea             (%eax,%eax,4),%eax
-       add             %eax,%ebx
+       lea             (%rax,%rax,4),%rax
+       add             %rax,%rbx
        # h4 = d4 & 0x3ffffff
        mov             d4,%rax
        and             $0x3ffffff,%eax
        mov             %eax,h4
 
        # h1 += h0 >> 26
-       mov             %ebx,%eax
-       shr             $26,%eax
+       mov             %rbx,%rax
+       shr             $26,%rax
        add             %eax,h1
        # h0 = h0 & 0x3ffffff
        andl            $0x3ffffff,%ebx
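
The AVX2 hunks above and these SSE2 hunks make the same fix: the carry chain of the partial reduction is widened from 32-bit registers (%eax/%ebx) to 64-bit ones (%rax/%rbx), because in the multi-block code the limb sums d0..d4 grow large enough that the carries no longer fit in 32 bits. A C sketch of the reduction with explicitly 64-bit carries, assuming the same 26-bit limb representation:

#include <stdint.h>

/*
 * Partial reduction mod 2^130 - 5 over five 26-bit limbs, keeping
 * every carry in a 64-bit variable -- the property the asm fix restores.
 */
static void partial_reduce(uint64_t d[5], uint32_t h[5])
{
	uint64_t carry;

	d[1] += d[0] >> 26;  h[0] = d[0] & 0x3ffffff;
	d[2] += d[1] >> 26;  h[1] = d[1] & 0x3ffffff;
	d[3] += d[2] >> 26;  h[2] = d[2] & 0x3ffffff;
	d[4] += d[3] >> 26;  h[3] = d[3] & 0x3ffffff;

	/* 2^130 == 5 (mod 2^130 - 5): the top carry folds back times 5 */
	carry = (d[4] >> 26) * 5;	/* may exceed 32 bits here */
	h[4] = d[4] & 0x3ffffff;

	carry += h[0];
	h[1] += (uint32_t)(carry >> 26);  /* h1 < 2^26 + a small amount */
	h[0] = carry & 0x3ffffff;
}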
index 0ecfac84ba9111306faeb900375b6402d52afa07..d45f3fbd232ea7c34ccf1ceda5f97ce8fefd48b8 100644 (file)
@@ -117,22 +117,39 @@ static __initconst const u64 amd_hw_cache_event_ids
 };
 
 /*
- * AMD Performance Monitor K7 and later.
+ * AMD Performance Monitor K7 and later, up to and including Family 16h:
  */
 static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
 {
-  [PERF_COUNT_HW_CPU_CYCLES]                   = 0x0076,
-  [PERF_COUNT_HW_INSTRUCTIONS]                 = 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]             = 0x077d,
-  [PERF_COUNT_HW_CACHE_MISSES]                 = 0x077e,
-  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]          = 0x00c2,
-  [PERF_COUNT_HW_BRANCH_MISSES]                        = 0x00c3,
-  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]      = 0x00d0, /* "Decoder empty" event */
-  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]       = 0x00d1, /* "Dispatch stalls" event */
+       [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x077d,
+       [PERF_COUNT_HW_CACHE_MISSES]            = 0x077e,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x00d1, /* "Dispatch stalls" event */
+};
+
+/*
+ * AMD Performance Monitor Family 17h and later:
+ */
+static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
+{
+       [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = 0xff60,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x0187,
 };
 
 static u64 amd_pmu_event_map(int hw_event)
 {
+       if (boot_cpu_data.x86 >= 0x17)
+               return amd_f17h_perfmon_event_map[hw_event];
+
        return amd_perfmon_event_map[hw_event];
 }
 
index f61dcbef20ffee301a5904717846fe1d1c1c6f31..f9451566cd9b2c4398d56b18f669a2e0dde8a98c 100644 (file)
@@ -3131,7 +3131,7 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
                flags &= ~PERF_SAMPLE_TIME;
        if (!event->attr.exclude_kernel)
                flags &= ~PERF_SAMPLE_REGS_USER;
-       if (event->attr.sample_regs_user & ~PEBS_REGS)
+       if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
                flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
        return flags;
 }
index 94a4b7fc75d0ecf344bade95be1cf563576250d2..d41de9af7a39b52bcd2913b2f1ef5edf4f3552b3 100644 (file)
  *                            Scope: Package (physical package)
  *     MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
  *                            perf code: 0x04
- *                            Available model: HSW ULT,CNL
+ *                            Available model: HSW ULT,KBL,CNL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
  *                            perf code: 0x05
- *                            Available model: HSW ULT,CNL
+ *                            Available model: HSW ULT,KBL,CNL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
  *                            perf code: 0x06
- *                            Available model: HSW ULT,GLM,CNL
+ *                            Available model: HSW ULT,KBL,GLM,CNL
  *                            Scope: Package (physical package)
  *
  */
@@ -566,8 +566,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
 
-       X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  snb_cstates),
-       X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  hswult_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates),
 
        X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates),
 
index a75955741c50422b9894d454c1a33ec7c9790a77..1e98a42b560ad2d9a320b7e3198ff11e3dde3c6d 100644 (file)
@@ -96,25 +96,25 @@ struct amd_nb {
        PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
        PERF_SAMPLE_PERIOD)
 
-#define PEBS_REGS \
-       (PERF_REG_X86_AX | \
-        PERF_REG_X86_BX | \
-        PERF_REG_X86_CX | \
-        PERF_REG_X86_DX | \
-        PERF_REG_X86_DI | \
-        PERF_REG_X86_SI | \
-        PERF_REG_X86_SP | \
-        PERF_REG_X86_BP | \
-        PERF_REG_X86_IP | \
-        PERF_REG_X86_FLAGS | \
-        PERF_REG_X86_R8 | \
-        PERF_REG_X86_R9 | \
-        PERF_REG_X86_R10 | \
-        PERF_REG_X86_R11 | \
-        PERF_REG_X86_R12 | \
-        PERF_REG_X86_R13 | \
-        PERF_REG_X86_R14 | \
-        PERF_REG_X86_R15)
+#define PEBS_GP_REGS                   \
+       ((1ULL << PERF_REG_X86_AX)    | \
+        (1ULL << PERF_REG_X86_BX)    | \
+        (1ULL << PERF_REG_X86_CX)    | \
+        (1ULL << PERF_REG_X86_DX)    | \
+        (1ULL << PERF_REG_X86_DI)    | \
+        (1ULL << PERF_REG_X86_SI)    | \
+        (1ULL << PERF_REG_X86_SP)    | \
+        (1ULL << PERF_REG_X86_BP)    | \
+        (1ULL << PERF_REG_X86_IP)    | \
+        (1ULL << PERF_REG_X86_FLAGS) | \
+        (1ULL << PERF_REG_X86_R8)    | \
+        (1ULL << PERF_REG_X86_R9)    | \
+        (1ULL << PERF_REG_X86_R10)   | \
+        (1ULL << PERF_REG_X86_R11)   | \
+        (1ULL << PERF_REG_X86_R12)   | \
+        (1ULL << PERF_REG_X86_R13)   | \
+        (1ULL << PERF_REG_X86_R14)   | \
+        (1ULL << PERF_REG_X86_R15))
 
 /*
  * Per register state.
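
The renamed macro also fixes a real bug: the old PEBS_REGS OR'ed the PERF_REG_X86_* enumerators themselves, which are small indices (0, 1, 2, ...), not single-bit flags, so the resulting "mask" was meaningless. The replacement shifts 1ULL by each index. A tiny illustration of the difference:

#include <stdio.h>

enum reg { REG_AX, REG_BX, REG_CX, REG_DX };	/* values 0, 1, 2, 3 */

int main(void)
{
	/* buggy: ORs the enum values themselves: 0|1|2|3 == 3 */
	unsigned long long wrong = REG_AX | REG_BX | REG_CX | REG_DX;

	/* fixed: one bit per register: 0b1111 == 15 */
	unsigned long long mask = (1ULL << REG_AX) | (1ULL << REG_BX) |
				  (1ULL << REG_CX) | (1ULL << REG_DX);

	printf("wrong=%llu mask=%llu\n", wrong, mask);	/* wrong=3 mask=15 */
	return 0;
}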
index 93c4bf598fb06c7e53865141dd3e7faa514194ff..feab24cac610e25f276d3d1f71f4705c23106b00 100644 (file)
@@ -226,7 +226,9 @@ struct x86_emulate_ops {
 
        unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
        void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
-       int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt, u64 smbase);
+       int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
+                            const char *smstate);
+       void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
 
 };
 
index 159b5988292f33ec2d1a079bf7d10ba2bc999d4b..a9d03af340307db6589376cf3bfb29a533910cdd 100644 (file)
@@ -126,7 +126,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 }
 
 #define KVM_PERMILLE_MMU_PAGES 20
-#define KVM_MIN_ALLOC_MMU_PAGES 64
+#define KVM_MIN_ALLOC_MMU_PAGES 64UL
 #define KVM_MMU_HASH_SHIFT 12
 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
 #define KVM_MIN_FREE_MMU_PAGES 5
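
Widening the page counters to unsigned long (and giving KVM_MIN_ALLOC_MMU_PAGES a matching UL suffix, since the kernel's max() insists on identical types) avoids silent 32-bit truncation for very large guests. An illustrative sketch, assuming 64-bit unsigned long and a hypothetical 1 PiB guest:

#include <stdio.h>

int main(void)
{
	/* pages backing a hypothetical 1 PiB guest: 2^38 4 KiB pages */
	unsigned long pages = 1UL << 38;
	unsigned long permille = 20;	/* KVM_PERMILLE_MMU_PAGES */

	unsigned int  narrow = pages * permille / 1000;	/* truncates */
	unsigned long wide   = pages * permille / 1000;

	printf("narrow=%u wide=%lu\n", narrow, wide);
	return 0;
}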
@@ -844,9 +844,9 @@ enum kvm_irqchip_mode {
 };
 
 struct kvm_arch {
-       unsigned int n_used_mmu_pages;
-       unsigned int n_requested_mmu_pages;
-       unsigned int n_max_mmu_pages;
+       unsigned long n_used_mmu_pages;
+       unsigned long n_requested_mmu_pages;
+       unsigned long n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        /*
@@ -1182,7 +1182,7 @@ struct kvm_x86_ops {
 
        int (*smi_allowed)(struct kvm_vcpu *vcpu);
        int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
-       int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
+       int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
        int (*enable_smi_window)(struct kvm_vcpu *vcpu);
 
        int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
@@ -1256,8 +1256,8 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
-unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
 bool pdptrs_changed(struct kvm_vcpu *vcpu);
@@ -1592,4 +1592,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #define put_smstate(type, buf, offset, val)                      \
        *(type *)((buf) + (offset) - 0x7e00) = val
 
+#define GET_SMSTATE(type, buf, offset)         \
+       (*(type *)((buf) + (offset) - 0x7e00))
+
 #endif /* _ASM_X86_KVM_HOST_H */
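
GET_SMSTATE mirrors the existing put_smstate: both take an architectural SMRAM offset and index into a buffer holding the last 512 bytes of the state-save area (offsets 0x7e00..0x7fff). A small round-trip sketch using the RIP offset 0x7f78 seen in the emulator changes below, assuming that 512-byte buffer convention:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define put_smstate(type, buf, offset, val) \
	(*(type *)((buf) + (offset) - 0x7e00) = (val))
#define GET_SMSTATE(type, buf, offset) \
	(*(type *)((buf) + (offset) - 0x7e00))

int main(void)
{
	/* the union forces the alignment the typed accesses need */
	union { char b[512]; uint64_t align; } smram;

	memset(smram.b, 0, sizeof(smram.b));
	put_smstate(uint64_t, smram.b, 0x7f78, 0x00400123ULL);	/* RIP */
	printf("%#llx\n",
	       (unsigned long long)GET_SMSTATE(uint64_t, smram.b, 0x7f78));
	return 0;
}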
index f0b0c90dd398246eb2882050d69c6b53ccca11af..d213ec5c3766db0dd5176c951b13e5f3c1514cfb 100644 (file)
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
 #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL       2
+#define VMX_ABORT_VMCS_CORRUPTED             3
 #define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
 
 #endif /* _UAPIVMX_H */
index cfd24f9f761442392c9d42540835c31001bda71f..1796d2bdcaaa2f1f23bf420b92e8be54bcd9bee1 100644 (file)
@@ -28,7 +28,7 @@ obj-y                 += cpuid-deps.o
 obj-$(CONFIG_PROC_FS)  += proc.o
 obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
 
-obj-$(CONFIG_CPU_SUP_INTEL)            += intel.o intel_pconfig.o
+obj-$(CONFIG_CPU_SUP_INTEL)            += intel.o intel_pconfig.o intel_epb.o
 obj-$(CONFIG_CPU_SUP_AMD)              += amd.o
 obj-$(CONFIG_CPU_SUP_HYGON)            += hygon.o
 obj-$(CONFIG_CPU_SUP_CYRIX_32)         += cyrix.o
index 2da82eff0eb4f8498c8cdd65bd9f9dd5fa1fa6eb..b91b3bfa5cfbef1fb0cb8cbfa4011aa404e6c073 100644 (file)
@@ -275,7 +275,7 @@ static const struct {
        const char                      *option;
        enum spectre_v2_user_cmd        cmd;
        bool                            secure;
-} v2_user_options[] __initdata = {
+} v2_user_options[] __initconst = {
        { "auto",               SPECTRE_V2_USER_CMD_AUTO,               false },
        { "off",                SPECTRE_V2_USER_CMD_NONE,               false },
        { "on",                 SPECTRE_V2_USER_CMD_FORCE,              true  },
@@ -419,7 +419,7 @@ static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
-} mitigation_options[] __initdata = {
+} mitigation_options[] __initconst = {
        { "off",                SPECTRE_V2_CMD_NONE,              false },
        { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
        { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
@@ -658,7 +658,7 @@ static const char * const ssb_strings[] = {
 static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
-} ssb_mitigation_options[]  __initdata = {
+} ssb_mitigation_options[]  __initconst = {
        { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
        { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
        { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
index cb28e98a0659abc5051b1e7fcb936b692a1a1264..5e37dfa4d9df2b5a5fa74a996a85ed740ea5553f 100644 (file)
@@ -1864,23 +1864,6 @@ void cpu_init(void)
 }
 #endif
 
-static void bsp_resume(void)
-{
-       if (this_cpu->c_bsp_resume)
-               this_cpu->c_bsp_resume(&boot_cpu_data);
-}
-
-static struct syscore_ops cpu_syscore_ops = {
-       .resume         = bsp_resume,
-};
-
-static int __init init_cpu_syscore(void)
-{
-       register_syscore_ops(&cpu_syscore_ops);
-       return 0;
-}
-core_initcall(init_cpu_syscore);
-
 /*
  * The microcode loader calls this upon late microcode load to recheck features,
  * only when microcode has been updated. Caller holds microcode_mutex and CPU
index 5eb946b9a9f36d3e1dadad391c3df5c7658f7c71..c0e2407abdd6ab0342982b795f3deb8a5037d474 100644 (file)
@@ -14,7 +14,6 @@ struct cpu_dev {
        void            (*c_init)(struct cpuinfo_x86 *);
        void            (*c_identify)(struct cpuinfo_x86 *);
        void            (*c_detect_tlb)(struct cpuinfo_x86 *);
-       void            (*c_bsp_resume)(struct cpuinfo_x86 *);
        int             c_x86_vendor;
 #ifdef CONFIG_X86_32
        /* Optional vendor specific routine to obtain the cache size. */
index fc3c07fe7df58a22c01c8c1180d0b394bde8b59a..f17c1a714779b5a77cdedbae01a2091d9bce361a 100644 (file)
@@ -596,36 +596,6 @@ static void detect_tme(struct cpuinfo_x86 *c)
        c->x86_phys_bits -= keyid_bits;
 }
 
-static void init_intel_energy_perf(struct cpuinfo_x86 *c)
-{
-       u64 epb;
-
-       /*
-        * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized.
-        * (x86_energy_perf_policy(8) is available to change it at run-time.)
-        */
-       if (!cpu_has(c, X86_FEATURE_EPB))
-               return;
-
-       rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
-       if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
-               return;
-
-       pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
-       pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
-       epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
-       wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
-}
-
-static void intel_bsp_resume(struct cpuinfo_x86 *c)
-{
-       /*
-        * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume,
-        * so reinitialize it properly like during bootup:
-        */
-       init_intel_energy_perf(c);
-}
-
 static void init_cpuid_fault(struct cpuinfo_x86 *c)
 {
        u64 msr;
@@ -763,8 +733,6 @@ static void init_intel(struct cpuinfo_x86 *c)
        if (cpu_has(c, X86_FEATURE_TME))
                detect_tme(c);
 
-       init_intel_energy_perf(c);
-
        init_intel_misc_features(c);
 }
 
@@ -1023,9 +991,7 @@ static const struct cpu_dev intel_cpu_dev = {
        .c_detect_tlb   = intel_detect_tlb,
        .c_early_init   = early_init_intel,
        .c_init         = init_intel,
-       .c_bsp_resume   = intel_bsp_resume,
        .c_x86_vendor   = X86_VENDOR_INTEL,
 };
 
 cpu_dev_register(intel_cpu_dev);
-
diff --git a/arch/x86/kernel/cpu/intel_epb.c b/arch/x86/kernel/cpu/intel_epb.c
new file mode 100644 (file)
index 0000000..f4dd733
--- /dev/null
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Performance and Energy Bias Hint support.
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author:
+ *     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ */
+
+#include <linux/cpuhotplug.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/syscore_ops.h>
+#include <linux/pm.h>
+
+#include <asm/cpufeature.h>
+#include <asm/msr.h>
+
+/**
+ * DOC: overview
+ *
+ * The Performance and Energy Bias Hint (EPB) allows software to specify its
+ * preference with respect to the power-performance tradeoffs present in the
+ * processor.  Generally, the EPB is expected to be set by user space (directly
+ * via sysfs or with the help of the x86_energy_perf_policy tool), but there are
+ * two reasons for the kernel to update it.
+ *
+ * First, there are systems where the platform firmware resets the EPB during
+ * system-wide transitions from sleep states back into the working state
+ * effectively causing the previous EPB updates by user space to be lost.
+ * Thus the kernel needs to save the current EPB values for all CPUs during
+ * system-wide transitions to sleep states and restore them on the way back to
+ * the working state.  That can be achieved by saving EPB for secondary CPUs
+ * when they are taken offline during transitions into system sleep states and
+ * for the boot CPU in a syscore suspend operation, so that it can be restored
+ * for the boot CPU in a syscore resume operation and for the other CPUs when
+ * they are brought back online.  However, CPUs that are already offline when
+ * a system-wide PM transition is started are not taken offline again, but their
+ * EPB values may still be reset by the platform firmware during the transition,
+ * so in fact the EPB must always be saved when any CPU is taken offline
+ * and restored when that CPU goes back online.
+ *
+ * Second, on many systems the initial EPB value coming from the platform
+ * firmware is 0 ('performance') and at least on some of them that is because
+ * the platform firmware does not initialize EPB at all with the assumption that
+ * the OS will do that anyway.  That sometimes is problematic, as it may cause
+ * the system battery to drain too fast, for example, so it is better to adjust
+ * it on CPU bring-up and if the initial EPB value for a given CPU is 0, the
+ * kernel changes it to 6 ('normal').
+ */
+
+static DEFINE_PER_CPU(u8, saved_epb);
+
+#define EPB_MASK       0x0fULL
+#define EPB_SAVED      0x10ULL
+#define MAX_EPB                EPB_MASK
+
+static int intel_epb_save(void)
+{
+       u64 epb;
+
+       rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
+       /*
+        * Ensure that saved_epb will always be nonzero after this write even if
+        * the EPB value read from the MSR is 0.
+        */
+       this_cpu_write(saved_epb, (epb & EPB_MASK) | EPB_SAVED);
+
+       return 0;
+}
+
+static void intel_epb_restore(void)
+{
+       u64 val = this_cpu_read(saved_epb);
+       u64 epb;
+
+       rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
+       if (val) {
+               val &= EPB_MASK;
+       } else {
+               /*
+                * Because intel_epb_save() has not run for the current CPU yet,
+                * it is going online for the first time, so if its EPB value is
+                * 0 ('performance') at this point, assume that it has not been
+                * initialized by the platform firmware and set it to 6
+                * ('normal').
+                */
+               val = epb & EPB_MASK;
+               if (val == ENERGY_PERF_BIAS_PERFORMANCE) {
+                       val = ENERGY_PERF_BIAS_NORMAL;
+                       pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
+               }
+       }
+       wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val);
+}
+
+static struct syscore_ops intel_epb_syscore_ops = {
+       .suspend = intel_epb_save,
+       .resume = intel_epb_restore,
+};
+
+static const char * const energy_perf_strings[] = {
+       "performance",
+       "balance-performance",
+       "normal",
+       "balance-power",
+       "power"
+};
+static const u8 energ_perf_values[] = {
+       ENERGY_PERF_BIAS_PERFORMANCE,
+       ENERGY_PERF_BIAS_BALANCE_PERFORMANCE,
+       ENERGY_PERF_BIAS_NORMAL,
+       ENERGY_PERF_BIAS_BALANCE_POWERSAVE,
+       ENERGY_PERF_BIAS_POWERSAVE
+};
+
+static ssize_t energy_perf_bias_show(struct device *dev,
+                                    struct device_attribute *attr,
+                                    char *buf)
+{
+       unsigned int cpu = dev->id;
+       u64 epb;
+       int ret;
+
+       ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%llu\n", epb);
+}
+
+static ssize_t energy_perf_bias_store(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       unsigned int cpu = dev->id;
+       u64 epb, val;
+       int ret;
+
+       ret = __sysfs_match_string(energy_perf_strings,
+                                  ARRAY_SIZE(energy_perf_strings), buf);
+       if (ret >= 0)
+               val = energ_perf_values[ret];
+       else if (kstrtou64(buf, 0, &val) || val > MAX_EPB)
+               return -EINVAL;
+
+       ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+       if (ret < 0)
+               return ret;
+
+       ret = wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS,
+                           (epb & ~EPB_MASK) | val);
+       if (ret < 0)
+               return ret;
+
+       return count;
+}
+
+static DEVICE_ATTR_RW(energy_perf_bias);
+
+static struct attribute *intel_epb_attrs[] = {
+       &dev_attr_energy_perf_bias.attr,
+       NULL
+};
+
+static const struct attribute_group intel_epb_attr_group = {
+       .name = power_group_name,
+       .attrs =  intel_epb_attrs
+};
+
+static int intel_epb_online(unsigned int cpu)
+{
+       struct device *cpu_dev = get_cpu_device(cpu);
+
+       intel_epb_restore();
+       if (!cpuhp_tasks_frozen)
+               sysfs_merge_group(&cpu_dev->kobj, &intel_epb_attr_group);
+
+       return 0;
+}
+
+static int intel_epb_offline(unsigned int cpu)
+{
+       struct device *cpu_dev = get_cpu_device(cpu);
+
+       if (!cpuhp_tasks_frozen)
+               sysfs_unmerge_group(&cpu_dev->kobj, &intel_epb_attr_group);
+
+       intel_epb_save();
+       return 0;
+}
+
+static __init int intel_epb_init(void)
+{
+       int ret;
+
+       if (!boot_cpu_has(X86_FEATURE_EPB))
+               return -ENODEV;
+
+       ret = cpuhp_setup_state(CPUHP_AP_X86_INTEL_EPB_ONLINE,
+                               "x86/intel/epb:online", intel_epb_online,
+                               intel_epb_offline);
+       if (ret < 0)
+               goto err_out_online;
+
+       register_syscore_ops(&intel_epb_syscore_ops);
+       return 0;
+
+err_out_online:
+       cpuhp_remove_state(CPUHP_AP_X86_INTEL_EPB_ONLINE);
+       return ret;
+}
+subsys_initcall(intel_epb_init);
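
Since the attribute group is merged into each CPU device's power group, the knob should appear as /sys/devices/system/cpu/cpuN/power/energy_perf_bias, accepting either a raw value 0..15 or one of the strings in energy_perf_strings. A hedged userspace sketch (the path is assumed from the sysfs layout above):

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpu0/power/energy_perf_bias";
	unsigned int epb;
	FILE *f = fopen(path, "r");

	if (!f || fscanf(f, "%u", &epb) != 1)
		return 1;
	fclose(f);
	printf("current EPB: %u\n", epb);

	f = fopen(path, "w");			/* needs root */
	if (!f)
		return 1;
	fputs("balance-performance\n", f);	/* or a value 0..15 */
	fclose(f);
	return 0;
}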
index 54b9eef3eea97189a032cccf56ec4e001cc77ec5..85212a32b54df8be06365eeaf4ef399996a3abff 100644 (file)
@@ -2610,9 +2610,10 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
                        rdt_last_cmd_puts("Failed to initialize allocations\n");
                        return ret;
                }
-               rdtgrp->mode = RDT_MODE_SHAREABLE;
        }
 
+       rdtgrp->mode = RDT_MODE_SHAREABLE;
+
        return 0;
 }
 
index a034cb808e7eb482e6fd8eae3fac9afca63b429c..fed46ddb1eef2d3de307f1cbb899f45c4b3e67c2 100644 (file)
@@ -569,6 +569,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
        unsigned long *sara = stack_addr(regs);
 
        ri->ret_addr = (kprobe_opcode_t *) *sara;
+       ri->fp = sara;
 
        /* Replace the return addr with trampoline addr */
        *sara = (unsigned long) &kretprobe_trampoline;
@@ -748,26 +749,48 @@ asm(
 NOKPROBE_SYMBOL(kretprobe_trampoline);
 STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
 
+static struct kprobe kretprobe_kprobe = {
+       .addr = (void *)kretprobe_trampoline,
+};
+
 /*
  * Called from kretprobe_trampoline
  */
 static __used void *trampoline_handler(struct pt_regs *regs)
 {
+       struct kprobe_ctlblk *kcb;
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
        kprobe_opcode_t *correct_ret_addr = NULL;
+       void *frame_pointer;
+       bool skipped = false;
+
+       preempt_disable();
+
+       /*
+        * Set a dummy kprobe to avoid kretprobe recursion.
+        * Since kretprobes never run inside kprobe handlers, no kprobe
+        * can be running at this point.
+        */
+       kcb = get_kprobe_ctlblk();
+       __this_cpu_write(current_kprobe, &kretprobe_kprobe);
+       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);
        /* fixup registers */
 #ifdef CONFIG_X86_64
        regs->cs = __KERNEL_CS;
+       /* On x86-64, we use pt_regs->sp for return address holder. */
+       frame_pointer = &regs->sp;
 #else
        regs->cs = __KERNEL_CS | get_kernel_rpl();
        regs->gs = 0;
+       /* On x86-32, we use pt_regs->flags for return address holder. */
+       frame_pointer = &regs->flags;
 #endif
        regs->ip = trampoline_address;
        regs->orig_ax = ~0UL;
@@ -789,8 +812,25 @@ static __used void *trampoline_handler(struct pt_regs *regs)
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
+               /*
+                * Return probes must be pushed on this hash list in the
+                * correct order (same as the return order) so that they
+                * can be popped correctly.  If we find an entry pushed in
+                * the wrong order, we have hit a function which should not
+                * have been probed: the out-of-order entry was pushed while
+                * another kretprobe was itself being processed.
+                */
+               if (ri->fp != frame_pointer) {
+                       if (!skipped)
+                               pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
+                       skipped = true;
+                       continue;
+               }
 
                orig_ret_address = (unsigned long)ri->ret_addr;
+               if (skipped)
+                       pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
+                               ri->rp->kp.addr);
 
                if (orig_ret_address != trampoline_address)
                        /*
@@ -808,14 +848,15 @@ static __used void *trampoline_handler(struct pt_regs *regs)
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
+               if (ri->fp != frame_pointer)
+                       continue;
 
                orig_ret_address = (unsigned long)ri->ret_addr;
                if (ri->rp && ri->rp->handler) {
                        __this_cpu_write(current_kprobe, &ri->rp->kp);
-                       get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
                        ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
-                       __this_cpu_write(current_kprobe, NULL);
+                       __this_cpu_write(current_kprobe, &kretprobe_kprobe);
                }
 
                recycle_rp_inst(ri, &empty_rp);
@@ -831,6 +872,9 @@ static __used void *trampoline_handler(struct pt_regs *regs)
 
        kretprobe_hash_unlock(current, &flags);
 
+       __this_cpu_write(current_kprobe, NULL);
+       preempt_enable();
+
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
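
With ri->fp recorded at probe time, the trampoline can now reject instances that were pushed out of order: only entries whose saved return-address-slot pointer matches the current frame are consumed. A generic sketch of that filter over a singly linked list, with hypothetical types standing in for kretprobe_instance:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical per-return-site record, analogous to kretprobe_instance */
struct ret_instance {
	void *fp;		/* address of the return-address slot */
	void *ret_addr;		/* saved original return address */
	struct ret_instance *next;
};

/* Return the first entry whose fp matches this frame, warning on skips. */
static void *find_matching(struct ret_instance *head, void *frame_pointer)
{
	struct ret_instance *ri;
	bool skipped = false;

	for (ri = head; ri; ri = ri->next) {
		if (ri->fp != frame_pointer) {
			skipped = true;	/* pushed in the wrong order */
			continue;
		}
		if (skipped)
			fprintf(stderr, "out-of-order entry skipped\n");
		return ri->ret_addr;
	}
	return NULL;
}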
index 58ac7be52c7a6df944dca7305492b8ce70ed8d8e..957eae13b37008339b6dfaad350ea9468fccb760 100644 (file)
@@ -426,6 +426,8 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
        u64 msr = x86_spec_ctrl_base;
        bool updmsr = false;
 
+       lockdep_assert_irqs_disabled();
+
        /*
         * If TIF_SSBD is different, select the proper mitigation
         * method. Note that if SSBD mitigation is disabled or permanently
@@ -477,10 +479,12 @@ static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
 
 void speculation_ctrl_update(unsigned long tif)
 {
+       unsigned long flags;
+
        /* Forced update. Make sure all relevant TIF flags are different */
-       preempt_disable();
+       local_irq_save(flags);
        __speculation_ctrl_update(~tif, tif);
-       preempt_enable();
+       local_irq_restore(flags);
 }
 
 /* Called from seccomp/prctl update */
index 725624b6c0c05cdc0c94175214a7ce796df47eee..8fd3cedd9accdd1c17757e5a381b2ab1eac1c032 100644 (file)
@@ -81,6 +81,19 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
        return 0;
 }
 
+/*
+ * Some machines don't handle the default ACPI reboot method and
+ * require the EFI reboot method:
+ */
+static int __init set_efi_reboot(const struct dmi_system_id *d)
+{
+       if (reboot_type != BOOT_EFI && !efi_runtime_disabled()) {
+               reboot_type = BOOT_EFI;
+               pr_info("%s series board detected. Selecting EFI-method for reboot.\n", d->ident);
+       }
+       return 0;
+}
+
 void __noreturn machine_real_restart(unsigned int type)
 {
        local_irq_disable();
@@ -166,6 +179,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
                },
        },
+       {       /* Handle reboot issue on Acer TravelMate X514-51T */
+               .callback = set_efi_reboot,
+               .ident = "Acer TravelMate X514-51T",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate X514-51T"),
+               },
+       },
 
        /* Apple */
        {       /* Handle problems with rebooting on Apple MacBook5 */
index 3fae238340699376ef2b5f84d4b6d4c7f60c48c5..cc6df5c6d7b3cf09b920ba65d69a115bfb637808 100644 (file)
@@ -185,8 +185,7 @@ static void __init cyc2ns_init_boot_cpu(void)
 /*
  * Secondary CPUs do not run through tsc_init(), so set up
  * all the scale factors for all CPUs, assuming the same
- * speed as the bootup CPU. (cpufreq notifiers will fix this
- * up if their speed diverges)
+ * speed as the bootup CPU.
  */
 static void __init cyc2ns_init_secondary_cpus(void)
 {
@@ -937,12 +936,12 @@ void tsc_restore_sched_clock_state(void)
 }
 
 #ifdef CONFIG_CPU_FREQ
-/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
+/*
+ * Frequency scaling support. Adjust the TSC based timer when the CPU frequency
  * changes.
  *
- * RED-PEN: On SMP we assume all CPUs run with the same frequency.  It's
- * not that important because current Opteron setups do not support
- * scaling on SMP anyroads.
+ * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
+ * as unstable and give up in those cases.
  *
  * Should fix up last_tsc too. Currently gettimeofday in the
  * first tick after the change will be slightly wrong.
@@ -956,22 +955,22 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                                void *data)
 {
        struct cpufreq_freqs *freq = data;
-       unsigned long *lpj;
 
-       lpj = &boot_cpu_data.loops_per_jiffy;
-#ifdef CONFIG_SMP
-       if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-               lpj = &cpu_data(freq->cpu).loops_per_jiffy;
-#endif
+       if (num_online_cpus() > 1) {
+               mark_tsc_unstable("cpufreq changes on SMP");
+               return 0;
+       }
 
        if (!ref_freq) {
                ref_freq = freq->old;
-               loops_per_jiffy_ref = *lpj;
+               loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
                tsc_khz_ref = tsc_khz;
        }
+
        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-                       (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
-               *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
+           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
+               boot_cpu_data.loops_per_jiffy =
+                       cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
 
                tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
index bad8c51fee6eea6be91d7a594e820470c121c2a9..a5127b2c195f9df3031e1df660764bc1624078f2 100644 (file)
@@ -362,7 +362,7 @@ SECTIONS
        .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
                __bss_start = .;
                *(.bss..page_aligned)
-               *(.bss)
+               *(BSS_MAIN)
                BSS_DECRYPTED
                . = ALIGN(PAGE_SIZE);
                __bss_stop = .;
index c338984c850d28a1213e46f86efc06d425115660..d0d5dd44b4f478524cc959cefb245695d9e40894 100644 (file)
@@ -2331,24 +2331,18 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
 
 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
 {
+#ifdef CONFIG_X86_64
        u32 eax, ebx, ecx, edx;
 
        eax = 0x80000001;
        ecx = 0;
        ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
        return edx & bit(X86_FEATURE_LM);
+#else
+       return false;
+#endif
 }
 
-#define GET_SMSTATE(type, smbase, offset)                                \
-       ({                                                                \
-        type __val;                                                      \
-        int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val,      \
-                                     sizeof(__val));                     \
-        if (r != X86EMUL_CONTINUE)                                       \
-                return X86EMUL_UNHANDLEABLE;                             \
-        __val;                                                           \
-       })
-
 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
 {
        desc->g    = (flags >> 23) & 1;
@@ -2361,27 +2355,30 @@ static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
        desc->type = (flags >>  8) & 15;
 }
 
-static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
+static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
+                          int n)
 {
        struct desc_struct desc;
        int offset;
        u16 selector;
 
-       selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
+       selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
 
        if (n < 3)
                offset = 0x7f84 + n * 12;
        else
                offset = 0x7f2c + (n - 3) * 12;
 
-       set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
-       set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
-       rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
+       set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
+       set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
+       rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
        ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
        return X86EMUL_CONTINUE;
 }
 
-static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
+#ifdef CONFIG_X86_64
+static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
+                          int n)
 {
        struct desc_struct desc;
        int offset;
@@ -2390,15 +2387,16 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 
        offset = 0x7e00 + n * 16;
 
-       selector =                GET_SMSTATE(u16, smbase, offset);
-       rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
-       set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
-       set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
-       base3 =                   GET_SMSTATE(u32, smbase, offset + 12);
+       selector =                GET_SMSTATE(u16, smstate, offset);
+       rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
+       set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
+       set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
+       base3 =                   GET_SMSTATE(u32, smstate, offset + 12);
 
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
        return X86EMUL_CONTINUE;
 }
+#endif
 
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
                                    u64 cr0, u64 cr3, u64 cr4)
@@ -2445,7 +2443,8 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
        return X86EMUL_CONTINUE;
 }
 
-static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
+                            const char *smstate)
 {
        struct desc_struct desc;
        struct desc_ptr dt;
@@ -2453,53 +2452,55 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
        u32 val, cr0, cr3, cr4;
        int i;
 
-       cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
-       cr3 =                      GET_SMSTATE(u32, smbase, 0x7ff8);
-       ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
-       ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
+       cr0 =                      GET_SMSTATE(u32, smstate, 0x7ffc);
+       cr3 =                      GET_SMSTATE(u32, smstate, 0x7ff8);
+       ctxt->eflags =             GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
+       ctxt->_eip =               GET_SMSTATE(u32, smstate, 0x7ff0);
 
        for (i = 0; i < 8; i++)
-               *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
+               *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
 
-       val = GET_SMSTATE(u32, smbase, 0x7fcc);
+       val = GET_SMSTATE(u32, smstate, 0x7fcc);
        ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
-       val = GET_SMSTATE(u32, smbase, 0x7fc8);
+       val = GET_SMSTATE(u32, smstate, 0x7fc8);
        ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
-       selector =                 GET_SMSTATE(u32, smbase, 0x7fc4);
-       set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f64));
-       set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f60));
-       rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
+       selector =                 GET_SMSTATE(u32, smstate, 0x7fc4);
+       set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f64));
+       set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f60));
+       rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f5c));
        ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
 
-       selector =                 GET_SMSTATE(u32, smbase, 0x7fc0);
-       set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f80));
-       set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f7c));
-       rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
+       selector =                 GET_SMSTATE(u32, smstate, 0x7fc0);
+       set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f80));
+       set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f7c));
+       rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f78));
        ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
 
-       dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
-       dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
+       dt.address =               GET_SMSTATE(u32, smstate, 0x7f74);
+       dt.size =                  GET_SMSTATE(u32, smstate, 0x7f70);
        ctxt->ops->set_gdt(ctxt, &dt);
 
-       dt.address =               GET_SMSTATE(u32, smbase, 0x7f58);
-       dt.size =                  GET_SMSTATE(u32, smbase, 0x7f54);
+       dt.address =               GET_SMSTATE(u32, smstate, 0x7f58);
+       dt.size =                  GET_SMSTATE(u32, smstate, 0x7f54);
        ctxt->ops->set_idt(ctxt, &dt);
 
        for (i = 0; i < 6; i++) {
-               int r = rsm_load_seg_32(ctxt, smbase, i);
+               int r = rsm_load_seg_32(ctxt, smstate, i);
                if (r != X86EMUL_CONTINUE)
                        return r;
        }
 
-       cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
+       cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
 
-       ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
+       ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
 
        return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
-static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
+#ifdef CONFIG_X86_64
+static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
+                            const char *smstate)
 {
        struct desc_struct desc;
        struct desc_ptr dt;
@@ -2509,43 +2510,43 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        int i, r;
 
        for (i = 0; i < 16; i++)
-               *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
+               *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
 
-       ctxt->_eip   = GET_SMSTATE(u64, smbase, 0x7f78);
-       ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
+       ctxt->_eip   = GET_SMSTATE(u64, smstate, 0x7f78);
+       ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
 
-       val = GET_SMSTATE(u32, smbase, 0x7f68);
+       val = GET_SMSTATE(u32, smstate, 0x7f68);
        ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
-       val = GET_SMSTATE(u32, smbase, 0x7f60);
+       val = GET_SMSTATE(u32, smstate, 0x7f60);
        ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
-       cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
-       cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
-       cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
-       ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
-       val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
+       cr0 =                       GET_SMSTATE(u64, smstate, 0x7f58);
+       cr3 =                       GET_SMSTATE(u64, smstate, 0x7f50);
+       cr4 =                       GET_SMSTATE(u64, smstate, 0x7f48);
+       ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
+       val =                       GET_SMSTATE(u64, smstate, 0x7ed0);
        ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
 
-       selector =                  GET_SMSTATE(u32, smbase, 0x7e90);
-       rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e92) << 8);
-       set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e94));
-       set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e98));
-       base3 =                     GET_SMSTATE(u32, smbase, 0x7e9c);
+       selector =                  GET_SMSTATE(u32, smstate, 0x7e90);
+       rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e92) << 8);
+       set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e94));
+       set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e98));
+       base3 =                     GET_SMSTATE(u32, smstate, 0x7e9c);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
 
-       dt.size =                   GET_SMSTATE(u32, smbase, 0x7e84);
-       dt.address =                GET_SMSTATE(u64, smbase, 0x7e88);
+       dt.size =                   GET_SMSTATE(u32, smstate, 0x7e84);
+       dt.address =                GET_SMSTATE(u64, smstate, 0x7e88);
        ctxt->ops->set_idt(ctxt, &dt);
 
-       selector =                  GET_SMSTATE(u32, smbase, 0x7e70);
-       rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e72) << 8);
-       set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e74));
-       set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e78));
-       base3 =                     GET_SMSTATE(u32, smbase, 0x7e7c);
+       selector =                  GET_SMSTATE(u32, smstate, 0x7e70);
+       rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e72) << 8);
+       set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e74));
+       set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e78));
+       base3 =                     GET_SMSTATE(u32, smstate, 0x7e7c);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
 
-       dt.size =                   GET_SMSTATE(u32, smbase, 0x7e64);
-       dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
+       dt.size =                   GET_SMSTATE(u32, smstate, 0x7e64);
+       dt.address =                GET_SMSTATE(u64, smstate, 0x7e68);
        ctxt->ops->set_gdt(ctxt, &dt);
 
        r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
@@ -2553,37 +2554,49 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
                return r;
 
        for (i = 0; i < 6; i++) {
-               r = rsm_load_seg_64(ctxt, smbase, i);
+               r = rsm_load_seg_64(ctxt, smstate, i);
                if (r != X86EMUL_CONTINUE)
                        return r;
        }
 
        return X86EMUL_CONTINUE;
 }
+#endif
 
 static int em_rsm(struct x86_emulate_ctxt *ctxt)
 {
        unsigned long cr0, cr4, efer;
+       char buf[512];
        u64 smbase;
        int ret;
 
        if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
                return emulate_ud(ctxt);
 
+       smbase = ctxt->ops->get_smbase(ctxt);
+
+       ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
+       if (ret != X86EMUL_CONTINUE)
+               return X86EMUL_UNHANDLEABLE;
+
+       if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+               ctxt->ops->set_nmi_mask(ctxt, false);
+
+       ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+               ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+
        /*
         * Get back to real mode, to prepare a safe state in which to load
         * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
         * supports long mode.
         */
-       cr4 = ctxt->ops->get_cr(ctxt, 4);
        if (emulator_has_longmode(ctxt)) {
                struct desc_struct cs_desc;
 
                /* Zero CR4.PCIDE before CR0.PG.  */
-               if (cr4 & X86_CR4_PCIDE) {
+               cr4 = ctxt->ops->get_cr(ctxt, 4);
+               if (cr4 & X86_CR4_PCIDE)
                        ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
-                       cr4 &= ~X86_CR4_PCIDE;
-               }
 
                /* A 32-bit code segment is required to clear EFER.LMA.  */
                memset(&cs_desc, 0, sizeof(cs_desc));
@@ -2597,39 +2610,39 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
        if (cr0 & X86_CR0_PE)
                ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
 
-       /* Now clear CR4.PAE (which must be done before clearing EFER.LME).  */
-       if (cr4 & X86_CR4_PAE)
-               ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
-
-       /* And finally go back to 32-bit mode.  */
-       efer = 0;
-       ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+       if (emulator_has_longmode(ctxt)) {
+               /* Clear CR4.PAE before clearing EFER.LME. */
+               cr4 = ctxt->ops->get_cr(ctxt, 4);
+               if (cr4 & X86_CR4_PAE)
+                       ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
 
-       smbase = ctxt->ops->get_smbase(ctxt);
+               /* And finally go back to 32-bit mode.  */
+               efer = 0;
+               ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+       }
 
        /*
         * Give pre_leave_smm() a chance to make ISA-specific changes to the
         * vCPU state (e.g. enter guest mode) before loading state from the SMM
         * state-save area.
         */
-       if (ctxt->ops->pre_leave_smm(ctxt, smbase))
+       if (ctxt->ops->pre_leave_smm(ctxt, buf))
                return X86EMUL_UNHANDLEABLE;
 
+#ifdef CONFIG_X86_64
        if (emulator_has_longmode(ctxt))
-               ret = rsm_load_state_64(ctxt, smbase + 0x8000);
+               ret = rsm_load_state_64(ctxt, buf);
        else
-               ret = rsm_load_state_32(ctxt, smbase + 0x8000);
+#endif
+               ret = rsm_load_state_32(ctxt, buf);
 
        if (ret != X86EMUL_CONTINUE) {
                /* FIXME: should triple fault */
                return X86EMUL_UNHANDLEABLE;
        }
 
-       if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
-               ctxt->ops->set_nmi_mask(ctxt, false);
+       ctxt->ops->post_leave_smm(ctxt);
 
-       ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
-               ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
        return X86EMUL_CONTINUE;
 }
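
The smstate parameter threaded through the rsm_load_* helpers above is a single 512-byte copy of the SMM state-save area, fetched once in em_rsm() from smbase + 0xfe00. Under that layout the GET_SMSTATE() accessor becomes a plain buffer read; a sketch of the rebased macro, per my reading of this series (the verbatim header change is not part of these hunks):

    /* The buffer caches guest memory at smbase + 0xfe00, which is state-save
     * offset 0x7e00 (smbase + 0x8000 + 0x7e00 == smbase + 0xfe00), so an
     * absolute state-save offset is rebased by 0x7e00 into the buffer.
     */
    #define GET_SMSTATE(type, buf, offset) \
            (*(type *)((buf) + (offset) - 0x7e00))
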
 
index 991fdf7fc17fbd9e1a4cab99d688a7af820d397c..9bf70cf845648f5e66143440166d57c5fd287bf9 100644 (file)
@@ -138,6 +138,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
                if (offset <= max_apic_id) {
                        u8 cluster_size = min(max_apic_id - offset + 1, 16U);
 
+                       offset = array_index_nospec(offset, map->max_apic_id + 1);
                        *cluster = &map->phys_map[offset];
                        *mask = dest_id & (0xffff >> (16 - cluster_size));
                } else {
@@ -901,7 +902,8 @@ static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
                if (irq->dest_id > map->max_apic_id) {
                        *bitmap = 0;
                } else {
-                       *dst = &map->phys_map[irq->dest_id];
+                       u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
+                       *dst = &map->phys_map[dest_id];
                        *bitmap = 1;
                }
                return true;
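
Both lapic.c hunks are the standard Spectre-v1 hardening pattern: a guest-influenced index passes a bounds check and then feeds a dependent array load, so it is clamped with array_index_nospec() between check and use. A minimal self-contained sketch of the pattern (hypothetical table and names):

    #include <linux/nospec.h>

    /* Clamp a caller-influenced index so a mispredicted bounds check
     * cannot speculatively read past the table (Spectre v1). */
    static u32 read_entry(const u32 *table, u32 nr_entries, u32 idx)
    {
            if (idx >= nr_entries)
                    return 0;
            idx = array_index_nospec(idx, nr_entries);
            return table[idx];
    }
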
index eee455a8a612d00a516bfe892a690bcd8bc91e39..e10962dfc2032d982f124070b88f7d625d2b8f0b 100644 (file)
@@ -2007,7 +2007,7 @@ static int is_empty_shadow_page(u64 *spt)
  * aggregate version in order to make the slab shrinker
  * faster
  */
-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
 {
        kvm->arch.n_used_mmu_pages += nr;
        percpu_counter_add(&kvm_total_used_mmu_pages, nr);
@@ -2238,7 +2238,7 @@ static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
                                        struct list_head *invalid_list,
                                        bool remote_flush)
 {
-       if (!remote_flush && !list_empty(invalid_list))
+       if (!remote_flush && list_empty(invalid_list))
                return false;
 
        if (!list_empty(invalid_list))
@@ -2763,7 +2763,7 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
  * Changing the number of mmu pages allocated to the vm
  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
  */
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
 {
        LIST_HEAD(invalid_list);
 
@@ -6031,10 +6031,10 @@ int kvm_mmu_module_init(void)
 /*
  * Calculate mmu pages needed for kvm.
  */
-unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
+unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
 {
-       unsigned int nr_mmu_pages;
-       unsigned int  nr_pages = 0;
+       unsigned long nr_mmu_pages;
+       unsigned long nr_pages = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int i;
@@ -6047,8 +6047,7 @@ unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
        }
 
        nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
-       nr_mmu_pages = max(nr_mmu_pages,
-                          (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+       nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
 
        return nr_mmu_pages;
 }
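
The unsigned int -> unsigned long widening is not cosmetic: with 4 KiB pages and KVM_PERMILLE_MMU_PAGES at 20 (assuming the usual value), the intermediate nr_pages * 20 overflows 32 bits once a guest has more than roughly 800 GiB of memory, shrinking the computed MMU page limit. Worked example with a 1 TiB guest:

    unsigned int  pages = 268435456u;                       /* 1 TiB / 4 KiB */
    unsigned int  bad   = pages * 20 / 1000;                /* 5368709120 wraps to 1073741824 -> 1073741 */
    unsigned long good  = (unsigned long)pages * 20 / 1000; /* 5368709 */
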
index bbdc60f2fae89beb34c72716d9e7eb9c33584651..54c2a377795be6920bee9676e58555110c3a56b9 100644 (file)
@@ -64,7 +64,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                                u64 fault_address, char *insn, int insn_len);
 
-static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
 {
        if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
                return kvm->arch.n_max_mmu_pages -
index 58ead7db71a312764b56d9f242e84820239eeb93..e39741997893a977fdda077ff637bf465fbb1748 100644 (file)
@@ -281,9 +281,13 @@ static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 {
        bool fast_mode = idx & (1u << 31);
+       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u64 ctr_val;
 
+       if (!pmu->version)
+               return 1;
+
        if (is_vmware_backdoor_pmc(idx))
                return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
 
index e0a791c3d4fcc6bb3b1426632e828196953101d5..406b558abfef7379eb46bd2de18e5d6890079eb9 100644 (file)
@@ -262,6 +262,7 @@ struct amd_svm_iommu_ir {
 };
 
 #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK   (0xFF)
+#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT                        31
 #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK               (1 << 31)
 
 #define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK   (0xFFULL)
@@ -2692,6 +2693,7 @@ static int npf_interception(struct vcpu_svm *svm)
 static int db_interception(struct vcpu_svm *svm)
 {
        struct kvm_run *kvm_run = svm->vcpu.run;
+       struct kvm_vcpu *vcpu = &svm->vcpu;
 
        if (!(svm->vcpu.guest_debug &
              (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
@@ -2702,6 +2704,8 @@ static int db_interception(struct vcpu_svm *svm)
 
        if (svm->nmi_singlestep) {
                disable_nmi_singlestep(svm);
+               /* Make sure we check for pending NMIs upon entry */
+               kvm_make_request(KVM_REQ_EVENT, vcpu);
        }
 
        if (svm->vcpu.guest_debug &
@@ -4517,14 +4521,25 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
                kvm_lapic_reg_write(apic, APIC_ICR, icrl);
                break;
        case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
+               int i;
+               struct kvm_vcpu *vcpu;
+               struct kvm *kvm = svm->vcpu.kvm;
                struct kvm_lapic *apic = svm->vcpu.arch.apic;
 
                /*
-                * Update ICR high and low, then emulate sending IPI,
-                * which is handled when writing APIC_ICR.
+                * At this point, we expect that the AVIC HW has already
+                * set the appropriate IRR bits on the valid target
+                * vcpus. So, we just need to kick the appropriate vcpu.
                 */
-               kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
-               kvm_lapic_reg_write(apic, APIC_ICR, icrl);
+               kvm_for_each_vcpu(i, vcpu, kvm) {
+                       bool m = kvm_apic_match_dest(vcpu, apic,
+                                                    icrl & KVM_APIC_SHORT_MASK,
+                                                    GET_APIC_DEST_FIELD(icrh),
+                                                    icrl & KVM_APIC_DEST_MASK);
+
+                       if (m && !avic_vcpu_is_running(vcpu))
+                               kvm_vcpu_wake_up(vcpu);
+               }
                break;
        }
        case AVIC_IPI_FAILURE_INVALID_TARGET:
@@ -4596,7 +4611,7 @@ static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
        u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);
 
        if (entry)
-               WRITE_ONCE(*entry, (u32) ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK);
+               clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
 }
 
 static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
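
The clear_bit() conversion above fixes a subtle clobber: writing (u32)~AVIC_LOGICAL_ID_ENTRY_VALID_MASK stored 0x7fffffff over the whole logical-ID entry, wiping the guest physical APIC ID in bits 7:0, whereas clear_bit() drops only the valid bit. With an example entry value:

    /* entry = 0x80000042  (valid = 1, guest physical APIC ID = 0x42)
     *   old: WRITE_ONCE(*entry, (u32)~VALID_MASK)         -> 0x7fffffff (ID lost)
     *   new: clear_bit(VALID_BIT, (unsigned long *)entry) -> 0x00000042
     */
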
@@ -5621,6 +5636,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
        clgi();
+       kvm_load_guest_xcr0(vcpu);
 
        /*
         * If this vCPU has touched SPEC_CTRL, restore the guest's value if
@@ -5766,6 +5782,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
                kvm_before_interrupt(&svm->vcpu);
 
+       kvm_put_guest_xcr0(vcpu);
        stgi();
 
        /* Any pending NMI will happen here */
@@ -6215,32 +6232,24 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        return 0;
 }
 
-static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *nested_vmcb;
        struct page *page;
-       struct {
-               u64 guest;
-               u64 vmcb;
-       } svm_state_save;
-       int ret;
+       u64 guest;
+       u64 vmcb;
 
-       ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
-                                 sizeof(svm_state_save));
-       if (ret)
-               return ret;
+       guest = GET_SMSTATE(u64, smstate, 0x7ed8);
+       vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
 
-       if (svm_state_save.guest) {
-               vcpu->arch.hflags &= ~HF_SMM_MASK;
-               nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
-               if (nested_vmcb)
-                       enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
-               else
-                       ret = 1;
-               vcpu->arch.hflags |= HF_SMM_MASK;
+       if (guest) {
+               nested_vmcb = nested_svm_map(svm, vmcb, &page);
+               if (!nested_vmcb)
+                       return 1;
+               enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
        }
-       return ret;
+       return 0;
 }
 
 static int enable_smi_window(struct kvm_vcpu *vcpu)
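
svm_pre_leave_smm() no longer re-reads guest memory: the two fields it needs are already inside the 512-byte state-save copy fetched by em_rsm(). The offsets line up because the buffer starts at smbase + 0xfe00, i.e. state-save offset 0x7e00, so (as a compile-time sanity check one could write):

    /* GET_SMSTATE(u64, smstate, 0x7ed8) reads the same bytes the old
     * kvm_vcpu_read_guest() fetched from smbase + 0xfed8, and 0x7ee0
     * likewise maps to smbase + 0xfee0. */
    BUILD_BUG_ON(0x7ed8 - 0x7e00 != 0xfed8 - 0xfe00);
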
index 6432d08c7de79ccbde654b7ab17c9649b75a25c2..4d47a2631d1fb46d9f913b59743cb5417d7401c6 100644 (file)
@@ -438,13 +438,13 @@ TRACE_EVENT(kvm_apic_ipi,
 );
 
 TRACE_EVENT(kvm_apic_accept_irq,
-           TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec),
+           TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
            TP_ARGS(apicid, dm, tm, vec),
 
        TP_STRUCT__entry(
                __field(        __u32,          apicid          )
                __field(        __u16,          dm              )
-               __field(        __u8,           tm              )
+               __field(        __u16,          tm              )
                __field(        __u8,           vec             )
        ),
 
index 7ec9bb1dd72315d7c725c3e197639b09ee3e6386..6401eb7ef19ce0e9f9258b617e001dfdac534a2a 100644 (file)
@@ -2873,20 +2873,27 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
                /*
                 * If translation failed, VM entry will fail because
                 * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
-                * Failing the vm entry is _not_ what the processor
-                * does but it's basically the only possibility we
-                * have.  We could still enter the guest if CR8 load
-                * exits are enabled, CR8 store exits are enabled, and
-                * virtualize APIC access is disabled; in this case
-                * the processor would never use the TPR shadow and we
-                * could simply clear the bit from the execution
-                * control.  But such a configuration is useless, so
-                * let's keep the code simple.
                 */
                if (!is_error_page(page)) {
                        vmx->nested.virtual_apic_page = page;
                        hpa = page_to_phys(vmx->nested.virtual_apic_page);
                        vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
+               } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
+                          nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
+                          !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+                       /*
+                        * The processor will never use the TPR shadow, simply
+                        * clear the bit from the execution control.  Such a
+                        * configuration is useless, but it happens in tests.
+                        * For any other configuration, failing the vm entry is
+                        * _not_ what the processor does but it's basically the
+                        * only possibility we have.
+                        */
+                       vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
+                                       CPU_BASED_TPR_SHADOW);
+               } else {
+                       printk("bad virtual-APIC page address\n");
+                       dump_vmcs();
                }
        }
 
@@ -3789,8 +3796,18 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
        vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
 
        nested_ept_uninit_mmu_context(vcpu);
-       vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
-       __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+
+       /*
+        * This is only valid if EPT is in use, otherwise the vmcs01 GUEST_CR3
+        * points to shadow pages!  Fortunately we only get here after a WARN_ON
+        * if EPT is disabled, so a VMabort is perfectly fine.
+        */
+       if (enable_ept) {
+               vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+               __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+       } else {
+               nested_vmx_abort(vcpu, VMX_ABORT_VMCS_CORRUPTED);
+       }
 
        /*
         * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
@@ -5738,6 +5755,14 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
 {
        int i;
 
+       /*
+        * Without EPT it is not possible to restore L1's CR3 and PDPTR on
+        * VMfail, because they are not available in vmcs01.  Just always
+        * use hardware checks.
+        */
+       if (!enable_ept)
+               nested_early_check = 1;
+
        if (!cpu_has_vmx_shadow_vmcs())
                enable_shadow_vmcs = 0;
        if (enable_shadow_vmcs) {
index ab432a930ae865d0000d8273643de236d0738fb8..b4e7d645275a2153c42fa252cce8a8cbb930b59e 100644 (file)
@@ -5603,7 +5603,7 @@ static void vmx_dump_dtsel(char *name, uint32_t limit)
               vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
 }
 
-static void dump_vmcs(void)
+void dump_vmcs(void)
 {
        u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
        u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
@@ -6410,6 +6410,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
                vmx_set_interrupt_shadow(vcpu, 0);
 
+       kvm_load_guest_xcr0(vcpu);
+
        if (static_cpu_has(X86_FEATURE_PKU) &&
            kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
            vcpu->arch.pkru != vmx->host_pkru)
@@ -6506,6 +6508,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
                        __write_pkru(vmx->host_pkru);
        }
 
+       kvm_put_guest_xcr0(vcpu);
+
        vmx->nested.nested_run_pending = 0;
        vmx->idt_vectoring_info = 0;
 
@@ -6852,6 +6856,30 @@ static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
        }
 }
 
+static bool guest_cpuid_has_pmu(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *entry;
+       union cpuid10_eax eax;
+
+       entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
+       if (!entry)
+               return false;
+
+       eax.full = entry->eax;
+       return (eax.split.version_id > 0);
+}
+
+static void nested_vmx_procbased_ctls_update(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       bool pmu_enabled = guest_cpuid_has_pmu(vcpu);
+
+       if (pmu_enabled)
+               vmx->nested.msrs.procbased_ctls_high |= CPU_BASED_RDPMC_EXITING;
+       else
+               vmx->nested.msrs.procbased_ctls_high &= ~CPU_BASED_RDPMC_EXITING;
+}
+
 static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6940,6 +6968,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
        if (nested_vmx_allowed(vcpu)) {
                nested_vmx_cr_fixed1_bits_update(vcpu);
                nested_vmx_entry_exit_ctls_update(vcpu);
+               nested_vmx_procbased_ctls_update(vcpu);
        }
 
        if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
@@ -7369,7 +7398,7 @@ static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        return 0;
 }
 
-static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        int ret;
@@ -7380,9 +7409,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
        }
 
        if (vmx->nested.smm.guest_mode) {
-               vcpu->arch.hflags &= ~HF_SMM_MASK;
                ret = nested_vmx_enter_non_root_mode(vcpu, false);
-               vcpu->arch.hflags |= HF_SMM_MASK;
                if (ret)
                        return ret;
 
index a1e00d0a2482c16b81be561c30a4d10d3233975b..f879529906b48cd84e99cc0f672210aaeaffeabd 100644 (file)
@@ -517,4 +517,6 @@ static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
        vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
 }
 
+void dump_vmcs(void);
+
 #endif /* __KVM_X86_VMX_H */
index 099b851dabafd7e2980f96472209777f9cc8f77b..a0d1fc80ac5a8407c123d8df12eb2215d4d70392 100644 (file)
@@ -800,7 +800,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
 {
        if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
                        !vcpu->guest_xcr0_loaded) {
@@ -810,8 +810,9 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
                vcpu->guest_xcr0_loaded = 1;
        }
 }
+EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
 
-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 {
        if (vcpu->guest_xcr0_loaded) {
                if (vcpu->arch.xcr0 != host_xcr0)
@@ -819,6 +820,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
                vcpu->guest_xcr0_loaded = 0;
        }
 }
+EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
 
 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
@@ -3093,7 +3095,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                break;
        case KVM_CAP_NESTED_STATE:
                r = kvm_x86_ops->get_nested_state ?
-                       kvm_x86_ops->get_nested_state(NULL, 0, 0) : 0;
+                       kvm_x86_ops->get_nested_state(NULL, NULL, 0) : 0;
                break;
        default:
                break;
@@ -3528,7 +3530,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
        memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
-static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
+static void kvm_smm_changed(struct kvm_vcpu *vcpu);
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                                              struct kvm_vcpu_events *events)
@@ -3588,12 +3590,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
        if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
-               u32 hflags = vcpu->arch.hflags;
-               if (events->smi.smm)
-                       hflags |= HF_SMM_MASK;
-               else
-                       hflags &= ~HF_SMM_MASK;
-               kvm_set_hflags(vcpu, hflags);
+               if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
+                       if (events->smi.smm)
+                               vcpu->arch.hflags |= HF_SMM_MASK;
+                       else
+                               vcpu->arch.hflags &= ~HF_SMM_MASK;
+                       kvm_smm_changed(vcpu);
+               }
 
                vcpu->arch.smi_pending = events->smi.pending;
 
@@ -4270,7 +4273,7 @@ static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
 }
 
 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
-                                         u32 kvm_nr_mmu_pages)
+                                        unsigned long kvm_nr_mmu_pages)
 {
        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                return -EINVAL;
@@ -4284,7 +4287,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
        return 0;
 }
 
-static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
+static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 {
        return kvm->arch.n_max_mmu_pages;
 }
@@ -5958,12 +5961,18 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
 
 static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
 {
-       kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
+       emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
+}
+
+static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+                                 const char *smstate)
+{
+       return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smstate);
 }
 
-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
 {
-       return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
+       kvm_smm_changed(emul_to_vcpu(ctxt));
 }
 
 static const struct x86_emulate_ops emulate_ops = {
@@ -6006,6 +6015,7 @@ static const struct x86_emulate_ops emulate_ops = {
        .get_hflags          = emulator_get_hflags,
        .set_hflags          = emulator_set_hflags,
        .pre_leave_smm       = emulator_pre_leave_smm,
+       .post_leave_smm      = emulator_post_leave_smm,
 };
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -6247,16 +6257,6 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu)
        kvm_mmu_reset_context(vcpu);
 }
 
-static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
-{
-       unsigned changed = vcpu->arch.hflags ^ emul_flags;
-
-       vcpu->arch.hflags = emul_flags;
-
-       if (changed & HF_SMM_MASK)
-               kvm_smm_changed(vcpu);
-}
-
 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
                                unsigned long *db)
 {
@@ -7441,9 +7441,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
        put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
 }
 
+#ifdef CONFIG_X86_64
 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 {
-#ifdef CONFIG_X86_64
        struct desc_ptr dt;
        struct kvm_segment seg;
        unsigned long val;
@@ -7493,10 +7493,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 
        for (i = 0; i < 6; i++)
                enter_smm_save_seg_64(vcpu, buf, i);
-#else
-       WARN_ON_ONCE(1);
-#endif
 }
+#endif
 
 static void enter_smm(struct kvm_vcpu *vcpu)
 {
@@ -7507,9 +7505,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 
        trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
        memset(buf, 0, 512);
+#ifdef CONFIG_X86_64
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                enter_smm_save_state_64(vcpu, buf);
        else
+#endif
                enter_smm_save_state_32(vcpu, buf);
 
        /*
@@ -7567,8 +7567,10 @@ static void enter_smm(struct kvm_vcpu *vcpu)
        kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
        kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
 
+#ifdef CONFIG_X86_64
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                kvm_x86_ops->set_efer(vcpu, 0);
+#endif
 
        kvm_update_cpuid(vcpu);
        kvm_mmu_reset_context(vcpu);
@@ -7865,8 +7867,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                goto cancel_injection;
        }
 
-       kvm_load_guest_xcr0(vcpu);
-
        if (req_immediate_exit) {
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_x86_ops->request_immediate_exit(vcpu);
@@ -7919,8 +7919,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
 
-       kvm_put_guest_xcr0(vcpu);
-
        kvm_before_interrupt(vcpu);
        kvm_x86_ops->handle_external_intr(vcpu);
        kvm_after_interrupt(vcpu);
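
With the two removals above, kvm_load_guest_xcr0()/kvm_put_guest_xcr0() no longer bracket all of vcpu_enter_guest(); they now sit inside the vendor run paths (after clgi() on SVM, around the PKRU handling on VMX), so the guest's XCR0 is live only while interrupts are off. The intended ordering, sketched rather than quoted from either vendor module — the apparent rationale being that host code run from preemption or an interrupt must never execute with guest XCR0 loaded:

    local_irq_disable();            /* clgi() on SVM */
    kvm_load_guest_xcr0(vcpu);      /* guest XCR0 live, IRQs off */
    /* ... hardware VM entry / VM exit ... */
    kvm_put_guest_xcr0(vcpu);       /* host XCR0 restored ...    */
    local_irq_enable();             /* ... before stgi()/IRQs on */
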
index 28406aa1136d7eb772ed712f9df34ffe14290e66..aedc5d0d4989b3fc7422c17e55fc6b65bfef06a3 100644 (file)
@@ -347,4 +347,6 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
        __this_cpu_write(current_vcpu, NULL);
 }
 
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
 #endif
index ee8f8ab469417c6eb0f06aa6d4390a0eec8162b1..c0309ea9abee4201b1f697e9e61e20296f359736 100644 (file)
@@ -259,7 +259,8 @@ static void note_wx(struct pg_state *st)
 #endif
        /* Account the WX pages */
        st->wx_pages += npages;
-       WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %pS\n",
+       WARN_ONCE(__supported_pte_mask & _PAGE_NX,
+                 "x86/mm: Found insecure W+X mapping at address %pS\n",
                  (void *)st->start_address);
 }
 
index f905a2371080beee339dac3f5dad13d0ab1e7ef8..8dacdb96899ec5a76749751d2675b5b827855141 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/memblock.h>
 #include <linux/swapfile.h>
 #include <linux/swapops.h>
+#include <linux/kmemleak.h>
 
 #include <asm/set_memory.h>
 #include <asm/e820/api.h>
@@ -766,6 +767,11 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
        if (debug_pagealloc_enabled()) {
                pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
                        begin, end - 1);
+               /*
+                * Inform kmemleak about the hole in the memory since the
+                * corresponding pages will be unmapped.
+                */
+               kmemleak_free_part((void *)begin, end - begin);
                set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
        } else {
                /*
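
For context: kmemleak tracks byte ranges and periodically scans them for pointers, and set_memory_np() makes these pages not-present, so a later scan would fault. The pattern, sketched (untrack the span before making it inaccessible):

    #include <linux/kmemleak.h>

    /* Stop tracking [begin, end) so kmemleak's scanner never
     * dereferences the pages unmapped right after. */
    kmemleak_free_part((void *)begin, end - begin);
    set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
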
index 0029604af8a411397c019f066fae8dee7df8c805..dd73d5d74393f7c987e9c4c18fde1f698d9213ae 100644 (file)
@@ -825,7 +825,7 @@ void __init __early_set_fixmap(enum fixed_addresses idx,
        pte = early_ioremap_pte(addr);
 
        /* Sanitize 'prot' against any unsupported bits: */
-       pgprot_val(flags) &= __default_kernel_pte_mask;
+       pgprot_val(flags) &= __supported_pte_mask;
 
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
index 3f452ffed7e93f377aa1ae38150a1f2cf7e91a5c..d669c5e797e06e27a891f099739deb21e165673b 100644 (file)
@@ -94,7 +94,7 @@ void __init kernel_randomize_memory(void)
        if (!kaslr_memory_enabled())
                return;
 
-       kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT);
+       kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
        kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;
 
        /*
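
The one-liner matters when CONFIG_X86_5LEVEL is built but the machine boots with 4-level paging: __PHYSICAL_MASK_SHIFT then stays 52 regardless, while MAX_PHYSMEM_BITS tracks the live paging mode (46 vs 52, per my reading of the headers). With TB_SHIFT == 40 the region sizing works out as:

    /* old: 1 << (52 - 40) = 4096 TiB   (5-level size even on 4-level boots)
     * new: 1 << (46 - 40) =   64 TiB   (4-level direct map, correct)
     *      1 << (52 - 40) = 4096 TiB   (still used when 5-level is live) */
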
index bc4bc7b2f075d3f302ba25dc261b759ab89dab97..487b8474c01cde006241a4c9a732bfe6aae53ff6 100644 (file)
@@ -728,7 +728,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 {
        int cpu;
 
-       struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = {
+       struct flush_tlb_info info = {
                .mm = mm,
                .stride_shift = stride_shift,
                .freed_tables = freed_tables,
index 6af49929de857b24f9ddb134ef79efd0f7c08e72..30084eaf84227ac89eb6e5baa0aed77d6fad5f50 100644 (file)
 421    common  rt_sigtimedwait_time64          sys_rt_sigtimedwait
 422    common  futex_time64                    sys_futex
 423    common  sched_rr_get_interval_time64    sys_sched_rr_get_interval
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
index dfb8cb0af13a872737e07647de307e9e44017f0d..5ba1e0d841b4d552e3858ad30888e489669772bd 100644 (file)
@@ -5396,7 +5396,7 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
        return min_shallow;
 }
 
-static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
 {
        struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
        struct blk_mq_tags *tags = hctx->sched_tags;
@@ -5404,6 +5404,11 @@ static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
 
        min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
        sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
+}
+
+static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+{
+       bfq_depth_updated(hctx);
        return 0;
 }
 
@@ -5826,6 +5831,7 @@ static struct elevator_type iosched_bfq_mq = {
                .requests_merged        = bfq_requests_merged,
                .request_merged         = bfq_request_merged,
                .has_work               = bfq_has_work,
+               .depth_updated          = bfq_depth_updated,
                .init_hctx              = bfq_init_hctx,
                .init_sched             = bfq_init_queue,
                .exit_sched             = bfq_exit_queue,
index 9516304a38ee37c70f897185e6bfe94fc7fe79f8..fc60ed7e940ead5ae7d7332ee9f64b9ffe922aca 100644 (file)
@@ -3135,6 +3135,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
                }
                if (ret)
                        break;
+               if (q->elevator && q->elevator->type->ops.depth_updated)
+                       q->elevator->type->ops.depth_updated(hctx);
        }
 
        if (!ret)
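
These two files add and wire a new elevator hook: blk_mq_update_nr_requests() resizes the scheduler tags and then lets the elevator react, which is how bfq keeps its shallow-depth limit in sync when nr_requests is changed via sysfs. A toy scheduler using the same hook (invented names; the body mirrors bfq above):

    static void toy_depth_updated(struct blk_mq_hw_ctx *hctx)
    {
            struct blk_mq_tags *tags = hctx->sched_tags;

            /* Recompute anything derived from the (new) tag depth. */
            sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1U);
    }

    static struct elevator_type toy_sched = {
            .ops = {
                    .depth_updated = toy_depth_updated,
            },
    };
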
index 0430ccd08728655ecf4e44c65ef16e956bd928c1..08a0e458bc3e62dcb17e0dbec9151290b895aa33 100644 (file)
@@ -212,8 +212,12 @@ static void crypt_done(struct crypto_async_request *areq, int err)
 {
        struct skcipher_request *req = areq->data;
 
-       if (!err)
+       if (!err) {
+               struct rctx *rctx = skcipher_request_ctx(req);
+
+               rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
                err = xor_tweak_post(req);
+       }
 
        skcipher_request_complete(req, err);
 }
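
crypt_done() can run from softirq context once the inner cipher completes asynchronously, and xor_tweak_post() walks the request again; with CRYPTO_TFM_REQ_MAY_SLEEP still set that walk could sleep in atomic context, hence the flag is cleared first (the xts copy of this fix appears further below). How such helpers typically consume the flag, as a sketch:

    gfp_t gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                GFP_KERNEL : GFP_ATOMIC;
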
index f267633cf13ac83e81d7c1e8c7462e18228a2e38..d18a37629f0537271fcfd95912e0b9f1eac2133e 100644 (file)
@@ -5634,7 +5634,49 @@ static const struct hash_testvec poly1305_tv_template[] = {
                .psize          = 80,
                .digest         = "\x13\x00\x00\x00\x00\x00\x00\x00"
                                  "\x00\x00\x00\x00\x00\x00\x00\x00",
-       },
+       }, { /* Regression test for overflow in AVX2 implementation */
+               .plaintext      = "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff",
+               .psize          = 300,
+               .digest         = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
+                                 "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
+       }
 };
 
 /* NHPoly1305 test vectors from https://github.com/google/adiantum */
index 847f54f7678972dcc50c06b867ac2842a7535b4a..2f948328cabbd97f8504941b4fd50cce1122b086 100644 (file)
@@ -137,8 +137,12 @@ static void crypt_done(struct crypto_async_request *areq, int err)
 {
        struct skcipher_request *req = areq->data;
 
-       if (!err)
+       if (!err) {
+               struct rctx *rctx = skcipher_request_ctx(req);
+
+               rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
                err = xor_tweak_post(req);
+       }
 
        skcipher_request_complete(req, err);
 }
index 5a389a4f4f652edda26c109baf5e595bf6325903..f1ed0befe303d241c4537e446daad3726e62dbb4 100644 (file)
@@ -567,6 +567,12 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                goto out;
        }
 
+       dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
+                       cmd_name, out_obj->buffer.length);
+       print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
+                       out_obj->buffer.pointer,
+                       min_t(u32, 128, out_obj->buffer.length), true);
+
        if (call_pkg) {
                call_pkg->nd_fw_size = out_obj->buffer.length;
                memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
@@ -585,12 +591,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                return 0;
        }
 
-       dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
-                       cmd_name, out_obj->buffer.length);
-       print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
-                       out_obj->buffer.pointer,
-                       min_t(u32, 128, out_obj->buffer.length), true);
-
        for (i = 0, offset = 0; i < desc->out_num; i++) {
                u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
                                (u32 *) out_obj->buffer.pointer,
index f70de71f79d6a699442f430dfa6606ad18a8f2dc..cddd0fcf622c3314f7115e86124e9dde5a5f98ff 100644 (file)
@@ -122,9 +122,8 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
        if (!test_bit(cmd, &nfit_mem->dsm_mask))
                return -ENOTTY;
 
-       if (old_data)
-               memcpy(nd_cmd.cmd.old_pass, old_data->data,
-                               sizeof(nd_cmd.cmd.old_pass));
+       memcpy(nd_cmd.cmd.old_pass, old_data->data,
+                       sizeof(nd_cmd.cmd.old_pass));
        memcpy(nd_cmd.cmd.new_pass, new_data->data,
                        sizeof(nd_cmd.cmd.new_pass));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -336,9 +335,8 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
 
        /* flush all cache before we erase DIMM */
        nvdimm_invalidate_cache();
-       if (nkey)
-               memcpy(nd_cmd.cmd.passphrase, nkey->data,
-                               sizeof(nd_cmd.cmd.passphrase));
+       memcpy(nd_cmd.cmd.passphrase, nkey->data,
+                       sizeof(nd_cmd.cmd.passphrase));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
        if (rc < 0)
                return rc;
index a303fd0e108ccbe8180d233462deea15b5f570f9..c73d3a62799a6637a5c77cec5b27a16266242f51 100644 (file)
@@ -181,7 +181,7 @@ void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
                        acpi_processor_ppc_ost(pr->handle, 0);
        }
        if (ret >= 0)
-               cpufreq_update_policy(pr->id);
+               cpufreq_update_limits(pr->id);
 }
 
 int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
index 11e1663bdc4dee0e2cfd7cd9ba61783d00277bbf..b2c06da4f62e336ce262f1445e3329803da4d5cb 100644 (file)
@@ -1646,7 +1646,7 @@ static irqreturn_t fs_irq (int irq, void *dev_id)
        }
 
        if (status & ISR_TBRQ_W) {
-               fs_dprintk (FS_DEBUG_IRQ, "Data tramsitted!\n");
+               fs_dprintk (FS_DEBUG_IRQ, "Data transmitted!\n");
                process_txdone_queue (dev, &dev->tx_relq);
        }
 
index cb8347500ce2871e5003d8ce45a4014a97b8de3e..e49028a604295937a59761488d431b9f4837f731 100644 (file)
@@ -506,7 +506,7 @@ static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
 
        ret = lock_device_hotplug_sysfs();
        if (ret)
-               goto out;
+               return ret;
 
        nid = memory_add_physaddr_to_nid(phys_addr);
        ret = __add_memory(nid, phys_addr,
index 96a6dc9d305c88b842258f4ed91cc7b7b569d506..3d899e8abd585f09f49f8d5de7218a8d2357db55 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/sched.h>
 #include <linux/suspend.h>
 #include <linux/export.h>
+#include <linux/cpu.h>
 
 #include "power.h"
 
@@ -128,6 +129,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
 #define genpd_is_irq_safe(genpd)       (genpd->flags & GENPD_FLAG_IRQ_SAFE)
 #define genpd_is_always_on(genpd)      (genpd->flags & GENPD_FLAG_ALWAYS_ON)
 #define genpd_is_active_wakeup(genpd)  (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
+#define genpd_is_cpu_domain(genpd)     (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
 
 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
                const struct generic_pm_domain *genpd)
@@ -391,11 +393,9 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
        if (unlikely(!genpd->set_performance_state))
                return -EINVAL;
 
-       if (unlikely(!dev->power.subsys_data ||
-                    !dev->power.subsys_data->domain_data)) {
-               WARN_ON(1);
+       if (WARN_ON(!dev->power.subsys_data ||
+                    !dev->power.subsys_data->domain_data))
                return -EINVAL;
-       }
 
        genpd_lock(genpd);
 
@@ -1396,8 +1396,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
 
 #endif /* CONFIG_PM_SLEEP */
 
-static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
-                                       struct gpd_timing_data *td)
+static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
 {
        struct generic_pm_domain_data *gpd_data;
        int ret;
@@ -1412,9 +1411,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
                goto err_put;
        }
 
-       if (td)
-               gpd_data->td = *td;
-
        gpd_data->base.dev = dev;
        gpd_data->td.constraint_changed = true;
        gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
@@ -1454,8 +1450,57 @@ static void genpd_free_dev_data(struct device *dev,
        dev_pm_put_subsys_data(dev);
 }
 
+static void genpd_update_cpumask(struct generic_pm_domain *genpd,
+                                int cpu, bool set, unsigned int depth)
+{
+       struct gpd_link *link;
+
+       if (!genpd_is_cpu_domain(genpd))
+               return;
+
+       list_for_each_entry(link, &genpd->slave_links, slave_node) {
+               struct generic_pm_domain *master = link->master;
+
+               genpd_lock_nested(master, depth + 1);
+               genpd_update_cpumask(master, cpu, set, depth + 1);
+               genpd_unlock(master);
+       }
+
+       if (set)
+               cpumask_set_cpu(cpu, genpd->cpus);
+       else
+               cpumask_clear_cpu(cpu, genpd->cpus);
+}
+
+static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
+{
+       if (cpu >= 0)
+               genpd_update_cpumask(genpd, cpu, true, 0);
+}
+
+static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
+{
+       if (cpu >= 0)
+               genpd_update_cpumask(genpd, cpu, false, 0);
+}
+
+static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
+{
+       int cpu;
+
+       if (!genpd_is_cpu_domain(genpd))
+               return -1;
+
+       for_each_possible_cpu(cpu) {
+               if (get_cpu_device(cpu) == dev)
+                       return cpu;
+       }
+
+       return -1;
+}
+
 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
-                           struct gpd_timing_data *td)
+                           struct device *base_dev)
 {
        struct generic_pm_domain_data *gpd_data;
        int ret;
@@ -1465,16 +1510,19 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
                return -EINVAL;
 
-       gpd_data = genpd_alloc_dev_data(dev, td);
+       gpd_data = genpd_alloc_dev_data(dev);
        if (IS_ERR(gpd_data))
                return PTR_ERR(gpd_data);
 
+       gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
+
        ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
        if (ret)
                goto out;
 
        genpd_lock(genpd);
 
+       genpd_set_cpumask(genpd, gpd_data->cpu);
        dev_pm_domain_set(dev, &genpd->domain);
 
        genpd->device_count++;
@@ -1502,7 +1550,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
        int ret;
 
        mutex_lock(&gpd_list_lock);
-       ret = genpd_add_device(genpd, dev, NULL);
+       ret = genpd_add_device(genpd, dev, dev);
        mutex_unlock(&gpd_list_lock);
 
        return ret;
@@ -1532,6 +1580,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
        genpd->device_count--;
        genpd->max_off_time_changed = true;
 
+       genpd_clear_cpumask(genpd, gpd_data->cpu);
        dev_pm_domain_set(dev, NULL);
 
        list_del_init(&pdd->list_node);
@@ -1686,6 +1735,12 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 }
 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
 
+static void genpd_free_default_power_state(struct genpd_power_state *states,
+                                          unsigned int state_count)
+{
+       kfree(states);
+}
+
 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
 {
        struct genpd_power_state *state;
@@ -1696,7 +1751,7 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
 
        genpd->states = state;
        genpd->state_count = 1;
-       genpd->free = state;
+       genpd->free_states = genpd_free_default_power_state;
 
        return 0;
 }
@@ -1762,11 +1817,18 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
        if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
                return -EINVAL;
 
+       if (genpd_is_cpu_domain(genpd) &&
+           !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
+               return -ENOMEM;
+
        /* Use only one "off" state if there were no states declared */
        if (genpd->state_count == 0) {
                ret = genpd_set_default_power_state(genpd);
-               if (ret)
+               if (ret) {
+                       if (genpd_is_cpu_domain(genpd))
+                               free_cpumask_var(genpd->cpus);
                        return ret;
+               }
        } else if (!gov && genpd->state_count > 1) {
                pr_warn("%s: no governor for states\n", genpd->name);
        }
@@ -1812,7 +1874,11 @@ static int genpd_remove(struct generic_pm_domain *genpd)
        list_del(&genpd->gpd_list_node);
        genpd_unlock(genpd);
        cancel_work_sync(&genpd->power_off_work);
-       kfree(genpd->free);
+       if (genpd_is_cpu_domain(genpd))
+               free_cpumask_var(genpd->cpus);
+       if (genpd->free_states)
+               genpd->free_states(genpd->states, genpd->state_count);
+
        pr_debug("%s: removed %s\n", __func__, genpd->name);
 
        return 0;
@@ -2190,7 +2256,7 @@ int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
                goto out;
        }
 
-       ret = genpd_add_device(genpd, dev, NULL);
+       ret = genpd_add_device(genpd, dev, dev);
 
 out:
        mutex_unlock(&gpd_list_lock);
@@ -2274,6 +2340,7 @@ EXPORT_SYMBOL_GPL(of_genpd_remove_last);
 
 static void genpd_release_dev(struct device *dev)
 {
+       of_node_put(dev->of_node);
        kfree(dev);
 }
 
@@ -2335,14 +2402,14 @@ static void genpd_dev_pm_sync(struct device *dev)
        genpd_queue_power_off_work(pd);
 }
 
-static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
+static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
                                 unsigned int index, bool power_on)
 {
        struct of_phandle_args pd_args;
        struct generic_pm_domain *pd;
        int ret;
 
-       ret = of_parse_phandle_with_args(np, "power-domains",
+       ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
                                "#power-domain-cells", index, &pd_args);
        if (ret < 0)
                return ret;
@@ -2354,12 +2421,12 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
                mutex_unlock(&gpd_list_lock);
                dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
                        __func__, PTR_ERR(pd));
-               return driver_deferred_probe_check_state(dev);
+               return driver_deferred_probe_check_state(base_dev);
        }
 
        dev_dbg(dev, "adding to PM domain %s\n", pd->name);
 
-       ret = genpd_add_device(pd, dev, NULL);
+       ret = genpd_add_device(pd, dev, base_dev);
        mutex_unlock(&gpd_list_lock);
 
        if (ret < 0) {
@@ -2410,7 +2477,7 @@ int genpd_dev_pm_attach(struct device *dev)
                                       "#power-domain-cells") != 1)
                return 0;
 
-       return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
+       return __genpd_dev_pm_attach(dev, dev, 0, true);
 }
 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
 
@@ -2440,10 +2507,10 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
        if (!dev->of_node)
                return NULL;
 
-       /* Deal only with devices using multiple PM domains. */
+       /* Verify that the index is within a valid range. */
        num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
                                                 "#power-domain-cells");
-       if (num_domains < 2 || index >= num_domains)
+       if (index >= num_domains)
                return NULL;
 
        /* Allocate and register device on the genpd bus. */
@@ -2454,15 +2521,16 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
        dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
        virt_dev->bus = &genpd_bus_type;
        virt_dev->release = genpd_release_dev;
+       virt_dev->of_node = of_node_get(dev->of_node);
 
        ret = device_register(virt_dev);
        if (ret) {
-               kfree(virt_dev);
+               put_device(virt_dev);
                return ERR_PTR(ret);
        }
 
        /* Try to attach the device to the PM domain at the specified index. */
-       ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false);
+       ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
        if (ret < 1) {
                device_unregister(virt_dev);
                return ret ? ERR_PTR(ret) : NULL;
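
Taken together, the domain.c changes let a provider mark a genpd as containing CPUs: genpd then maintains genpd->cpus, propagating each CPU up through master domains, and genpd_get_cpu() ties an attaching device back to its CPU number. A hypothetical provider pairing the flag with the cpuidle-aware governor added in the next file (names invented):

    static struct generic_pm_domain cluster_pd = {
            .name  = "cpu-cluster0",
            .flags = GENPD_FLAG_CPU_DOMAIN | GENPD_FLAG_IRQ_SAFE,
    };

    static int __init cluster_pd_setup(void)
    {
            /* pm_domain_cpu_gov is provided under CONFIG_CPU_IDLE;
             * false == register the domain initially powered on. */
            return pm_genpd_init(&cluster_pd, &pm_domain_cpu_gov, false);
    }
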
index 4d07e38a8247f8ab354a79c9252a9d79d5746f84..7912bc9572447f3b0ee7fe759c93a03f74d4d972 100644 (file)
@@ -10,6 +10,9 @@
 #include <linux/pm_domain.h>
 #include <linux/pm_qos.h>
 #include <linux/hrtimer.h>
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/ktime.h>
 
 static int dev_update_qos_constraint(struct device *dev, void *data)
 {
@@ -210,8 +213,10 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
        struct generic_pm_domain *genpd = pd_to_genpd(pd);
        struct gpd_link *link;
 
-       if (!genpd->max_off_time_changed)
+       if (!genpd->max_off_time_changed) {
+               genpd->state_idx = genpd->cached_power_down_state_idx;
                return genpd->cached_power_down_ok;
+       }
 
        /*
         * We have to invalidate the cached results for the masters, so
@@ -236,6 +241,7 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
                genpd->state_idx--;
        }
 
+       genpd->cached_power_down_state_idx = genpd->state_idx;
        return genpd->cached_power_down_ok;
 }
 
@@ -244,6 +250,65 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
        return false;
 }
 
+#ifdef CONFIG_CPU_IDLE
+static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+{
+       struct generic_pm_domain *genpd = pd_to_genpd(pd);
+       struct cpuidle_device *dev;
+       ktime_t domain_wakeup, next_hrtimer;
+       s64 idle_duration_ns;
+       int cpu, i;
+
+       /* Validate dev PM QoS constraints. */
+       if (!default_power_down_ok(pd))
+               return false;
+
+       if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
+               return true;
+
+       /*
+        * Find the next wakeup for any of the online CPUs within the PM domain
+        * and its subdomains. Note, we only need the genpd->cpus, as it already
+        * contains a mask of all CPUs from subdomains.
+        */
+       domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
+       for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
+               dev = per_cpu(cpuidle_devices, cpu);
+               if (dev) {
+                       next_hrtimer = READ_ONCE(dev->next_hrtimer);
+                       if (ktime_before(next_hrtimer, domain_wakeup))
+                               domain_wakeup = next_hrtimer;
+               }
+       }
+
+       /* The minimum idle duration is from now - until the next wakeup. */
+       idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get()));
+       if (idle_duration_ns <= 0)
+               return false;
+
+       /*
+        * Find the deepest idle state whose residency value is satisfied,
+        * also taking into account the power off latency for the state.
+        * Start at the state picked by the dev PM QoS constraint validation.
+        */
+       i = genpd->state_idx;
+       do {
+               if (idle_duration_ns >= (genpd->states[i].residency_ns +
+                   genpd->states[i].power_off_latency_ns)) {
+                       genpd->state_idx = i;
+                       return true;
+               }
+       } while (--i >= 0);
+
+       return false;
+}
+
+struct dev_power_governor pm_domain_cpu_gov = {
+       .suspend_ok = default_suspend_ok,
+       .power_down_ok = cpu_power_down_ok,
+};
+#endif
+
 struct dev_power_governor simple_qos_governor = {
        .suspend_ok = default_suspend_ok,
        .power_down_ok = default_power_down_ok,
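
The new governor only performs the extra wakeup computation for domains flagged as CPU domains; everything else falls back to the plain QoS check. A platform wiring this up would, roughly, do the following (a sketch: my_pd and its idle-state table are hypothetical, the genpd core fills in genpd->cpus as CPUs are attached, and pm_domain_cpu_gov is assumed to be exposed via the genpd headers as in this series):

    static struct generic_pm_domain my_pd = {
            .name  = "cluster0",
            .flags = GENPD_FLAG_CPU_DOMAIN,
            /* .states / .state_count describe the domain idle states */
    };

    ret = pm_genpd_init(&my_pd, &pm_domain_cpu_gov, false /* is_off */);
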
index f80d298de3fa42d1e535e035e012975054920c6a..43e863cc0c1b82e5d8d3b69c7416f813ef0d5738 100644 (file)
@@ -478,7 +478,7 @@ struct dpm_watchdog {
 
 /**
  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
- * @data: Watchdog object address.
+ * @t: The timer that the PM watchdog depends on.
  *
  * Called when a driver has timed out suspending or resuming.
  * There's not much we can do here to recover so panic() to
@@ -706,6 +706,19 @@ static bool is_async(struct device *dev)
                && !pm_trace_is_enabled();
 }
 
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+       reinit_completion(&dev->power.completion);
+
+       if (is_async(dev)) {
+               get_device(dev);
+               async_schedule(func, dev);
+               return true;
+       }
+
+       return false;
+}
+
 static void async_resume_noirq(void *data, async_cookie_t cookie)
 {
        struct device *dev = (struct device *)data;
@@ -732,13 +745,8 @@ void dpm_noirq_resume_devices(pm_message_t state)
         * in case the starting of async threads is
         * delayed by non-async resuming devices.
         */
-       list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
-               reinit_completion(&dev->power.completion);
-               if (is_async(dev)) {
-                       get_device(dev);
-                       async_schedule_dev(async_resume_noirq, dev);
-               }
-       }
+       list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+               dpm_async_fn(dev, async_resume_noirq);
 
        while (!list_empty(&dpm_noirq_list)) {
                dev = to_device(dpm_noirq_list.next);
@@ -889,13 +897,8 @@ void dpm_resume_early(pm_message_t state)
         * in case the starting of async threads is
         * delayed by non-async resuming devices.
         */
-       list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
-               reinit_completion(&dev->power.completion);
-               if (is_async(dev)) {
-                       get_device(dev);
-                       async_schedule_dev(async_resume_early, dev);
-               }
-       }
+       list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+               dpm_async_fn(dev, async_resume_early);
 
        while (!list_empty(&dpm_late_early_list)) {
                dev = to_device(dpm_late_early_list.next);
@@ -1053,13 +1056,8 @@ void dpm_resume(pm_message_t state)
        pm_transition = state;
        async_error = 0;
 
-       list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
-               reinit_completion(&dev->power.completion);
-               if (is_async(dev)) {
-                       get_device(dev);
-                       async_schedule_dev(async_resume, dev);
-               }
-       }
+       list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+               dpm_async_fn(dev, async_resume);
 
        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
@@ -1373,13 +1371,9 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie)
 
 static int device_suspend_noirq(struct device *dev)
 {
-       reinit_completion(&dev->power.completion);
-
-       if (is_async(dev)) {
-               get_device(dev);
-               async_schedule_dev(async_suspend_noirq, dev);
+       if (dpm_async_fn(dev, async_suspend_noirq))
                return 0;
-       }
+
        return __device_suspend_noirq(dev, pm_transition, false);
 }
 
@@ -1576,13 +1570,8 @@ static void async_suspend_late(void *data, async_cookie_t cookie)
 
 static int device_suspend_late(struct device *dev)
 {
-       reinit_completion(&dev->power.completion);
-
-       if (is_async(dev)) {
-               get_device(dev);
-               async_schedule_dev(async_suspend_late, dev);
+       if (dpm_async_fn(dev, async_suspend_late))
                return 0;
-       }
 
        return __device_suspend_late(dev, pm_transition, false);
 }
@@ -1747,6 +1736,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        if (dev->power.syscore)
                goto Complete;
 
+       /* Avoid direct_complete to let wakeup_path propagate. */
+       if (device_may_wakeup(dev) || dev->power.wakeup_path)
+               dev->power.direct_complete = false;
+
        if (dev->power.direct_complete) {
                if (pm_runtime_status_suspended(dev)) {
                        pm_runtime_disable(dev);
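
direct_complete lets a runtime-suspended device skip the system suspend callbacks entirely, but that would also skip the ->complete() stage through which wakeup_path propagates up the device hierarchy, so the new check opts out any device that may wake the system. A driver typically lands on this path simply by declaring wakeup capability, e.g. in probe (a sketch, assuming a platform device):

    /* After this, the PM core clears power.direct_complete for the
     * device during system suspend, keeping the wakeup path intact. */
    device_init_wakeup(&pdev->dev, true);
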
@@ -1842,13 +1835,8 @@ static void async_suspend(void *data, async_cookie_t cookie)
 
 static int device_suspend(struct device *dev)
 {
-       reinit_completion(&dev->power.completion);
-
-       if (is_async(dev)) {
-               get_device(dev);
-               async_schedule_dev(async_suspend, dev);
+       if (dpm_async_fn(dev, async_suspend))
                return 0;
-       }
 
        return __device_suspend(dev, pm_transition, false);
 }
@@ -2069,8 +2057,8 @@ EXPORT_SYMBOL_GPL(__suspend_report_result);
 
 /**
  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
- * @dev: Device to wait for.
  * @subordinate: Device that needs to wait for @dev.
+ * @dev: Device to wait for.
  */
 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
 {
index bb1ae175fae136a57cd85e7566af617706e29232..23c243a4c6754d73a7e51e42c8f671fbd8dac774 100644 (file)
@@ -804,7 +804,7 @@ void pm_print_active_wakeup_sources(void)
        srcuidx = srcu_read_lock(&wakeup_srcu);
        list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
                if (ws->active) {
-                       pr_debug("active wakeup source: %s\n", ws->name);
+                       pm_pr_dbg("active wakeup source: %s\n", ws->name);
                        active = 1;
                } else if (!active &&
                           (!last_activity_ws ||
@@ -815,7 +815,7 @@ void pm_print_active_wakeup_sources(void)
        }
 
        if (!active && last_activity_ws)
-               pr_debug("last active wakeup source: %s\n",
+               pm_pr_dbg("last active wakeup source: %s\n",
                        last_activity_ws->name);
        srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
@@ -845,7 +845,7 @@ bool pm_wakeup_pending(void)
        raw_spin_unlock_irqrestore(&events_lock, flags);
 
        if (ret) {
-               pr_debug("Wakeup pending, aborting suspend\n");
+               pm_pr_dbg("Wakeup pending, aborting suspend\n");
                pm_print_active_wakeup_sources();
        }
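
Unlike pr_debug(), which has to be enabled per call site via dynamic debug (or a DEBUG build), pm_pr_dbg() is additionally gated by the global PM debug switch, so these wakeup messages now appear together with the rest of the suspend/resume debug output once that switch is flipped at run time (via /sys/power/pm_debug_messages). The call itself is a drop-in replacement:

    pm_pr_dbg("active wakeup source: %s\n", ws->name);
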
 
index 399cad7daae77b37508033ec1cac61bebefbc550..d58a359a66225f39682c067739eb9843bae36b80 100644 (file)
@@ -774,18 +774,18 @@ struct zram_work {
        struct zram *zram;
        unsigned long entry;
        struct bio *bio;
+       struct bio_vec bvec;
 };
 
 #if PAGE_SIZE != 4096
 static void zram_sync_read(struct work_struct *work)
 {
-       struct bio_vec bvec;
        struct zram_work *zw = container_of(work, struct zram_work, work);
        struct zram *zram = zw->zram;
        unsigned long entry = zw->entry;
        struct bio *bio = zw->bio;
 
-       read_from_bdev_async(zram, &bvec, entry, bio);
+       read_from_bdev_async(zram, &zw->bvec, entry, bio);
 }
 
 /*
@@ -798,6 +798,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
 {
        struct zram_work work;
 
+       work.bvec = *bvec;
        work.zram = zram;
        work.entry = entry;
        work.bio = bio;
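
The bug being fixed here is a classic lifetime error: a bio_vec on the function's stack was handed to an asynchronous read, so the vector could go out of scope before the I/O consumed it. Moving the bvec into struct zram_work ties its lifetime to the whole request. The general shape of the fix, as a sketch with hypothetical my_work/start_async_io names:

    struct my_work {
            struct work_struct work;
            struct bio_vec bvec;    /* lives as long as the request */
    };

    static void my_handler(struct work_struct *work)
    {
            struct my_work *zw = container_of(work, struct my_work, work);

            start_async_io(&zw->bvec);      /* safe: zw outlives the I/O */
    }
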
index ff0b199be4729757743bbd72bff2fc61a842d291..f2411468f33ff44707e45ab34cd359d2c3b5a0f0 100644 (file)
@@ -66,7 +66,6 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
                return;
        }
 
-       memset(&p, 0, sizeof(p));
        p.addr = base_addr;
        p.space = space;
        p.regspacing = offset;
index e8ba678347466db181a08768158e56930090aa7b..00bf4b17edbfafb5c9d25cb524f35e8d59c7a074 100644 (file)
@@ -214,6 +214,9 @@ struct ipmi_user {
 
        /* Does this interface receive IPMI events? */
        bool gets_events;
+
+       /* Free must run in process context for RCU cleanup. */
+       struct work_struct remove_work;
 };
 
 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
@@ -1157,6 +1160,15 @@ static int intf_err_seq(struct ipmi_smi *intf,
        return rv;
 }
 
+static void free_user_work(struct work_struct *work)
+{
+       struct ipmi_user *user = container_of(work, struct ipmi_user,
+                                             remove_work);
+
+       cleanup_srcu_struct(&user->release_barrier);
+       kfree(user);
+}
+
 int ipmi_create_user(unsigned int          if_num,
                     const struct ipmi_user_hndl *handler,
                     void                  *handler_data,
@@ -1200,6 +1212,8 @@ int ipmi_create_user(unsigned int          if_num,
        goto out_kfree;
 
  found:
+       INIT_WORK(&new_user->remove_work, free_user_work);
+
        rv = init_srcu_struct(&new_user->release_barrier);
        if (rv)
                goto out_kfree;
@@ -1260,8 +1274,9 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
 static void free_user(struct kref *ref)
 {
        struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
-       cleanup_srcu_struct(&user->release_barrier);
-       kfree(user);
+
+       /* SRCU cleanup must happen in task context. */
+       schedule_work(&user->remove_work);
 }
 
 static void _ipmi_destroy_user(struct ipmi_user *user)
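
kref_put() can drop the final reference from almost any context, but cleanup_srcu_struct() needs to sleep, so the release callback now only queues work and the sleeping teardown runs from the workqueue. The shape of the pattern, sketched with hypothetical names (struct my_obj is assumed to hold the kref, the work_struct initialized with INIT_WORK at creation, and the srcu_struct):

    static void my_free_work(struct work_struct *work)
    {
            struct my_obj *obj = container_of(work, struct my_obj,
                                              free_work);

            cleanup_srcu_struct(&obj->srcu);        /* may sleep */
            kfree(obj);
    }

    static void my_release(struct kref *ref)
    {
            struct my_obj *obj = container_of(ref, struct my_obj, refcount);

            schedule_work(&obj->free_work);         /* defer to task context */
    }
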
index 01946cad3d1381ba7eed020544c775ed9a6e3f5f..682221eebd66101cb67b04e2ac979ab7caeae51c 100644 (file)
@@ -118,6 +118,8 @@ void __init ipmi_hardcode_init(void)
        char *str;
        char *si_type[SI_MAX_PARMS];
 
+       memset(si_type, 0, sizeof(si_type));
+
        /* Parse out the si_type string into its components. */
        str = si_type_str;
        if (*str != '\0') {
index 171502a356aa1fb19bf285cdc5aade19ab861723..4b3d143f0f8a4445df12fcf589bb67fdfaa985df 100644 (file)
@@ -145,6 +145,7 @@ config VT8500_TIMER
 config NPCM7XX_TIMER
        bool "NPCM7xx timer driver" if COMPILE_TEST
        depends on HAS_IOMEM
+       select TIMER_OF
        select CLKSRC_MMIO
        help
          Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
index aa4ec53281cea585214c3a5f8b4faf941e7e7bd3..ea373cfbcecb5d8241f6a176a4a32a86a630c083 100644 (file)
@@ -9,7 +9,7 @@
  * published by the Free Software Foundation.
  */
 
-#define pr_fmt(fmt)    "arm_arch_timer: " fmt
+#define pr_fmt(fmt)    "arch_timer: " fmt
 
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -33,9 +33,6 @@
 
 #include <clocksource/arm_arch_timer.h>
 
-#undef pr_fmt
-#define pr_fmt(fmt) "arch_timer: " fmt
-
 #define CNTTIDR                0x08
 #define CNTTIDR_VIRT(n)        (BIT(1) << ((n) * 4))
 
index eed6feff8b5f23673de989932afcd806e858ecfc..30c6f4ce672b3b1ac16645159398c66e3129aae9 100644 (file)
@@ -296,4 +296,4 @@ static int __init oxnas_rps_timer_init(struct device_node *np)
 TIMER_OF_DECLARE(ox810se_rps,
                       "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
 TIMER_OF_DECLARE(ox820_rps,
-                      "oxsemi,ox820se-rps-timer", oxnas_rps_timer_init);
+                      "oxsemi,ox820-rps-timer", oxnas_rps_timer_init);
index 3352da6ed61f39139eb46aba585dfa0136697e80..ee8ec5a8cb1668aa770fb0c99af9dd1dc89a3ad8 100644 (file)
@@ -585,34 +585,6 @@ static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
        return 0;
 }
 
-/* Optimized set_load which removes costly spin wait in timer_start */
-static int omap_dm_timer_set_load_start(struct omap_dm_timer *timer,
-                                       int autoreload, unsigned int load)
-{
-       u32 l;
-
-       if (unlikely(!timer))
-               return -EINVAL;
-
-       omap_dm_timer_enable(timer);
-
-       l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
-       if (autoreload) {
-               l |= OMAP_TIMER_CTRL_AR;
-               omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
-       } else {
-               l &= ~OMAP_TIMER_CTRL_AR;
-       }
-       l |= OMAP_TIMER_CTRL_ST;
-
-       __omap_dm_timer_load_start(timer, l, load, timer->posted);
-
-       /* Save the context */
-       timer->context.tclr = l;
-       timer->context.tldr = load;
-       timer->context.tcrr = load;
-       return 0;
-}
 static int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
                                   unsigned int match)
 {
index b22e6bba71f1510618370bc7ce223cabe38717f1..4d2b33a30292a508453ea9458c43a01df1253af8 100644 (file)
@@ -26,10 +26,6 @@ config CPU_FREQ_GOV_COMMON
        select IRQ_WORK
        bool
 
-config CPU_FREQ_BOOST_SW
-       bool
-       depends on THERMAL
-
 config CPU_FREQ_STAT
        bool "CPU frequency transition statistics"
        help
index c72258a44ba4d1e090dedc37b08bb42ec32cac70..73bb2aafb1a8f30fe137ac6e61b61e46608ed492 100644 (file)
@@ -366,7 +366,7 @@ static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *dat
 
        val = drv_read(data, mask);
 
-       pr_debug("get_cur_val = %u\n", val);
+       pr_debug("%s = %u\n", __func__, val);
 
        return val;
 }
@@ -378,7 +378,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
        unsigned int freq;
        unsigned int cached_freq;
 
-       pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
+       pr_debug("%s (%d)\n", __func__, cpu);
 
        policy = cpufreq_cpu_get_raw(cpu);
        if (unlikely(!policy))
@@ -458,8 +458,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
        if (acpi_pstate_strict) {
                if (!check_freqs(policy, mask,
                                 policy->freq_table[index].frequency)) {
-                       pr_debug("acpi_cpufreq_target failed (%d)\n",
-                               policy->cpu);
+                       pr_debug("%s (%d)\n", __func__, policy->cpu);
                        result = -EAGAIN;
                }
        }
@@ -573,7 +572,7 @@ static int cpufreq_boost_down_prep(unsigned int cpu)
 static int __init acpi_cpufreq_early_init(void)
 {
        unsigned int i;
-       pr_debug("acpi_cpufreq_early_init\n");
+       pr_debug("%s\n", __func__);
 
        acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
        if (!acpi_perf_data) {
@@ -657,7 +656,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        static int blacklisted;
 #endif
 
-       pr_debug("acpi_cpufreq_cpu_init\n");
+       pr_debug("%s\n", __func__);
 
 #ifdef CONFIG_SMP
        if (blacklisted)
@@ -856,7 +855,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
        struct acpi_cpufreq_data *data = policy->driver_data;
 
-       pr_debug("acpi_cpufreq_cpu_exit\n");
+       pr_debug("%s\n", __func__);
 
        policy->fast_switch_possible = false;
        policy->driver_data = NULL;
@@ -881,7 +880,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 {
        struct acpi_cpufreq_data *data = policy->driver_data;
 
-       pr_debug("acpi_cpufreq_resume\n");
+       pr_debug("%s\n", __func__);
 
        data->resume = 1;
 
@@ -954,7 +953,7 @@ static int __init acpi_cpufreq_init(void)
        if (cpufreq_get_current_driver())
                return -EEXIST;
 
-       pr_debug("acpi_cpufreq_init\n");
+       pr_debug("%s\n", __func__);
 
        ret = acpi_cpufreq_early_init();
        if (ret)
@@ -991,7 +990,7 @@ static int __init acpi_cpufreq_init(void)
 
 static void __exit acpi_cpufreq_exit(void)
 {
-       pr_debug("acpi_cpufreq_exit\n");
+       pr_debug("%s\n", __func__);
 
        acpi_cpufreq_boost_exit();
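
All of these conversions replace a hard-coded function name in the format string with "%s", __func__, which keeps the messages accurate across renames and lets the compiler deduplicate the strings. The resulting idiom, as used in the hunks above:

    pr_debug("%s (%d)\n", __func__, policy->cpu);
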
 
index 4ac7c3cf34bef93ab2070d1c5f69a7c3a8b0ec25..6927a8c0e748055480b62bbea7f0f1a6165d3693 100644 (file)
@@ -124,7 +124,7 @@ static int __init amd_freq_sensitivity_init(void)
                        PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
 
        if (!pcidev) {
-               if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+               if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
                        return -ENODEV;
        }
 
index 75491fc841a6b7e6b6a621e7bb3c45e215fd466b..0df16eb1eb3cebcac2953fa52f70bf13c1a6d0b7 100644 (file)
@@ -359,11 +359,11 @@ static int __init armada37xx_cpufreq_driver_init(void)
        struct armada_37xx_dvfs *dvfs;
        struct platform_device *pdev;
        unsigned long freq;
-       unsigned int cur_frequency;
+       unsigned int cur_frequency, base_frequency;
        struct regmap *nb_pm_base, *avs_base;
        struct device *cpu_dev;
        int load_lvl, ret;
-       struct clk *clk;
+       struct clk *clk, *parent;
 
        nb_pm_base =
                syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
@@ -399,6 +399,22 @@ static int __init armada37xx_cpufreq_driver_init(void)
                return PTR_ERR(clk);
        }
 
+       parent = clk_get_parent(clk);
+       if (IS_ERR(parent)) {
+               dev_err(cpu_dev, "Cannot get parent clock for CPU0\n");
+               clk_put(clk);
+               return PTR_ERR(parent);
+       }
+
+       /* Get parent CPU frequency */
+       base_frequency = clk_get_rate(parent);
+
+       if (!base_frequency) {
+               dev_err(cpu_dev, "Failed to get parent clock rate for CPU\n");
+               clk_put(clk);
+               return -EINVAL;
+       }
+
        /* Get nominal (current) CPU frequency */
        cur_frequency = clk_get_rate(clk);
        if (!cur_frequency) {
@@ -431,7 +447,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
        for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
             load_lvl++) {
                unsigned long u_volt = avs_map[dvfs->avs[load_lvl]] * 1000;
-               freq = cur_frequency / dvfs->divider[load_lvl];
+               freq = base_frequency / dvfs->divider[load_lvl];
                ret = dev_pm_opp_add(cpu_dev, freq, u_volt);
                if (ret)
                        goto remove_opp;
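
The underlying fix: the DVFS load-level dividers divide the parent clock of the CPU clock, so building the OPP table from the current (possibly already divided) CPU rate produced wrong frequencies. The corrected derivation, reduced to a sketch (loop body condensed, per-level voltages and error handling omitted):

    struct clk *parent = clk_get_parent(clk);
    unsigned long base = clk_get_rate(parent);

    for (lvl = 0; lvl < LOAD_LEVEL_NR; lvl++)
            dev_pm_opp_add(cpu_dev, base / dvfs->divider[lvl], u_volt);
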
index b3f4bd647e9b39792228df4c0d8589545e4dae9a..988ebc326bdbb02c6d29c804e32455d43cd3d539 100644 (file)
@@ -132,6 +132,7 @@ static int __init armada_8k_cpufreq_init(void)
                of_node_put(node);
                return -ENODEV;
        }
+       of_node_put(node);
 
        nb_cpus = num_possible_cpus();
        freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
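
This is one of several leak fixes in this merge with the same shape: of_find_*() and of_parse_phandle() return a node with an elevated refcount, and every exit path has to drop it (of_node_put(NULL) is harmless, which the pasemi hunk further down relies on). The canonical pattern, with a hypothetical do_setup() consumer:

    struct device_node *np;
    int ret;

    np = of_find_compatible_node(NULL, NULL, "vendor,block");
    if (!np)
            return -ENODEV;

    ret = do_setup(np);
    of_node_put(np);        /* drop the reference on every path */
    return ret;
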
index e10922709d1399c4b7ec3b804548a9ecb3166d4b..7ea217c88c2ea46ec863d6845d95f23ae6e2dde3 100644 (file)
 
 static LIST_HEAD(cpufreq_policy_list);
 
-static inline bool policy_is_inactive(struct cpufreq_policy *policy)
-{
-       return cpumask_empty(policy->cpus);
-}
-
 /* Macros to iterate over CPU policies */
 #define for_each_suitable_policy(__policy, __active)                    \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
@@ -250,6 +245,51 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy)
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
 
+/**
+ * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
+ * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
+ */
+void cpufreq_cpu_release(struct cpufreq_policy *policy)
+{
+       if (WARN_ON(!policy))
+               return;
+
+       lockdep_assert_held(&policy->rwsem);
+
+       up_write(&policy->rwsem);
+
+       cpufreq_cpu_put(policy);
+}
+
+/**
+ * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
+ * @cpu: CPU to find the policy for.
+ *
+ * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
+ * if the policy returned by it is not NULL, acquire its rwsem for writing.
+ * Return the policy if it is active or release it and return NULL otherwise.
+ *
+ * The policy returned by this function has to be released with the help of
+ * cpufreq_cpu_release() in order to release its rwsem and balance its usage
+ * counter properly.
+ */
+struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
+{
+       struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+       if (!policy)
+               return NULL;
+
+       down_write(&policy->rwsem);
+
+       if (policy_is_inactive(policy)) {
+               cpufreq_cpu_release(policy);
+               return NULL;
+       }
+
+       return policy;
+}
+
 /*********************************************************************
  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
  *********************************************************************/
@@ -669,9 +709,6 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
        return ret;
 }
 
-static int cpufreq_set_policy(struct cpufreq_policy *policy,
-                               struct cpufreq_policy *new_policy);
-
 /**
  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
  */
@@ -857,11 +894,9 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
 {
        unsigned int limit;
        int ret;
-       if (cpufreq_driver->bios_limit) {
-               ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
-               if (!ret)
-                       return sprintf(buf, "%u\n", limit);
-       }
+       ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
+       if (!ret)
+               return sprintf(buf, "%u\n", limit);
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
 }
 
@@ -1098,6 +1133,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
                                   cpufreq_global_kobject, "policy%u", cpu);
        if (ret) {
                pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+               kobject_put(&policy->kobj);
                goto err_free_real_cpus;
        }
 
@@ -1550,7 +1586,7 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
 {
        unsigned int ret_freq = 0;
 
-       if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
+       if (unlikely(policy_is_inactive(policy)))
                return ret_freq;
 
        ret_freq = cpufreq_driver->get(policy->cpu);
@@ -1588,7 +1624,8 @@ unsigned int cpufreq_get(unsigned int cpu)
 
        if (policy) {
                down_read(&policy->rwsem);
-               ret_freq = __cpufreq_get(policy);
+               if (cpufreq_driver->get)
+                       ret_freq = __cpufreq_get(policy);
                up_read(&policy->rwsem);
 
                cpufreq_cpu_put(policy);
@@ -2229,8 +2266,8 @@ EXPORT_SYMBOL(cpufreq_get_policy);
  *
  * The cpuinfo part of @policy is not updated by this function.
  */
-static int cpufreq_set_policy(struct cpufreq_policy *policy,
-                             struct cpufreq_policy *new_policy)
+int cpufreq_set_policy(struct cpufreq_policy *policy,
+                      struct cpufreq_policy *new_policy)
 {
        struct cpufreq_governor *old_gov;
        int ret;
@@ -2337,17 +2374,12 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
  */
 void cpufreq_update_policy(unsigned int cpu)
 {
-       struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+       struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
        struct cpufreq_policy new_policy;
 
        if (!policy)
                return;
 
-       down_write(&policy->rwsem);
-
-       if (policy_is_inactive(policy))
-               goto unlock;
-
        /*
         * BIOS might change freq behind our back
         * -> ask driver for current freq and notify governors about a change
@@ -2364,12 +2396,26 @@ void cpufreq_update_policy(unsigned int cpu)
        cpufreq_set_policy(policy, &new_policy);
 
 unlock:
-       up_write(&policy->rwsem);
-
-       cpufreq_cpu_put(policy);
+       cpufreq_cpu_release(policy);
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
+/**
+ * cpufreq_update_limits - Update policy limits for a given CPU.
+ * @cpu: CPU to update the policy limits for.
+ *
+ * Invoke the driver's ->update_limits callback if present or call
+ * cpufreq_update_policy() for @cpu.
+ */
+void cpufreq_update_limits(unsigned int cpu)
+{
+       if (cpufreq_driver->update_limits)
+               cpufreq_driver->update_limits(cpu);
+       else
+               cpufreq_update_policy(cpu);
+}
+EXPORT_SYMBOL_GPL(cpufreq_update_limits);
+
 /*********************************************************************
  *               BOOST                                              *
  *********************************************************************/
@@ -2426,7 +2472,7 @@ int cpufreq_boost_trigger_state(int state)
 
 static bool cpufreq_boost_supported(void)
 {
-       return likely(cpufreq_driver) && cpufreq_driver->set_boost;
+       return cpufreq_driver->set_boost;
 }
 
 static int create_boost_sysfs_file(void)
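
The new helper pair folds the cpufreq_cpu_get() + down_write() + inactive-check sequence into one call, so callers such as cpufreq_update_policy() above reduce to:

    struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

    if (!policy)
            return;

    /* ... update the policy under its rwsem ... */

    cpufreq_cpu_release(policy);    /* up_write() + cpufreq_cpu_put() */
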
index ffa9adeaba31bf3f303a37ef3790dcd1d7c51bfb..9d1d9bf02710b77a0e8dd04205b591ce9ff31860 100644 (file)
@@ -459,6 +459,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
        /* Failure, so roll back. */
        pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
 
+       kobject_put(&dbs_data->attr_set.kobj);
+
        policy->governor_data = NULL;
 
        if (!have_governor_per_policy())
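
Same leak class as the cpufreq_policy_alloc() fix above: once kobject_init_and_add() has run, the object may only be freed through its refcount, because a plain kfree() would skip the ktype's release and leak whatever the kobject already registered in sysfs. The idiom (my_ktype, parent and name are hypothetical):

    ret = kobject_init_and_add(&obj->kobj, &my_ktype, parent, "%s", name);
    if (ret) {
            kobject_put(&obj->kobj);        /* never plain kfree() here */
            return ret;
    }
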
index e2db5581489a03a4db6b91791e4deaec44c7e33a..08b192eb22c6eb694f04343f1c17d81dbd3cef05 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 
-static DEFINE_SPINLOCK(cpufreq_stats_lock);
 
 struct cpufreq_stats {
        unsigned int total_trans;
@@ -23,6 +22,7 @@ struct cpufreq_stats {
        unsigned int state_num;
        unsigned int last_index;
        u64 *time_in_state;
+       spinlock_t lock;
        unsigned int *freq_table;
        unsigned int *trans_table;
 };
@@ -39,12 +39,12 @@ static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
 {
        unsigned int count = stats->max_state;
 
-       spin_lock(&cpufreq_stats_lock);
+       spin_lock(&stats->lock);
        memset(stats->time_in_state, 0, count * sizeof(u64));
        memset(stats->trans_table, 0, count * count * sizeof(int));
        stats->last_time = get_jiffies_64();
        stats->total_trans = 0;
-       spin_unlock(&cpufreq_stats_lock);
+       spin_unlock(&stats->lock);
 }
 
 static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
@@ -62,9 +62,9 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
        if (policy->fast_switch_enabled)
                return 0;
 
-       spin_lock(&cpufreq_stats_lock);
+       spin_lock(&stats->lock);
        cpufreq_stats_update(stats);
-       spin_unlock(&cpufreq_stats_lock);
+       spin_unlock(&stats->lock);
 
        for (i = 0; i < stats->state_num; i++) {
                len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
@@ -211,6 +211,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
        stats->state_num = i;
        stats->last_time = get_jiffies_64();
        stats->last_index = freq_table_get_index(stats, policy->cur);
+       spin_lock_init(&stats->lock);
 
        policy->stats = stats;
        ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
@@ -242,11 +243,11 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
        if (old_index == -1 || new_index == -1 || old_index == new_index)
                return;
 
-       spin_lock(&cpufreq_stats_lock);
+       spin_lock(&stats->lock);
        cpufreq_stats_update(stats);
 
        stats->last_index = new_index;
        stats->trans_table[old_index * stats->max_state + new_index]++;
        stats->total_trans++;
-       spin_unlock(&cpufreq_stats_lock);
+       spin_unlock(&stats->lock);
 }
index 3a8cc99e6815fa4c1597e9cd5f2367bcc1d40d6c..e7be0af3199f852578af1f27f49717eb3a1e9bac 100644 (file)
@@ -290,9 +290,6 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
 
 struct freq_attr *cpufreq_generic_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
-#ifdef CONFIG_CPU_FREQ_BOOST_SW
-       &cpufreq_freq_attr_scaling_boost_freqs,
-#endif
        NULL,
 };
 EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
index a4ff09f91c8f8ca4a453f1b11c7ad1fac3751a60..3e17560b1efe366fc0e64853efc6c832ed452f70 100644 (file)
@@ -388,11 +388,11 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
                ret = imx6ul_opp_check_speed_grading(cpu_dev);
                if (ret) {
                        if (ret == -EPROBE_DEFER)
-                               return ret;
+                               goto put_node;
 
                        dev_err(cpu_dev, "failed to read ocotp: %d\n",
                                ret);
-                       return ret;
+                       goto put_node;
                }
        } else {
                imx6q_opp_check_speed_grading(cpu_dev);
index 2986119dd31fb8391e3256a88942272870acad44..34b54df41aaafb8e177d05d7d693c3708c226299 100644 (file)
@@ -179,6 +179,7 @@ struct vid_data {
  *                     based on the MSR_IA32_MISC_ENABLE value and whether or
  *                     not the maximum reported turbo P-state is different from
  *                     the maximum reported non-turbo one.
+ * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
  * @min_perf_pct:      Minimum capacity limit in percent of the maximum turbo
  *                     P-state capacity.
  * @max_perf_pct:      Maximum capacity limit in percent of the maximum turbo
@@ -187,6 +188,7 @@ struct vid_data {
 struct global_params {
        bool no_turbo;
        bool turbo_disabled;
+       bool turbo_disabled_mf;
        int max_perf_pct;
        int min_perf_pct;
 };
@@ -525,7 +527,7 @@ static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
        u64 epb;
        int ret;
 
-       if (!static_cpu_has(X86_FEATURE_EPB))
+       if (!boot_cpu_has(X86_FEATURE_EPB))
                return -ENXIO;
 
        ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -539,7 +541,7 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
 {
        s16 epp;
 
-       if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+       if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                /*
                 * When hwp_req_data is 0, means that caller didn't read
                 * MSR_HWP_REQUEST, so need to read and get EPP.
@@ -564,7 +566,7 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
        u64 epb;
        int ret;
 
-       if (!static_cpu_has(X86_FEATURE_EPB))
+       if (!boot_cpu_has(X86_FEATURE_EPB))
                return -ENXIO;
 
        ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -612,7 +614,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
        if (epp < 0)
                return epp;
 
-       if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+       if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                if (epp == HWP_EPP_PERFORMANCE)
                        return 1;
                if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
@@ -621,7 +623,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
                        return 3;
                else
                        return 4;
-       } else if (static_cpu_has(X86_FEATURE_EPB)) {
+       } else if (boot_cpu_has(X86_FEATURE_EPB)) {
                /*
                 * Range:
                 *      0x00-0x03       :       Performance
@@ -649,7 +651,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
 
        mutex_lock(&intel_pstate_limits_lock);
 
-       if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+       if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                u64 value;
 
                ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
@@ -824,7 +826,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
                epp = cpu_data->epp_powersave;
        }
 update_epp:
-       if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+       if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
                value &= ~GENMASK_ULL(31, 24);
                value |= (u64)epp << 24;
        } else {
@@ -849,7 +851,7 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
        value |= HWP_MIN_PERF(min_perf);
 
        /* Set EPP/EPB to min */
-       if (static_cpu_has(X86_FEATURE_HWP_EPP))
+       if (boot_cpu_has(X86_FEATURE_HWP_EPP))
                value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
        else
                intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);
@@ -897,6 +899,48 @@ static void intel_pstate_update_policies(void)
                cpufreq_update_policy(cpu);
 }
 
+static void intel_pstate_update_max_freq(unsigned int cpu)
+{
+       struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+       struct cpufreq_policy new_policy;
+       struct cpudata *cpudata;
+
+       if (!policy)
+               return;
+
+       cpudata = all_cpu_data[cpu];
+       policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+                       cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
+       memcpy(&new_policy, policy, sizeof(*policy));
+       new_policy.max = min(policy->user_policy.max, policy->cpuinfo.max_freq);
+       new_policy.min = min(policy->user_policy.min, new_policy.max);
+
+       cpufreq_set_policy(policy, &new_policy);
+
+       cpufreq_cpu_release(policy);
+}
+
+static void intel_pstate_update_limits(unsigned int cpu)
+{
+       mutex_lock(&intel_pstate_driver_lock);
+
+       update_turbo_state();
+       /*
+        * If turbo has been turned on or off globally, policy limits for
+        * all CPUs need to be updated to reflect that.
+        */
+       if (global.turbo_disabled_mf != global.turbo_disabled) {
+               global.turbo_disabled_mf = global.turbo_disabled;
+               for_each_possible_cpu(cpu)
+                       intel_pstate_update_max_freq(cpu);
+       } else {
+               cpufreq_update_policy(cpu);
+       }
+
+       mutex_unlock(&intel_pstate_driver_lock);
+}
+
 /************************** sysfs begin ************************/
 #define show_one(file_name, object)                                    \
        static ssize_t show_##file_name                                 \
@@ -1197,7 +1241,7 @@ static void __init intel_pstate_sysfs_expose_params(void)
 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
        /* First disable HWP notification interrupt as we don't process them */
-       if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
+       if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
                wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
        wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
@@ -2138,6 +2182,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
        update_turbo_state();
+       global.turbo_disabled_mf = global.turbo_disabled;
        policy->cpuinfo.max_freq = global.turbo_disabled ?
                        cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
        policy->cpuinfo.max_freq *= cpu->pstate.scaling;
@@ -2182,6 +2227,7 @@ static struct cpufreq_driver intel_pstate = {
        .init           = intel_pstate_cpu_init,
        .exit           = intel_pstate_cpu_exit,
        .stop_cpu       = intel_pstate_stop_cpu,
+       .update_limits  = intel_pstate_update_limits,
        .name           = "intel_pstate",
 };
 
@@ -2316,6 +2362,7 @@ static struct cpufreq_driver intel_cpufreq = {
        .init           = intel_cpufreq_cpu_init,
        .exit           = intel_pstate_cpu_exit,
        .stop_cpu       = intel_cpufreq_stop_cpu,
+       .update_limits  = intel_pstate_update_limits,
        .name           = "intel_cpufreq",
 };
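
The static_cpu_has() to boot_cpu_has() conversions scattered through this file (and through powernow-k8 and amd_freq_sensitivity above) follow the usual rule: static_cpu_has(), which patches the branch in place via alternatives, is only worth it on hot paths, while init, sysfs and other slow paths are fine with the plain table lookup:

    /* Slow path (probe/sysfs): an ordinary lookup is enough. */
    if (!boot_cpu_has(X86_FEATURE_EPB))
            return -ENXIO;
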
 
index c2dd43f3f5d8a3092e6847f18d124f0631bdf065..8d63a6dc8383ce2964e31a53070d4bb0a1a5fcf8 100644 (file)
@@ -124,13 +124,14 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
        priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
        if (IS_ERR(priv.cpu_clk)) {
                dev_err(priv.dev, "Unable to get cpuclk\n");
-               return PTR_ERR(priv.cpu_clk);
+               err = PTR_ERR(priv.cpu_clk);
+               goto out_node;
        }
 
        err = clk_prepare_enable(priv.cpu_clk);
        if (err) {
                dev_err(priv.dev, "Unable to prepare cpuclk\n");
-               return err;
+               goto out_node;
        }
 
        kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
@@ -161,20 +162,22 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
                goto out_ddr;
        }
 
-       of_node_put(np);
-       np = NULL;
-
        err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
-       if (!err)
-               return 0;
+       if (err) {
+               dev_err(priv.dev, "Failed to register cpufreq driver\n");
+               goto out_powersave;
+       }
 
-       dev_err(priv.dev, "Failed to register cpufreq driver\n");
+       of_node_put(np);
+       return 0;
 
+out_powersave:
        clk_disable_unprepare(priv.powersave_clk);
 out_ddr:
        clk_disable_unprepare(priv.ddr_clk);
 out_cpu:
        clk_disable_unprepare(priv.cpu_clk);
+out_node:
        of_node_put(np);
 
        return err;
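
The reworked probe is the standard unwind ladder: each acquired resource gets a label, and every error path jumps to the label that releases everything obtained so far, in reverse order. Reduced to its skeleton (acquire_a/release_a and friends are hypothetical pairs):

    a = acquire_a();
    if (IS_ERR(a))
            return PTR_ERR(a);

    b = acquire_b();
    if (IS_ERR(b)) {
            err = PTR_ERR(b);
            goto out_a;
    }

    return 0;

    out_a:
            release_a(a);
            return err;
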
index d9df89392b8439587fad331f5a728255b7f9050a..a94355723ef85780d347126b0d41bb70b0380446 100644 (file)
@@ -210,7 +210,7 @@ static int __init maple_cpufreq_init(void)
         */
        valp = of_get_property(cpunode, "clock-frequency", NULL);
        if (!valp)
-               return -ENODEV;
+               goto bail_noprops;
        max_freq = (*valp)/1000;
        maple_cpu_freqs[0].frequency = max_freq;
        maple_cpu_freqs[1].frequency = max_freq/2;
@@ -231,10 +231,6 @@ static int __init maple_cpufreq_init(void)
 
        rc = cpufreq_register_driver(&maple_cpufreq_driver);
 
-       of_node_put(cpunode);
-
-       return rc;
-
 bail_noprops:
        of_node_put(cpunode);
 
index 75dfbd2a58ea6f1df8db4e89a8d47883e29ecfea..c7710c149de85a57fc6fa9e784dee83a0b6cf11d 100644 (file)
@@ -146,6 +146,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
        cpu = of_get_cpu_node(policy->cpu, NULL);
 
+       of_node_put(cpu);
        if (!cpu)
                goto out;
 
index 52f0d91d30c17eb0a721a55c839488f7b8e652e4..9b4ce2eb8222c0bc85aadcf67056774b1a556173 100644 (file)
@@ -552,6 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
        volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
        if (volt_gpio_np)
                voltage_gpio = read_gpio(volt_gpio_np);
+       of_node_put(volt_gpio_np);
        if (!voltage_gpio){
                pr_err("missing cpu-vcore-select gpio\n");
                return 1;
@@ -588,6 +589,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
        if (volt_gpio_np)
                voltage_gpio = read_gpio(volt_gpio_np);
 
+       of_node_put(volt_gpio_np);
        pvr = mfspr(SPRN_PVR);
        has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
 
index fb77b39a4ce36b21047c744e906e163bf2e34fe0..3c12e03fa3430b848af5843207ac1fd84361ed7f 100644 (file)
@@ -1178,7 +1178,7 @@ static int powernowk8_init(void)
        unsigned int i, supported_cpus = 0;
        int ret;
 
-       if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+       if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) {
                __request_acpi_cpufreq();
                return -ENODEV;
        }
index 41a0f0be3f9ff12cb38211b3b0eae5ec56880892..8414c3a4ea08cdef5d58c7e4e522e8bba9a23d70 100644 (file)
@@ -86,6 +86,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
        if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
            !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
                pr_info("invalid CBE regs pointers for cpufreq\n");
+               of_node_put(cpu);
                return -EINVAL;
        }
 
index 4295e547626426b2f57a5f8381d60e128aa32a3e..71b640c8c1a50d0d2565dbfe2d84d4cee2ab1ae3 100644 (file)
@@ -280,10 +280,12 @@ static const struct of_device_id node_matches[] __initconst = {
 
        { .compatible = "fsl,ls1012a-clockgen", },
        { .compatible = "fsl,ls1021a-clockgen", },
+       { .compatible = "fsl,ls1028a-clockgen", },
        { .compatible = "fsl,ls1043a-clockgen", },
        { .compatible = "fsl,ls1046a-clockgen", },
        { .compatible = "fsl,ls1088a-clockgen", },
        { .compatible = "fsl,ls2080a-clockgen", },
+       { .compatible = "fsl,lx2160a-clockgen", },
        { .compatible = "fsl,p4080-clockgen", },
        { .compatible = "fsl,qoriq-clockgen-1.0", },
        { .compatible = "fsl,qoriq-clockgen-2.0", },
index a1fb735685dbfbb4bbae28fd9e8940df5adffdb8..e086b2dd4072d1fa6e0355b61c6e76d315547cff 100644 (file)
@@ -412,7 +412,7 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
 }
 
 /**
- * centrino_setpolicy - set a new CPUFreq policy
+ * centrino_target - set a new CPUFreq policy
  * @policy: new policy
  * @index: index of target frequency
  *
index 0171a6e190d71d363d24e96cb83721edfa2f7e7e..f7199a35cbb6596e86c176956ffacc7a96911791 100644 (file)
@@ -84,7 +84,7 @@ static struct cpuidle_driver exynos_idle_driver = {
                [1] = {
                        .enter                  = exynos_enter_lowpower,
                        .exit_latency           = 300,
-                       .target_residency       = 100000,
+                       .target_residency       = 10000,
                        .name                   = "C1",
                        .desc                   = "ARM power down",
                },
index 7f108309e871ea63fbc16b1b896e3ef4cd443216..0f4b7c45df3e103cba0a5a617b782cee522a44be 100644 (file)
@@ -328,9 +328,23 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                  int index)
 {
+       int ret = 0;
+
+       /*
+        * Store the next hrtimer, which becomes either the next tick or the
+        * next timer event, whichever expires first. Additionally, to make
+        * this data useful for consumers outside cpuidle, we rely on the
+        * governor's ->select() callback having decided whether to stop the
+        * tick or not.
+        */
+       WRITE_ONCE(dev->next_hrtimer, tick_nohz_get_next_hrtimer());
+
        if (cpuidle_state_is_coupled(drv, index))
-               return cpuidle_enter_state_coupled(dev, drv, index);
-       return cpuidle_enter_state(dev, drv, index);
+               ret = cpuidle_enter_state_coupled(dev, drv, index);
+       else
+               ret = cpuidle_enter_state(dev, drv, index);
+
+       WRITE_ONCE(dev->next_hrtimer, 0);
+       return ret;
 }
 
 /**
@@ -511,6 +525,7 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
 {
        memset(dev->states_usage, 0, sizeof(dev->states_usage));
        dev->last_residency = 0;
+       dev->next_hrtimer = 0;
 }
 
 /**
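
dev->next_hrtimer is written here and read lock-free from other CPUs by the genpd CPU governor added earlier in this merge, hence the WRITE_ONCE()/READ_ONCE() pairing to keep both accesses tear-free. The reader side, as it appears in cpu_power_down_ok() above:

    next_hrtimer = READ_ONCE(dev->next_hrtimer);
    if (ktime_before(next_hrtimer, domain_wakeup))
            domain_wakeup = next_hrtimer;
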
index ec8a291d62bab3c58f584a61699d3cdc0118a689..54093ffd0aefa8a7c3a40f887958c61b2222d03c 100644 (file)
@@ -671,7 +671,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg(
        d = bcm2835_dma_create_cb_chain(chan, direction, false,
                                        info, extra,
                                        frames, src, dst, 0, 0,
-                                       GFP_KERNEL);
+                                       GFP_NOWAIT);
        if (!d)
                return NULL;
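
Per the dmaengine API rules, the ->device_prep_*() callbacks may be invoked from atomic context (under a spinlock or from a tasklet), so allocations inside them must not sleep; GFP_NOWAIT fails fast instead of blocking. Inside a prep callback this means:

    /* Inside ->device_prep_slave_sg(): may run in atomic context. */
    d = kzalloc(sizeof(*d), GFP_NOWAIT);
    if (!d)
            return NULL;
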
 
index 131f3974740d5d75a67141f15b019b06d7011c8a..814853842e29f9e103beab75468de580ac34bca7 100644 (file)
@@ -253,7 +253,7 @@ static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT);
 #else
-       mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
+       mtk_dma_set(pc, MTK_CQDMA_DST2, 0);
 #endif
 
        /* setup the length */
index 2b4f2569816956621e1d6201851ae16d21db27f3..e2a5398f89b51129345fbb076710083ef24f9188 100644 (file)
@@ -1282,6 +1282,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
        enum dma_status status;
        unsigned int residue = 0;
        unsigned int dptr = 0;
+       unsigned int chcrb;
+       unsigned int tcrb;
+       unsigned int i;
 
        if (!desc)
                return 0;
@@ -1329,6 +1332,24 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
                return 0;
        }
 
+       /*
+        * We need to read two registers.
+        * Make sure the control register does not skip to the next chunk
+        * while reading the counter.
+        * Trying it 3 times should be enough: Initial read, retry, retry
+        * for the paranoid.
+        */
+       for (i = 0; i < 3; i++) {
+               chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+                                           RCAR_DMACHCRB_DPTR_MASK;
+               tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
+               /* Still the same? */
+               if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+                             RCAR_DMACHCRB_DPTR_MASK))
+                       break;
+       }
+       WARN_ONCE(i >= 3, "residue might not be continuous!");
+
        /*
         * In descriptor mode the descriptor running pointer is not maintained
         * by the interrupt handler, find the running descriptor from the
@@ -1336,8 +1357,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
         * mode just use the running descriptor pointer.
         */
        if (desc->hwdescs.use) {
-               dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
-                       RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+               dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
                if (dptr == 0)
                        dptr = desc->nchunks;
                dptr--;
@@ -1355,7 +1375,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
        }
 
        /* Add the residue for the current chunk. */
-       residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
+       residue += tcrb << desc->xfer_shift;
 
        return residue;
 }
@@ -1368,6 +1388,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
        enum dma_status status;
        unsigned long flags;
        unsigned int residue;
+       bool cyclic;
 
        status = dma_cookie_status(chan, cookie, txstate);
        if (status == DMA_COMPLETE || !txstate)
@@ -1375,10 +1396,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
 
        spin_lock_irqsave(&rchan->lock, flags);
        residue = rcar_dmac_chan_get_residue(rchan, cookie);
+       cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
        spin_unlock_irqrestore(&rchan->lock, flags);
 
        /* if there's no residue, the cookie is complete */
-       if (!residue)
+       if (!residue && !cyclic)
                return DMA_COMPLETE;
 
        dma_set_residue(txstate, residue);
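
The residue fix needs DPTR (in CHCRB) and the TCRB counter to describe the same chunk, but the hardware can advance between the two MMIO reads, so the driver re-reads CHCRB and retries until it is stable, effectively a polled seqlock. The skeleton of the technique (read_reg, REG_A and REG_B are hypothetical):

    for (i = 0; i < 3; i++) {
            a = read_reg(REG_A);
            b = read_reg(REG_B);
            if (a == read_reg(REG_A))       /* A unchanged: a/b consistent */
                    break;
    }
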
index 8e17149655f069ff923e09c943295b469d87d076..540e8cd16ee6ec6ae0205a2f8c6193f129e80825 100644 (file)
@@ -116,7 +116,7 @@ config EXTCON_PALMAS
 
 config EXTCON_PTN5150
        tristate "NXP PTN5150 CC LOGIC USB EXTCON support"
-       depends on I2C && GPIOLIB || COMPILE_TEST
+       depends on I2C && (GPIOLIB || COMPILE_TEST)
        select REGMAP_I2C
        help
          Say Y here to enable support for USB peripheral and USB host
index cac16c4b0df3eb6232b75cb7237091a5fda8f95e..7b655f6156fba922378e6ad6c32cee8399b4aef7 100644 (file)
@@ -5,20 +5,6 @@
 
 menu "Firmware Drivers"
 
-config ARM_PSCI_FW
-       bool
-
-config ARM_PSCI_CHECKER
-       bool "ARM PSCI checker"
-       depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
-       help
-         Run the PSCI checker during startup. This checks that hotplug and
-         suspend operations work correctly when using PSCI.
-
-         The torture tests may interfere with the PSCI checker by turning CPUs
-         on and off through hotplug, so for now torture tests and PSCI checker
-         are mutually exclusive.
-
 config ARM_SCMI_PROTOCOL
        bool "ARM System Control and Management Interface (SCMI) Message Protocol"
        depends on ARM || ARM64 || COMPILE_TEST
@@ -270,6 +256,7 @@ config TI_SCI_PROTOCOL
 config HAVE_ARM_SMCCC
        bool
 
+source "drivers/firmware/psci/Kconfig"
 source "drivers/firmware/broadcom/Kconfig"
 source "drivers/firmware/google/Kconfig"
 source "drivers/firmware/efi/Kconfig"
index 80feb635120fc72283da432b08ba1f6eed8aa9dd..9a3909a226820cae08c5061bf5da5c44148e8995 100644 (file)
@@ -2,8 +2,6 @@
 #
 # Makefile for the linux kernel.
 #
-obj-$(CONFIG_ARM_PSCI_FW)      += psci.o
-obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
 obj-$(CONFIG_ARM_SCPI_PROTOCOL)        += arm_scpi.o
 obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
 obj-$(CONFIG_ARM_SDE_INTERFACE)        += arm_sdei.o
@@ -25,6 +23,7 @@ CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQU
 obj-$(CONFIG_TI_SCI_PROTOCOL)  += ti_sci.o
 
 obj-$(CONFIG_ARM_SCMI_PROTOCOL)        += arm_scmi/
+obj-y                          += psci/
 obj-y                          += broadcom/
 obj-y                          += meson/
 obj-$(CONFIG_GOOGLE_FIRMWARE)  += google/
diff --git a/drivers/firmware/psci/Kconfig b/drivers/firmware/psci/Kconfig
new file mode 100644 (file)
index 0000000..26a3b32
--- /dev/null
@@ -0,0 +1,13 @@
+config ARM_PSCI_FW
+       bool
+
+config ARM_PSCI_CHECKER
+       bool "ARM PSCI checker"
+       depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
+       help
+         Run the PSCI checker during startup. This checks that hotplug and
+         suspend operations work correctly when using PSCI.
+
+         The torture tests may interfere with the PSCI checker by turning CPUs
+         on and off through hotplug, so for now torture tests and PSCI checker
+         are mutually exclusive.
diff --git a/drivers/firmware/psci/Makefile b/drivers/firmware/psci/Makefile
new file mode 100644 (file)
index 0000000..1956b88
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_ARM_PSCI_FW)      += psci.o
+obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
similarity index 90%
rename from drivers/firmware/psci.c
rename to drivers/firmware/psci/psci.c
index c80ec1d03274413863fb8e3f917f26daa8f868c2..fe090ef43d286764983bf17b99513a0ded0bc8a7 100644 (file)
@@ -88,6 +88,7 @@ static u32 psci_function_id[PSCI_FN_MAX];
                                PSCI_1_0_EXT_POWER_STATE_TYPE_MASK)
 
 static u32 psci_cpu_suspend_feature;
+static bool psci_system_reset2_supported;
 
 static inline bool psci_has_ext_power_state(void)
 {
@@ -95,6 +96,11 @@ static inline bool psci_has_ext_power_state(void)
                                PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK;
 }
 
+static inline bool psci_has_osi_support(void)
+{
+       return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED;
+}
+
 static inline bool psci_power_state_loses_context(u32 state)
 {
        const u32 mask = psci_has_ext_power_state() ?
@@ -253,7 +259,17 @@ static int get_set_conduit_method(struct device_node *np)
 
 static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
 {
-       invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+       if ((reboot_mode == REBOOT_WARM || reboot_mode == REBOOT_SOFT) &&
+           psci_system_reset2_supported) {
+               /*
+                * reset_type[31] = 0 (architectural)
+                * reset_type[30:0] = 0 (SYSTEM_WARM_RESET)
+                * cookie = 0 (ignored by the implementation)
+                */
+               invoke_psci_fn(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2), 0, 0, 0);
+       } else {
+               invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+       }
 }
 
 static void psci_sys_poweroff(void)
@@ -270,9 +286,26 @@ static int __init psci_features(u32 psci_func_id)
 #ifdef CONFIG_CPU_IDLE
 static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
 
+static int psci_dt_parse_state_node(struct device_node *np, u32 *state)
+{
+       int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
+
+       if (err) {
+               pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
+               return err;
+       }
+
+       if (!psci_power_state_is_valid(*state)) {
+               pr_warn("Invalid PSCI power state %#x\n", *state);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
 {
-       int i, ret, count = 0;
+       int i, ret = 0, count = 0;
        u32 *psci_states;
        struct device_node *state_node;
 
@@ -291,29 +324,16 @@ static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
                return -ENOMEM;
 
        for (i = 0; i < count; i++) {
-               u32 state;
-
                state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+               ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
+               of_node_put(state_node);
 
-               ret = of_property_read_u32(state_node,
-                                          "arm,psci-suspend-param",
-                                          &state);
-               if (ret) {
-                       pr_warn(" * %pOF missing arm,psci-suspend-param property\n",
-                               state_node);
-                       of_node_put(state_node);
+               if (ret)
                        goto free_mem;
-               }
 
-               of_node_put(state_node);
-               pr_debug("psci-power-state %#x index %d\n", state, i);
-               if (!psci_power_state_is_valid(state)) {
-                       pr_warn("Invalid PSCI power state %#x\n", state);
-                       ret = -EINVAL;
-                       goto free_mem;
-               }
-               psci_states[i] = state;
+               pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
        }
+
        /* Idle states parsed correctly, initialize per-cpu pointer */
        per_cpu(psci_power_state, cpu) = psci_states;
        return 0;
@@ -451,6 +471,16 @@ static const struct platform_suspend_ops psci_suspend_ops = {
        .enter          = psci_system_suspend_enter,
 };
 
+static void __init psci_init_system_reset2(void)
+{
+       int ret;
+
+       ret = psci_features(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2));
+
+       if (ret != PSCI_RET_NOT_SUPPORTED)
+               psci_system_reset2_supported = true;
+}
+
 static void __init psci_init_system_suspend(void)
 {
        int ret;
@@ -588,6 +618,7 @@ static int __init psci_probe(void)
                psci_init_smccc();
                psci_init_cpu_suspend();
                psci_init_system_suspend();
+               psci_init_system_reset2();
        }
 
        return 0;
@@ -605,9 +636,9 @@ static int __init psci_0_2_init(struct device_node *np)
        int err;
 
        err = get_set_conduit_method(np);
-
        if (err)
-               goto out_put_node;
+               return err;
+
        /*
         * Starting with v0.2, the PSCI specification introduced a call
         * (PSCI_VERSION) that allows probing the firmware version, so
@@ -615,11 +646,7 @@ static int __init psci_0_2_init(struct device_node *np)
         * can be carried out according to the specific version reported
         * by firmware
         */
-       err = psci_probe();
-
-out_put_node:
-       of_node_put(np);
-       return err;
+       return psci_probe();
 }
 
 /*
@@ -631,9 +658,8 @@ static int __init psci_0_1_init(struct device_node *np)
        int err;
 
        err = get_set_conduit_method(np);
-
        if (err)
-               goto out_put_node;
+               return err;
 
        pr_info("Using PSCI v0.1 Function IDs from DT\n");
 
@@ -657,15 +683,27 @@ static int __init psci_0_1_init(struct device_node *np)
                psci_ops.migrate = psci_migrate;
        }
 
-out_put_node:
-       of_node_put(np);
-       return err;
+       return 0;
+}
+
+static int __init psci_1_0_init(struct device_node *np)
+{
+       int err;
+
+       err = psci_0_2_init(np);
+       if (err)
+               return err;
+
+       if (psci_has_osi_support())
+               pr_info("OSI mode supported.\n");
+
+       return 0;
 }
 
 static const struct of_device_id psci_of_match[] __initconst = {
        { .compatible = "arm,psci",     .data = psci_0_1_init},
        { .compatible = "arm,psci-0.2", .data = psci_0_2_init},
-       { .compatible = "arm,psci-1.0", .data = psci_0_2_init},
+       { .compatible = "arm,psci-1.0", .data = psci_1_0_init},
        {},
 };
 
@@ -674,6 +712,7 @@ int __init psci_dt_init(void)
        struct device_node *np;
        const struct of_device_id *matched_np;
        psci_initcall_t init_fn;
+       int ret;
 
        np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
 
@@ -681,7 +720,10 @@ int __init psci_dt_init(void)
                return -ENODEV;
 
        init_fn = (psci_initcall_t)matched_np->data;
-       return init_fn(np);
+       ret = init_fn(np);
+
+       of_node_put(np);
+       return ret;
 }
 
 #ifdef CONFIG_ACPI
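
The refactored v0.1/v0.2 init paths above all follow one ownership rule: psci_dt_init() obtains the node reference from of_find_matching_node_and_match() and is now the single place that drops it, so the per-version init functions can return early on error without out_put_node labels. A minimal userspace sketch of that pattern, with hypothetical node_get()/node_put() helpers standing in for the OF reference counting:

    #include <stdlib.h>

    struct node { int refs; };

    /* Hypothetical stand-in for of_find_matching_node_and_match(). */
    static struct node *node_get(void)
    {
            struct node *n = calloc(1, sizeof(*n));

            if (n)
                    n->refs = 1;
            return n;
    }

    /* Hypothetical stand-in for of_node_put(). */
    static void node_put(struct node *n)
    {
            if (n && --n->refs == 0)
                    free(n);
    }

    /* Per-version init: may fail early, never drops the reference itself. */
    static int version_init(struct node *n)
    {
            return n ? 0 : -1;
    }

    int main(void)
    {
            struct node *n = node_get();
            int ret;

            if (!n)
                    return 1;

            ret = version_init(n);  /* callee returns early on error... */
            node_put(n);            /* ...the caller always puts the reference */
            return ret ? 1 : 0;
    }
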
index f0223cee97744825ee508e5c7ffe6057829359bf..77092268ee955fe280926f8426bd4613a949595f 100644 (file)
@@ -414,6 +414,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
                case IRQ_TYPE_EDGE_BOTH:
+                       sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
index 0495bf1d480a4cfe464e8ff330922264d03deff7..bca3e7740ef66c8fac2b8f89866935e3655fc9c3 100644 (file)
@@ -1379,7 +1379,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 
        status = gpiochip_add_irqchip(chip, lock_key, request_key);
        if (status)
-               goto err_remove_chip;
+               goto err_free_gpiochip_mask;
 
        status = of_gpiochip_add(chip);
        if (status)
@@ -1387,7 +1387,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 
        status = gpiochip_init_valid_mask(chip);
        if (status)
-               goto err_remove_chip;
+               goto err_remove_of_chip;
 
        for (i = 0; i < chip->ngpio; i++) {
                struct gpio_desc *desc = &gdev->descs[i];
@@ -1415,14 +1415,18 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
        if (gpiolib_initialized) {
                status = gpiochip_setup_dev(gdev);
                if (status)
-                       goto err_remove_chip;
+                       goto err_remove_acpi_chip;
        }
        return 0;
 
-err_remove_chip:
+err_remove_acpi_chip:
        acpi_gpiochip_remove(chip);
+err_remove_of_chip:
        gpiochip_free_hogs(chip);
        of_gpiochip_remove(chip);
+err_remove_chip:
+       gpiochip_irqchip_remove(chip);
+err_free_gpiochip_mask:
        gpiochip_free_valid_mask(chip);
 err_remove_irqchip_mask:
        gpiochip_irqchip_free_valid_mask(chip);
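
The relabelled error path above restores the usual kernel unwind idiom: each failure site jumps to a label that undoes exactly the steps that have already succeeded, in reverse order. A self-contained sketch of the idiom, with hypothetical setup/undo pairs in place of the gpiochip steps:

    #include <stdio.h>

    static int setup_a(void) { return 0; }   /* hypothetical steps; 0 = success */
    static int setup_b(void) { return 0; }
    static int setup_c(void) { return -1; }  /* pretend the last step fails */

    static void undo_b(void) { puts("undo b"); }
    static void undo_a(void) { puts("undo a"); }

    static int bring_up(void)
    {
            int ret;

            ret = setup_a();
            if (ret)
                    goto err_out;            /* nothing to undo yet */

            ret = setup_b();
            if (ret)
                    goto err_undo_a;         /* only a succeeded */

            ret = setup_c();
            if (ret)
                    goto err_undo_b;         /* a and b succeeded */

            return 0;

    err_undo_b:
            undo_b();
    err_undo_a:
            undo_a();
    err_out:
            return ret;
    }

    int main(void)
    {
            return bring_up() ? 1 : 0;
    }
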
index 5d8b30fd4534582bbf0203343df180927f5bdbc1..79fb302fb9543f93cfb9738700f53e34006e869c 100644 (file)
@@ -3165,6 +3165,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 
                /* No need to recover an evicted BO */
                if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
+                   shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
                    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
                        continue;
 
index d0d966d6080a6dda87d57d2d8ee1ed2b58a1444a..1696644ec022391d24b93df9f1dacd23079bd72e 100644 (file)
@@ -182,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
                                    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
        }
+       WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
 
        tmp = mmVM_L2_CNTL4_DEFAULT;
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
index c68fbd55db3ca6f01c49b86e60a584dfe8d90ff4..a6cda201c964c5bc918e8d693c2aa2fccf65eb58 100644 (file)
@@ -1377,6 +1377,11 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
                return UPDATE_TYPE_FULL;
        }
 
+       if (u->surface->force_full_update) {
+               update_flags->bits.full_update = 1;
+               return UPDATE_TYPE_FULL;
+       }
+
        type = get_plane_info_update_type(u);
        elevate_update_type(&overall_type, type);
 
@@ -1802,6 +1807,14 @@ void dc_commit_updates_for_stream(struct dc *dc,
                }
 
                dc_resource_state_copy_construct(state, context);
+
+               for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                       struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+                       struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+                       if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
+                               new_pipe->plane_state->force_full_update = true;
+               }
        }
 
 
@@ -1838,6 +1851,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
                dc->current_state = context;
                dc_release_state(old);
 
+               for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                       struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+                       if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
+                               pipe_ctx->plane_state->force_full_update = false;
+               }
        }
        /*let's use current_state to update watermark etc*/
        if (update_type >= UPDATE_TYPE_FULL)
index 1a7fd6aa77ebb213168cd477452ac1dd94d5aadc..0515095574e735e0535ee17ce3369168557c201e 100644 (file)
@@ -503,6 +503,9 @@ struct dc_plane_state {
        struct dc_plane_status status;
        struct dc_context *ctx;
 
+       /* HACK: Workaround for forcing full reprogramming under some conditions */
+       bool force_full_update;
+
        /* private to dc_surface.c */
        enum dc_irq_source irq_source;
        struct kref refcount;
index 4febf4ef7240e6aef2610063b55f3aee636bbca0..4fe3664fb49508e7f9c07ddc69f5b610fd884d1d 100644 (file)
@@ -190,6 +190,12 @@ static void submit_channel_request(
                                1,
                                0);
        }
+
+       REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
+
+       REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+                               10, aux110->timeout_period/10);
+
        /* set the delay and the number of bytes to write */
 
        /* The length include
@@ -242,9 +248,6 @@ static void submit_channel_request(
                }
        }
 
-       REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
-       REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
-                               10, aux110->timeout_period/10);
        REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
 }
 
index d27f22c05e4b5abd0085fb7252fccba5296f6831..e28ed6a00ff4236ffaef4346528dc1ecbb179543 100644 (file)
@@ -71,11 +71,11 @@ enum {      /* This is the timeout as defined in DP 1.2a,
         * at most within ~240usec. That means,
         * increasing this timeout will not affect normal operation,
         * and we'll timeout after
-        * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
+        * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 2400usec.
         * This timeout is especially important for
-        * resume from S3 and CTS.
+        * converters, resume from S3, and CTS.
         */
-       SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
+       SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 6
 };
 
 struct dce_aux {
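
For the arithmetic in the comment above: the old text (multiplier 4, total 1600 usec) implies AUX_TIMEOUT_PERIOD is 400 usec, so raising the multiplier to 6 gives 6 * 400 = 2400 usec, consistent with the updated total in the comment.
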
index db761329a1e3ef19d2fa05f86fdaf5b3b06c6b53..ab7968c8f6a29937177c0a464f6da4db9e297631 100644 (file)
@@ -1046,6 +1046,10 @@ static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi)
        if (hdmi->version < 0x200a)
                return false;
 
+       /* Disable if no DDC bus */
+       if (!hdmi->ddc)
+               return false;
+
        /* Disable if SCDC is not supported, or if an HF-VSDB block is absent */
        if (!display->hdmi.scdc.supported ||
            !display->hdmi.scdc.scrambling.supported)
@@ -1684,13 +1688,13 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
                         * Source Devices compliant shall set the
                         * Source Version = 1.
                         */
-                       drm_scdc_readb(&hdmi->i2c->adap, SCDC_SINK_VERSION,
+                       drm_scdc_readb(hdmi->ddc, SCDC_SINK_VERSION,
                                       &bytes);
-                       drm_scdc_writeb(&hdmi->i2c->adap, SCDC_SOURCE_VERSION,
+                       drm_scdc_writeb(hdmi->ddc, SCDC_SOURCE_VERSION,
                                min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
 
                        /* Enabled Scrambling in the Sink */
-                       drm_scdc_set_scrambling(&hdmi->i2c->adap, 1);
+                       drm_scdc_set_scrambling(hdmi->ddc, 1);
 
                        /*
                         * To activate the scrambler feature, you must ensure
@@ -1706,7 +1710,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
                        hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
                        hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
                                    HDMI_MC_SWRSTZ);
-                       drm_scdc_set_scrambling(&hdmi->i2c->adap, 0);
+                       drm_scdc_set_scrambling(hdmi->ddc, 0);
                }
        }
 
@@ -1800,6 +1804,8 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
         * iteration for others.
         * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing
         * the workaround with a single iteration.
+        * The Rockchip RK3288 SoC (v2.00a) and RK3328/RK3399 SoCs (v2.11a) have
+        * been identified as needing the workaround with a single iteration.
         */
 
        switch (hdmi->version) {
@@ -1808,7 +1814,9 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
                break;
        case 0x131a:
        case 0x132a:
+       case 0x200a:
        case 0x201a:
+       case 0x211a:
        case 0x212a:
                count = 1;
                break;
index ab4e60dfd6a3460001cbcae4691f1ede8ebb230e..98cea1f4b3bf05500dcd7fe24b2f367fa6c9e3eb 100644 (file)
@@ -3862,14 +3862,16 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
                ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
        else
                ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
+       if (ret)
+               return ret;
 
-       if (IS_GEN9_LP(dev_priv) && ret)
+       if (IS_GEN9_LP(dev_priv))
                pipe_config->lane_lat_optim_mask =
                        bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
 
        intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
 
-       return ret;
+       return 0;
 
 }
 
index 8891f29a8c7fffacad25f29e718376aa164261f7..48da4a969a0a9afabf6be3db6aff252fd02d3c83 100644 (file)
@@ -1886,6 +1886,9 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
        int pipe_bpp;
        int ret;
 
+       pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
+               intel_dp_supports_fec(intel_dp, pipe_config);
+
        if (!intel_dp_supports_dsc(intel_dp, pipe_config))
                return -EINVAL;
 
@@ -2116,9 +2119,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
                return -EINVAL;
 
-       pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
-                                 intel_dp_supports_fec(intel_dp, pipe_config);
-
        ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
        if (ret < 0)
                return ret;
index e8f694b57b8ac857528824051ddcc42016e86239..376ffe842e2678d1f31ee68acd38908cff8a85d9 100644 (file)
@@ -338,8 +338,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                                    bool *enabled, int width, int height)
 {
        struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
+       unsigned long conn_configured, conn_seq, mask;
        unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
-       unsigned long conn_configured, conn_seq;
        int i, j;
        bool *save_enabled;
        bool fallback = true, ret = true;
@@ -357,9 +357,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                drm_modeset_backoff(&ctx);
 
        memcpy(save_enabled, enabled, count);
-       conn_seq = GENMASK(count - 1, 0);
+       mask = GENMASK(count - 1, 0);
        conn_configured = 0;
 retry:
+       conn_seq = conn_configured;
        for (i = 0; i < count; i++) {
                struct drm_fb_helper_connector *fb_conn;
                struct drm_connector *connector;
@@ -372,8 +373,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                if (conn_configured & BIT(i))
                        continue;
 
-               /* First pass, only consider tiled connectors */
-               if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
+               if (conn_seq == 0 && !connector->has_tile)
                        continue;
 
                if (connector->status == connector_status_connected)
@@ -477,10 +477,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                conn_configured |= BIT(i);
        }
 
-       if (conn_configured != conn_seq) { /* repeat until no more are found */
-               conn_seq = conn_configured;
+       if ((conn_configured & mask) != mask && conn_configured != conn_seq)
                goto retry;
-       }
 
        /*
         * If the BIOS didn't enable everything it could, fall back to have the
index ec3602ebbc1cd1e87da13c9c909078927ccf2287..54011df8c2e807d7984dc7985764525899ff49d7 100644 (file)
@@ -71,7 +71,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
        if (disable_partial)
                ipu_plane_disable(ipu_crtc->plane[1], true);
        if (disable_full)
-               ipu_plane_disable(ipu_crtc->plane[0], false);
+               ipu_plane_disable(ipu_crtc->plane[0], true);
 }
 
 static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
index 19fc601c9eeb52fc9704bbfc7c164dbfa71b7717..a1bec2779e76220c8568f5c78a7345ef2f7c3d36 100644 (file)
@@ -366,10 +366,9 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
 EXPORT_SYMBOL(drm_sched_increase_karma);
 
 /**
- * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
+ * drm_sched_stop - stop the scheduler
  *
  * @sched: scheduler instance
- * @bad: bad scheduler job
  *
  */
 void drm_sched_stop(struct drm_gpu_scheduler *sched)
index 3ebd9f5e2719d7f028c2c87b1e2cedd6c60a5365..29258b404e549fbd31d67ec164c3bc852a10eb14 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/of_reserved_mem.h>
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
@@ -85,6 +86,8 @@ static int sun4i_drv_bind(struct device *dev)
                ret = -ENOMEM;
                goto free_drm;
        }
+
+       dev_set_drvdata(dev, drm);
        drm->dev_private = drv;
        INIT_LIST_HEAD(&drv->frontend_list);
        INIT_LIST_HEAD(&drv->engine_list);
@@ -144,8 +147,12 @@ static void sun4i_drv_unbind(struct device *dev)
 
        drm_dev_unregister(drm);
        drm_kms_helper_poll_fini(drm);
+       drm_atomic_helper_shutdown(drm);
        drm_mode_config_cleanup(drm);
+
+       component_unbind_all(dev, NULL);
        of_reserved_mem_device_release(dev);
+
        drm_dev_put(drm);
 }
 
@@ -395,6 +402,8 @@ static int sun4i_drv_probe(struct platform_device *pdev)
 
 static int sun4i_drv_remove(struct platform_device *pdev)
 {
+       component_master_del(&pdev->dev, &sun4i_drv_master_ops);
+
        return 0;
 }
 
index 47c55974756d576b71193219b92d976078006b4e..d23c4bfde790ca0864722e8b1eb2907b7c9ee24f 100644 (file)
@@ -1260,9 +1260,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
 
        hdmi->dvi = !tegra_output_is_hdmi(output);
        if (!hdmi->dvi) {
-               err = tegra_hdmi_setup_audio(hdmi);
-               if (err < 0)
-                       hdmi->dvi = true;
+               /*
+                * Make sure that the audio format has been configured before
+                * enabling audio, otherwise we may try to divide by zero.
+                */
+               if (hdmi->format.sample_rate > 0) {
+                       err = tegra_hdmi_setup_audio(hdmi);
+                       if (err < 0)
+                               hdmi->dvi = true;
+               }
        }
 
        if (hdmi->config->has_hda)
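
The added check is the usual validate-before-divide guard: refuse the path that divides by the sample rate until a non-zero rate has been configured. A tiny sketch of the shape, with a hypothetical setup_audio() standing in for the driver's audio configuration:

    #include <stdio.h>

    /* Hypothetical stand-in for the audio setup that divides by the rate. */
    static int setup_audio(unsigned int sample_rate, unsigned int clock_hz)
    {
            if (sample_rate == 0)           /* format not configured yet */
                    return -1;
            printf("divider = %u\n", clock_hz / sample_rate);
            return 0;
    }

    int main(void)
    {
            unsigned int rate = 0;          /* e.g. no format negotiated yet */

            if (rate > 0)                   /* the driver's precondition */
                    return setup_audio(rate, 148500000u) ? 1 : 0;

            puts("audio left disabled until a format is configured");
            return 0;
    }
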
index 3f56647cdb35f94ddcead862b286516ad903150e..1a01669b159ab78c0c9b849616bcc7d852738b77 100644 (file)
@@ -49,9 +49,8 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
  * ttm_global_mutex - protecting the global BO state
  */
 DEFINE_MUTEX(ttm_global_mutex);
-struct ttm_bo_global ttm_bo_glob = {
-       .use_count = 0
-};
+unsigned ttm_bo_glob_use_count;
+struct ttm_bo_global ttm_bo_glob;
 
 static struct attribute ttm_bo_count = {
        .name = "bo_count",
@@ -876,8 +875,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                reservation_object_add_shared_fence(bo->resv, fence);
 
                ret = reservation_object_reserve_shared(bo->resv, 1);
-               if (unlikely(ret))
+               if (unlikely(ret)) {
+                       dma_fence_put(fence);
                        return ret;
+               }
 
                dma_fence_put(bo->moving);
                bo->moving = fence;
@@ -1529,12 +1530,13 @@ static void ttm_bo_global_release(void)
        struct ttm_bo_global *glob = &ttm_bo_glob;
 
        mutex_lock(&ttm_global_mutex);
-       if (--glob->use_count > 0)
+       if (--ttm_bo_glob_use_count > 0)
                goto out;
 
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
        ttm_mem_global_release(&ttm_mem_glob);
+       memset(glob, 0, sizeof(*glob));
 out:
        mutex_unlock(&ttm_global_mutex);
 }
@@ -1546,7 +1548,7 @@ static int ttm_bo_global_init(void)
        unsigned i;
 
        mutex_lock(&ttm_global_mutex);
-       if (++glob->use_count > 1)
+       if (++ttm_bo_glob_use_count > 1)
                goto out;
 
        ret = ttm_mem_global_init(&ttm_mem_glob);
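
The dma_fence_put() added on the error branch earlier in this file closes a reference leak: at that point the function holds a fence reference that every exit path must either release or hand off. A minimal refcount sketch with a hypothetical fence object:

    #include <stdlib.h>

    struct fence { int refs; };             /* hypothetical dma_fence stand-in */

    static void fence_put(struct fence *f)
    {
            if (f && --f->refs == 0)
                    free(f);
    }

    static int reserve_shared(void) { return -1; }  /* pretend this fails */

    static int add_move_fence(struct fence *f)
    {
            int ret = reserve_shared();

            if (ret) {
                    fence_put(f);           /* error path drops the ref it owns */
                    return ret;
            }
            /* the success path hands the reference off instead */
            return 0;
    }

    int main(void)
    {
            struct fence *f = calloc(1, sizeof(*f));

            if (!f)
                    return 1;
            f->refs = 1;
            add_move_fence(f);              /* leak-free on success or failure */
            return 0;
    }
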
index f1567c353b543a3376c6b64ab5fb6c4550f91b23..9a0909decb3668ee1e56a729c7664f3f01a33a72 100644 (file)
@@ -461,8 +461,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
 
 void ttm_mem_global_release(struct ttm_mem_global *glob)
 {
-       unsigned int i;
        struct ttm_mem_zone *zone;
+       unsigned int i;
 
        /* let the page allocator first stop the shrink work. */
        ttm_page_alloc_fini();
@@ -475,9 +475,10 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
                zone = glob->zones[i];
                kobject_del(&zone->kobj);
                kobject_put(&zone->kobj);
-                       }
+       }
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
+       memset(glob, 0, sizeof(*glob));
 }
 
 static void ttm_check_swapping(struct ttm_mem_global *glob)
index f841accc2c0064a3edd865423a10818480477f39..627f8dc91d0ed23e0958dfc39d106c967dcd376a 100644 (file)
@@ -730,9 +730,10 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                        }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                       if (!(flags & TTM_PAGE_FLAG_DMA32)) {
-                               for (j = 0; j < HPAGE_PMD_NR; ++j)
-                                       if (p++ != pages[i + j])
+                       if (!(flags & TTM_PAGE_FLAG_DMA32) &&
+                           (npages - i) >= HPAGE_PMD_NR) {
+                               for (j = 1; j < HPAGE_PMD_NR; ++j)
+                                       if (++p != pages[i + j])
                                            break;
 
                                if (j == HPAGE_PMD_NR)
@@ -759,15 +760,15 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                unsigned max_size, n2free;
 
                spin_lock_irqsave(&huge->lock, irq_flags);
-               while (i < npages) {
+               while ((npages - i) >= HPAGE_PMD_NR) {
                        struct page *p = pages[i];
                        unsigned j;
 
                        if (!p)
                                break;
 
-                       for (j = 0; j < HPAGE_PMD_NR; ++j)
-                               if (p++ != pages[i + j])
+                       for (j = 1; j < HPAGE_PMD_NR; ++j)
+                               if (++p != pages[i + j])
                                    break;
 
                        if (j != HPAGE_PMD_NR)
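
The hunks above repair the same run-detection loop twice: first make sure at least HPAGE_PMD_NR entries remain (the old loop could index past the array), then compare each pages[i + j] against the successor of the previous page pointer, which is why j now starts at 1 and p is pre-incremented. A self-contained sketch of the corrected scan over plain integers:

    #include <stdio.h>

    #define RUN_LEN 4                       /* stands in for HPAGE_PMD_NR */

    /* 1 if arr[i..i+RUN_LEN-1] holds consecutive values, reading in bounds. */
    static int run_at(const int *arr, unsigned int n, unsigned int i)
    {
            unsigned int j;
            int p;

            if (n - i < RUN_LEN)            /* bounds check from the fix */
                    return 0;

            p = arr[i];                     /* arr[i] is the anchor */
            for (j = 1; j < RUN_LEN; ++j)
                    if (++p != arr[i + j])
                            return 0;
            return 1;
    }

    int main(void)
    {
            int pages[] = { 10, 11, 12, 13, 20, 21 };

            printf("%d\n", run_at(pages, 6, 0));  /* 1: 10..13 is a run */
            printf("%d\n", run_at(pages, 6, 2));  /* 0: 12,13 then 20 breaks */
            printf("%d\n", run_at(pages, 6, 4));  /* 0: only two entries left */
            return 0;
    }
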
index 730008d3da761e2eb37d9821c518672567a6e1c6..1baa10e9448472510006b7390e3e574841c0163a 100644 (file)
@@ -1042,7 +1042,7 @@ static void
 vc4_crtc_reset(struct drm_crtc *crtc)
 {
        if (crtc->state)
-               __drm_atomic_helper_crtc_destroy_state(crtc->state);
+               vc4_crtc_destroy_state(crtc, crtc->state);
 
        crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
        if (crtc->state)
index b996ac1d4fcc9cb3ca1ff86cad8be0c364c80c30..af92964b6889dd0dbfaadac5558cb27ee78b3d56 100644 (file)
@@ -205,10 +205,14 @@ static struct drm_driver driver = {
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = virtio_gpu_debugfs_init,
 #endif
+       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export = drm_gem_prime_export,
        .gem_prime_import = drm_gem_prime_import,
        .gem_prime_pin = virtgpu_gem_prime_pin,
        .gem_prime_unpin = virtgpu_gem_prime_unpin,
+       .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
+       .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
        .gem_prime_vmap = virtgpu_gem_prime_vmap,
        .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
        .gem_prime_mmap = virtgpu_gem_prime_mmap,
index 3238fdf58eb480ed9447d0639aaded9a88d28dcc..d577cb76f5ad6b66d26124284159c82706f44699 100644 (file)
@@ -354,6 +354,10 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
 /* virtgpu_prime.c */
 int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
 void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
+struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
+       struct drm_device *dev, struct dma_buf_attachment *attach,
+       struct sg_table *sgt);
 void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
 void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
index c59ec34c80a5df2c6b3a91f7ec73cd05f85445ad..eb51a78e11991c01cce73d34cf74907cf9202764 100644 (file)
@@ -39,6 +39,18 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
        WARN_ONCE(1, "not implemented");
 }
 
+struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+       return ERR_PTR(-ENODEV);
+}
+
+struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
+       struct drm_device *dev, struct dma_buf_attachment *attach,
+       struct sg_table *table)
+{
+       return ERR_PTR(-ENODEV);
+}
+
 void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
 {
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
index 6165fe2c4504de07d0626c92892293db75d7354c..1bfa353d995cf5bb7ca4c4d1ce8a46ba686e55d6 100644 (file)
@@ -545,30 +545,14 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
        dev_priv->initial_height = height;
 }
 
-/**
- * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
- * taking place.
- * @dev: Pointer to the struct drm_device.
- *
- * Return: true if iommu present, false otherwise.
- */
-static bool vmw_assume_iommu(struct drm_device *dev)
-{
-       const struct dma_map_ops *ops = get_dma_ops(dev->dev);
-
-       return !dma_is_direct(ops) && ops &&
-               ops->map_page != dma_direct_map_page;
-}
-
 /**
  * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
  * system.
  *
  * @dev_priv: Pointer to a struct vmw_private
  *
- * This functions tries to determine the IOMMU setup and what actions
- * need to be taken by the driver to make system pages visible to the
- * device.
+ * This function tries to determine what actions need to be taken by the
+ * driver to make system pages visible to the device.
  * If this function decides that DMA is not possible, it returns -EINVAL.
  * The driver may then try to disable features of the device that require
  * DMA.
@@ -578,23 +562,16 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
        static const char *names[vmw_dma_map_max] = {
                [vmw_dma_phys] = "Using physical TTM page addresses.",
                [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
-               [vmw_dma_map_populate] = "Keeping DMA mappings.",
+               [vmw_dma_map_populate] = "Caching DMA mappings.",
                [vmw_dma_map_bind] = "Giving up DMA mappings early."};
 
        if (vmw_force_coherent)
                dev_priv->map_mode = vmw_dma_alloc_coherent;
-       else if (vmw_assume_iommu(dev_priv->dev))
-               dev_priv->map_mode = vmw_dma_map_populate;
-       else if (!vmw_force_iommu)
-               dev_priv->map_mode = vmw_dma_phys;
-       else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
-               dev_priv->map_mode = vmw_dma_alloc_coherent;
+       else if (vmw_restrict_iommu)
+               dev_priv->map_mode = vmw_dma_map_bind;
        else
                dev_priv->map_mode = vmw_dma_map_populate;
 
-       if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
-               dev_priv->map_mode = vmw_dma_map_bind;
-
        /* No TTM coherent page pool? FIXME: Ask TTM instead! */
         if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
            (dev_priv->map_mode == vmw_dma_alloc_coherent))
index 4030d64916f004a03781fc7c5b7a36173bcf344f..0c0eb43abf657f2369872b3f5026b3a965f4a298 100644 (file)
@@ -114,9 +114,13 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)
 
 static void host1x_channel_set_streamid(struct host1x_channel *channel)
 {
-#if IS_ENABLED(CONFIG_IOMMU_API) &&  HOST1X_HW >= 6
+#if HOST1X_HW >= 6
+       u32 sid = 0x7f;
+#ifdef CONFIG_IOMMU_API
        struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent);
-       u32 sid = spec ? spec->ids[0] & 0xffff : 0x7f;
+       if (spec)
+               sid = spec->ids[0] & 0xffff;
+#endif
 
        host1x_ch_writel(channel, sid, HOST1X_CHANNEL_SMMU_STREAMID);
 #endif
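
The restructuring above is a behavior fix, not just style: with CONFIG_IOMMU_API disabled the old #if compiled out the whole write, while the new shape always programs the stream ID register, using 0x7f as the fallback when no IOMMU fwspec is attached. A sketch of the default-then-override shape, with a hypothetical lookup in place of dev_iommu_fwspec_get():

    #include <stdio.h>

    struct fwspec { unsigned int ids[1]; }; /* hypothetical iommu_fwspec */

    /* Hypothetical lookup; returns NULL when no IOMMU spec is attached. */
    static struct fwspec *get_spec(int attached)
    {
            static struct fwspec s = { { 0x1234 } };

            return attached ? &s : NULL;
    }

    static unsigned int pick_sid(int attached)
    {
            unsigned int sid = 0x7f;        /* bypass/fallback stream ID */
            struct fwspec *spec = get_spec(attached);

            if (spec)
                    sid = spec->ids[0] & 0xffff;
            return sid;
    }

    int main(void)
    {
            printf("%#x %#x\n", pick_sid(0), pick_sid(1));  /* 0x7f 0x1234 */
            return 0;
    }
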
index 9b2b3fa479c462d1c4d7b8b02180ad22eb20a715..5e44ff1f20851a16afdb42bfdaf73caab97ebff5 100644 (file)
@@ -195,7 +195,8 @@ int ipu_dp_setup_channel(struct ipu_dp *dp,
                ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs,
                                DP_COM_CONF_CSC_DEF_BOTH);
        } else {
-               if (flow->foreground.in_cs == flow->out_cs)
+               if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN ||
+                   flow->foreground.in_cs == flow->out_cs)
                        /*
                         * foreground identical to output, apply color
                         * conversion on background
@@ -261,6 +262,8 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
        struct ipu_dp_priv *priv = flow->priv;
        u32 reg, csc;
 
+       dp->in_cs = IPUV3_COLORSPACE_UNKNOWN;
+
        if (!dp->foreground)
                return;
 
@@ -268,8 +271,9 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
 
        reg = readl(flow->base + DP_COM_CONF);
        csc = reg & DP_COM_CONF_CSC_DEF_MASK;
-       if (csc == DP_COM_CONF_CSC_DEF_FG)
-               reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+       reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+       if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG)
+               reg |= DP_COM_CONF_CSC_DEF_BG;
 
        reg &= ~DP_COM_CONF_FG_EN;
        writel(reg, flow->base + DP_COM_CONF);
@@ -347,6 +351,8 @@ int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
        mutex_init(&priv->mutex);
 
        for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
+               priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN;
+               priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN;
                priv->flow[i].foreground.foreground = true;
                priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
                priv->flow[i].priv = priv;
index 1fce0076e7dc470e94cf561bf8c55b78cd92c6f2..b607286a0bc82f360a133b5dce204a8f2441ff5c 100644 (file)
@@ -680,6 +680,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                        break;
                }
 
+               if ((usage->hid & 0xf0) == 0xb0) {      /* SC - Display */
+                       switch (usage->hid & 0xf) {
+                       case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break;
+                       default: goto ignore;
+                       }
+                       break;
+               }
+
                /*
                 * Some lazy vendors declare 255 usages for System Control,
                 * leading to the creation of ABS_X|Y axis and too many others.
@@ -902,7 +910,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x06a: map_key_clear(KEY_GREEN);           break;
                case 0x06b: map_key_clear(KEY_BLUE);            break;
                case 0x06c: map_key_clear(KEY_YELLOW);          break;
-               case 0x06d: map_key_clear(KEY_ZOOM);            break;
+               case 0x06d: map_key_clear(KEY_ASPECT_RATIO);    break;
 
                case 0x06f: map_key_clear(KEY_BRIGHTNESSUP);            break;
                case 0x070: map_key_clear(KEY_BRIGHTNESSDOWN);          break;
@@ -911,6 +919,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX);          break;
                case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO);         break;
 
+               case 0x079: map_key_clear(KEY_KBDILLUMUP);      break;
+               case 0x07a: map_key_clear(KEY_KBDILLUMDOWN);    break;
+               case 0x07c: map_key_clear(KEY_KBDILLUMTOGGLE);  break;
+
                case 0x082: map_key_clear(KEY_VIDEO_NEXT);      break;
                case 0x083: map_key_clear(KEY_LAST);            break;
                case 0x084: map_key_clear(KEY_ENTER);           break;
@@ -1022,6 +1034,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x22d: map_key_clear(KEY_ZOOMIN);          break;
                case 0x22e: map_key_clear(KEY_ZOOMOUT);         break;
                case 0x22f: map_key_clear(KEY_ZOOMRESET);       break;
+               case 0x232: map_key_clear(KEY_FULL_SCREEN);     break;
                case 0x233: map_key_clear(KEY_SCROLLUP);        break;
                case 0x234: map_key_clear(KEY_SCROLLDOWN);      break;
                case 0x238: /* AC Pan */
@@ -1045,6 +1058,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT);   break;
                case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL);   break;
 
+               case 0x29f: map_key_clear(KEY_SCALE);           break;
+
                default: map_key_clear(KEY_UNKNOWN);
                }
                break;
index 2dc628d4f1aee1b5c07593f85e7d75a6bdb3d0be..1412abcff01095cd001ece69bb80a69d714a80ba 100644 (file)
@@ -1980,7 +1980,6 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
 {
        struct i3c_dev_boardinfo *boardinfo;
        struct device *dev = &master->dev;
-       struct i3c_device_info info = { };
        enum i3c_addr_slot_status addrstatus;
        u32 init_dyn_addr = 0;
 
@@ -2012,8 +2011,8 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
 
        boardinfo->pid = ((u64)reg[1] << 32) | reg[2];
 
-       if ((info.pid & GENMASK_ULL(63, 48)) ||
-           I3C_PID_RND_LOWER_32BITS(info.pid))
+       if ((boardinfo->pid & GENMASK_ULL(63, 48)) ||
+           I3C_PID_RND_LOWER_32BITS(boardinfo->pid))
                return -EINVAL;
 
        boardinfo->init_dyn_addr = init_dyn_addr;
index 59279224e07fcefa460b1939212a5009eb8a3681..10c26ffaa8effe969464c6abc1ef6f198c069744 100644 (file)
@@ -300,7 +300,7 @@ to_dw_i3c_master(struct i3c_master_controller *master)
 
 static void dw_i3c_master_disable(struct dw_i3c_master *master)
 {
-       writel(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_ENABLE,
+       writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
               master->regs + DEVICE_CTRL);
 }
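
The one-character fix above is the classic clear-bit bug: reg & DEV_CTRL_ENABLE keeps only the enable bit (leaving it set and wiping everything else on write-back), while reg & ~DEV_CTRL_ENABLE clears the enable bit and preserves the rest. A sketch of the read-modify-write pair on a hypothetical register shadow:

    #include <stdio.h>
    #include <stdint.h>

    #define CTRL_ENABLE (1u << 31)          /* hypothetical enable bit */

    static uint32_t ctrl = 0x80000005u;     /* hypothetical register shadow */

    static void enable(void)  { ctrl |=  CTRL_ENABLE; }
    static void disable(void) { ctrl &= ~CTRL_ENABLE; }  /* ~ is essential */

    int main(void)
    {
            disable();
            printf("%#x\n", (unsigned)ctrl);   /* 0x5: other bits preserved */
            enable();
            printf("%#x\n", (unsigned)ctrl);   /* 0x80000005 */
            return 0;
    }
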
 
index 7096e577b23f86f5f71aa36beae3e5526ab856d7..50f3ff386bea43f8853c5f5e7426c88c0e3f0851 100644 (file)
@@ -1437,6 +1437,8 @@ static int kxcjk1013_resume(struct device *dev)
 
        mutex_lock(&data->mutex);
        ret = kxcjk1013_set_mode(data, OPERATION);
+       if (ret == 0)
+               ret = kxcjk1013_set_range(data, data->range);
        mutex_unlock(&data->mutex);
 
        return ret;
index ff5f2da2e1b134d369fbc6ce7180a9ebf0de4ec1..54d9978b274055da963ed282b6b94c0b4245fed5 100644 (file)
@@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta,
        if (sigma_delta->info->has_registers) {
                data[0] = reg << sigma_delta->info->addr_shift;
                data[0] |= sigma_delta->info->read_mask;
+               data[0] |= sigma_delta->comm;
                spi_message_add_tail(&t[0], &m);
        }
        spi_message_add_tail(&t[1], &m);
index 75d2f73582a3d7581e533afd361cdb7af7df46d0..596841a3c4db77f59f5fc7c3c3f0f1fddc21aab7 100644 (file)
@@ -704,23 +704,29 @@ static int at91_adc_read_raw(struct iio_dev *idev,
                ret = wait_event_interruptible_timeout(st->wq_data_avail,
                                                       st->done,
                                                       msecs_to_jiffies(1000));
-               if (ret == 0)
-                       ret = -ETIMEDOUT;
-               if (ret < 0) {
-                       mutex_unlock(&st->lock);
-                       return ret;
-               }
-
-               *val = st->last_value;
 
+               /* Disable interrupts, regardless of whether the ADC
+                * conversion was successful
+                */
                at91_adc_writel(st, AT91_ADC_CHDR,
                                AT91_ADC_CH(chan->channel));
                at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
 
-               st->last_value = 0;
-               st->done = false;
+               if (ret > 0) {
+                       /* a valid conversion took place */
+                       *val = st->last_value;
+                       st->last_value = 0;
+                       st->done = false;
+                       ret = IIO_VAL_INT;
+               } else if (ret == 0) {
+                       /* conversion timeout */
+                       dev_err(&idev->dev, "ADC Channel %d timeout.\n",
+                               chan->channel);
+                       ret = -ETIMEDOUT;
+               }
+
                mutex_unlock(&st->lock);
-               return IIO_VAL_INT;
+               return ret;
 
        case IIO_CHAN_INFO_SCALE:
                *val = st->vref_mv;
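
The restructured branch above dispatches on the three possible results of wait_event_interruptible_timeout(): positive (the condition became true, the value is the time left), zero (timeout), and negative (interrupted by a signal), with the interrupt teardown performed on every path before deciding. A sketch of the same three-way handling around a hypothetical wait helper:

    #include <stdio.h>

    /* Hypothetical waiter: >0 done (time left), 0 timed out, <0 signal. */
    static long wait_for_done(long scenario)
    {
            return scenario;                /* canned result for illustration */
    }

    static int read_sample(long scenario, int *val)
    {
            long ret = wait_for_done(scenario);

            /* the driver masks its interrupt here, on every path */

            if (ret > 0) {
                    *val = 42;              /* a valid conversion took place */
                    return 0;
            }
            if (ret == 0) {
                    fprintf(stderr, "conversion timeout\n");
                    return -1;              /* -ETIMEDOUT in the driver */
            }
            return (int)ret;                /* propagate -ERESTARTSYS etc. */
    }

    int main(void)
    {
            int v = 0;

            printf("ok=%d\n", read_sample(1, &v));
            printf("timeout=%d\n", read_sample(0, &v));
            printf("signal=%d\n", read_sample(-4, &v));
            return 0;
    }
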
index b13c61539d46baf3490be318342dac64b6b1dc4b..6401ca7a9a2072e9760144c7b71717d985db6d27 100644 (file)
@@ -1292,6 +1292,7 @@ static int xadc_probe(struct platform_device *pdev)
 
 err_free_irq:
        free_irq(xadc->irq, indio_dev);
+       cancel_delayed_work_sync(&xadc->zynq_unmask_work);
 err_clk_disable_unprepare:
        clk_disable_unprepare(xadc->clk);
 err_free_samplerate_trigger:
@@ -1321,8 +1322,8 @@ static int xadc_remove(struct platform_device *pdev)
                iio_triggered_buffer_cleanup(indio_dev);
        }
        free_irq(xadc->irq, indio_dev);
+       cancel_delayed_work_sync(&xadc->zynq_unmask_work);
        clk_disable_unprepare(xadc->clk);
-       cancel_delayed_work(&xadc->zynq_unmask_work);
        kfree(xadc->data);
        kfree(indio_dev->channels);
 
index d5d146e9e372305852e7dc20c5492fc48e5218b1..92c684d2b67ecfd7992327e8703812c5f7cdd94f 100644 (file)
@@ -64,6 +64,7 @@ config IAQCORE
 config PMS7003
        tristate "Plantower PMS7003 particulate matter sensor"
        depends on SERIAL_DEV_BUS
+       select IIO_TRIGGERED_BUFFER
        help
          Say Y here to build support for the Plantower PMS7003 particulate
          matter sensor.
@@ -71,6 +72,19 @@ config PMS7003
          To compile this driver as a module, choose M here: the module will
          be called pms7003.
 
+config SENSIRION_SGP30
+       tristate "Sensirion SGPxx gas sensors"
+       depends on I2C
+       select CRC8
+       help
+         Say Y here to build I2C interface support for the following
+         Sensirion SGP gas sensors:
+           * SGP30 gas sensor
+           * SGPC3 low power gas sensor
+
+         To compile this driver as a module, choose M here: the
+         module will be called sgp30.
+
 config SPS30
        tristate "SPS30 particulate matter sensor"
        depends on I2C
index 0ae89b87e2d6451fbe177e0978b2179ab4c4487b..4edc5d21cb9fa63739d70fee23976832f1cf9313 100644 (file)
@@ -2,11 +2,9 @@
 #ifndef BME680_H_
 #define BME680_H_
 
-#define BME680_REG_CHIP_I2C_ID                 0xD0
-#define BME680_REG_CHIP_SPI_ID                 0x50
+#define BME680_REG_CHIP_ID                     0xD0
 #define   BME680_CHIP_ID_VAL                   0x61
-#define BME680_REG_SOFT_RESET_I2C              0xE0
-#define BME680_REG_SOFT_RESET_SPI              0x60
+#define BME680_REG_SOFT_RESET                  0xE0
 #define   BME680_CMD_SOFTRESET                 0xB6
 #define BME680_REG_STATUS                      0x73
 #define   BME680_SPI_MEM_PAGE_BIT              BIT(4)
index 70c1fe4366f4c6a17100b469452f8903281e665e..ccde4c65ff9340b2bcc4a730dc978daf918fa08c 100644 (file)
@@ -63,9 +63,23 @@ struct bme680_data {
        s32 t_fine;
 };
 
+static const struct regmap_range bme680_volatile_ranges[] = {
+       regmap_reg_range(BME680_REG_MEAS_STAT_0, BME680_REG_GAS_R_LSB),
+       regmap_reg_range(BME680_REG_STATUS, BME680_REG_STATUS),
+       regmap_reg_range(BME680_T2_LSB_REG, BME680_GH3_REG),
+};
+
+static const struct regmap_access_table bme680_volatile_table = {
+       .yes_ranges     = bme680_volatile_ranges,
+       .n_yes_ranges   = ARRAY_SIZE(bme680_volatile_ranges),
+};
+
 const struct regmap_config bme680_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
+       .max_register = 0xef,
+       .volatile_table = &bme680_volatile_table,
+       .cache_type = REGCACHE_RBTREE,
 };
 EXPORT_SYMBOL(bme680_regmap_config);
 
@@ -316,6 +330,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
        s64 var1, var2, var3;
        s16 calc_temp;
 
+       /* If the calibration is invalid, attempt to reload it */
+       if (!calib->par_t2)
+               bme680_read_calib(data, calib);
+
        var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
        var2 = (var1 * calib->par_t2) >> 11;
        var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
@@ -583,8 +601,7 @@ static int bme680_gas_config(struct bme680_data *data)
        return ret;
 }
 
-static int bme680_read_temp(struct bme680_data *data,
-                           int *val, int *val2)
+static int bme680_read_temp(struct bme680_data *data, int *val)
 {
        struct device *dev = regmap_get_device(data->regmap);
        int ret;
@@ -617,10 +634,9 @@ static int bme680_read_temp(struct bme680_data *data,
         * compensate_press/compensate_humid to get compensated
         * pressure/humidity readings.
         */
-       if (val && val2) {
-               *val = comp_temp;
-               *val2 = 100;
-               return IIO_VAL_FRACTIONAL;
+       if (val) {
+               *val = comp_temp * 10; /* Centidegrees to millidegrees */
+               return IIO_VAL_INT;
        }
 
        return ret;
@@ -635,7 +651,7 @@ static int bme680_read_press(struct bme680_data *data,
        s32 adc_press;
 
        /* Read and compensate temperature to get a reading of t_fine */
-       ret = bme680_read_temp(data, NULL, NULL);
+       ret = bme680_read_temp(data, NULL);
        if (ret < 0)
                return ret;
 
@@ -668,7 +684,7 @@ static int bme680_read_humid(struct bme680_data *data,
        u32 comp_humidity;
 
        /* Read and compensate temperature to get a reading of t_fine */
-       ret = bme680_read_temp(data, NULL, NULL);
+       ret = bme680_read_temp(data, NULL);
        if (ret < 0)
                return ret;
 
@@ -761,7 +777,7 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
        case IIO_CHAN_INFO_PROCESSED:
                switch (chan->type) {
                case IIO_TEMP:
-                       return bme680_read_temp(data, val, val2);
+                       return bme680_read_temp(data, val);
                case IIO_PRESSURE:
                        return bme680_read_press(data, val, val2);
                case IIO_HUMIDITYRELATIVE:
@@ -867,8 +883,28 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
 {
        struct iio_dev *indio_dev;
        struct bme680_data *data;
+       unsigned int val;
        int ret;
 
+       ret = regmap_write(regmap, BME680_REG_SOFT_RESET,
+                          BME680_CMD_SOFTRESET);
+       if (ret < 0) {
+               dev_err(dev, "Failed to reset chip\n");
+               return ret;
+       }
+
+       ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val);
+       if (ret < 0) {
+               dev_err(dev, "Error reading chip ID\n");
+               return ret;
+       }
+
+       if (val != BME680_CHIP_ID_VAL) {
+               dev_err(dev, "Wrong chip ID, got %x expected %x\n",
+                               val, BME680_CHIP_ID_VAL);
+               return -ENODEV;
+       }
+
        indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
        if (!indio_dev)
                return -ENOMEM;
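
On the unit change in this file: bme680_compensate_temp() yields centidegrees Celsius (2437 means 24.37 degrees), and IIO reports processed temperature in millidegrees, so comp_temp * 10 returns 24370 as a plain integer. The old val/val2 fractional form expressed the same 2437/100 in whole degrees, which did not match the millidegree convention.
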
index b2f805b6b36a4904fea4183aa66cce10304b6a67..de9c9e3d23ea347824f0a480031bf46e11021c6a 100644 (file)
@@ -23,8 +23,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
 {
        struct regmap *regmap;
        const char *name = NULL;
-       unsigned int val;
-       int ret;
 
        regmap = devm_regmap_init_i2c(client, &bme680_regmap_config);
        if (IS_ERR(regmap)) {
@@ -33,25 +31,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
                return PTR_ERR(regmap);
        }
 
-       ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C,
-                          BME680_CMD_SOFTRESET);
-       if (ret < 0) {
-               dev_err(&client->dev, "Failed to reset chip\n");
-               return ret;
-       }
-
-       ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val);
-       if (ret < 0) {
-               dev_err(&client->dev, "Error reading I2C chip ID\n");
-               return ret;
-       }
-
-       if (val != BME680_CHIP_ID_VAL) {
-               dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n",
-                               val, BME680_CHIP_ID_VAL);
-               return -ENODEV;
-       }
-
        if (id)
                name = id->name;
 
index d0b7bdd3f066021938f436b3398a5c5532884a35..3b838068a7e48d7378597aeec7eef7da5953cd58 100644 (file)
 
 #include "bme680.h"
 
+struct bme680_spi_bus_context {
+       struct spi_device *spi;
+       u8 current_page;
+};
+
+/*
+ * In SPI mode there are only 7 address bits; a "page" register determines
+ * which part of the 8-bit range is active. This function looks at the address
+ * and writes the page selection bit if needed.
+ */
+static int bme680_regmap_spi_select_page(
+       struct bme680_spi_bus_context *ctx, u8 reg)
+{
+       struct spi_device *spi = ctx->spi;
+       int ret;
+       u8 buf[2];
+       u8 page = (reg & 0x80) ? 0 : 1; /* Page "1" is low range */
+
+       if (page == ctx->current_page)
+               return 0;
+
+       /*
+        * Data sheet claims we're only allowed to change bit 4, so we must do
+        * a read-modify-write on each and every page select
+        */
+       buf[0] = BME680_REG_STATUS;
+       ret = spi_write_then_read(spi, buf, 1, buf + 1, 1);
+       if (ret < 0) {
+               dev_err(&spi->dev, "failed to set page %u\n", page);
+               return ret;
+       }
+
+       buf[0] = BME680_REG_STATUS;
+       if (page)
+               buf[1] |= BME680_SPI_MEM_PAGE_BIT;
+       else
+               buf[1] &= ~BME680_SPI_MEM_PAGE_BIT;
+
+       ret = spi_write(spi, buf, 2);
+       if (ret < 0) {
+               dev_err(&spi->dev, "failed to set page %u\n", page);
+               return ret;
+       }
+
+       ctx->current_page = page;
+
+       return 0;
+}
+
 static int bme680_regmap_spi_write(void *context, const void *data,
                                   size_t count)
 {
-       struct spi_device *spi = context;
+       struct bme680_spi_bus_context *ctx = context;
+       struct spi_device *spi = ctx->spi;
+       int ret;
        u8 buf[2];
 
        memcpy(buf, data, 2);
+
+       ret = bme680_regmap_spi_select_page(ctx, buf[0]);
+       if (ret)
+               return ret;
+
        /*
         * The SPI register address (= full register address without bit 7)
         * and the write command (bit7 = RW = '0')
         */
        buf[0] &= ~0x80;
 
-       return spi_write_then_read(spi, buf, 2, NULL, 0);
+       return spi_write(spi, buf, 2);
 }
 
 static int bme680_regmap_spi_read(void *context, const void *reg,
                                  size_t reg_size, void *val, size_t val_size)
 {
-       struct spi_device *spi = context;
+       struct bme680_spi_bus_context *ctx = context;
+       struct spi_device *spi = ctx->spi;
+       int ret;
+       u8 addr = *(const u8 *)reg;
+
+       ret = bme680_regmap_spi_select_page(ctx, addr);
+       if (ret)
+               return ret;
 
-       return spi_write_then_read(spi, reg, reg_size, val, val_size);
+       addr |= 0x80; /* bit7 = RW = '1' */
+
+       return spi_write_then_read(spi, &addr, 1, val, val_size);
 }
 
 static struct regmap_bus bme680_regmap_bus = {
@@ -46,8 +111,8 @@ static struct regmap_bus bme680_regmap_bus = {
 static int bme680_spi_probe(struct spi_device *spi)
 {
        const struct spi_device_id *id = spi_get_device_id(spi);
+       struct bme680_spi_bus_context *bus_context;
        struct regmap *regmap;
-       unsigned int val;
        int ret;
 
        spi->bits_per_word = 8;
@@ -57,45 +122,21 @@ static int bme680_spi_probe(struct spi_device *spi)
                return ret;
        }
 
+       bus_context = devm_kzalloc(&spi->dev, sizeof(*bus_context), GFP_KERNEL);
+       if (!bus_context)
+               return -ENOMEM;
+
+       bus_context->spi = spi;
+       bus_context->current_page = 0xff; /* Undefined on warm boot */
+
        regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus,
-                                 &spi->dev, &bme680_regmap_config);
+                                 bus_context, &bme680_regmap_config);
        if (IS_ERR(regmap)) {
                dev_err(&spi->dev, "Failed to register spi regmap %d\n",
                                (int)PTR_ERR(regmap));
                return PTR_ERR(regmap);
        }
 
-       ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI,
-                          BME680_CMD_SOFTRESET);
-       if (ret < 0) {
-               dev_err(&spi->dev, "Failed to reset chip\n");
-               return ret;
-       }
-
-       /* after power-on reset, Page 0(0x80-0xFF) of spi_mem_page is active */
-       ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val);
-       if (ret < 0) {
-               dev_err(&spi->dev, "Error reading SPI chip ID\n");
-               return ret;
-       }
-
-       if (val != BME680_CHIP_ID_VAL) {
-               dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n",
-                               val, BME680_CHIP_ID_VAL);
-               return -ENODEV;
-       }
-       /*
-        * select Page 1 of spi_mem_page to enable access to
-        * to registers from address 0x00 to 0x7F.
-        */
-       ret = regmap_write_bits(regmap, BME680_REG_STATUS,
-                               BME680_SPI_MEM_PAGE_BIT,
-                               BME680_SPI_MEM_PAGE_1_VAL);
-       if (ret < 0) {
-               dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n");
-               return ret;
-       }
-
        return bme680_core_probe(&spi->dev, regmap, id->name);
 }
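
A usage note on the page cache above: current_page is seeded with 0xff, a value no real page can take, so the first register access after probe always flows through bme680_regmap_spi_select_page() and establishes a known page-select state, even if the chip retained some page across a warm boot.
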
 
index 89cb0066a6e0839f49fd68fb2395e8425b174c90..8d76afb87d87c58322b3ee8835ea31f5edc5a834 100644 (file)
@@ -103,9 +103,10 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
                         * Do not use IIO_DEGREE_TO_RAD to avoid precision
                         * loss. Round to the nearest integer.
                         */
-                       *val = div_s64(val64 * 314159 + 9000000ULL, 1000);
-                       *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
-                       ret = IIO_VAL_FRACTIONAL;
+                       *val = 0;
+                       *val2 = div_s64(val64 * 3141592653ULL,
+                                       180 << (CROS_EC_SENSOR_BITS - 1));
+                       ret = IIO_VAL_INT_PLUS_NANO;
                        break;
                case MOTIONSENSE_TYPE_MAG:
                        /*
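
In the replacement arithmetic above, 3141592653 is pi scaled by 10^9, so val64 * 3141592653 / (180 << (CROS_EC_SENSOR_BITS - 1)) applies the degrees-to-radians factor pi/180 to the per-LSB range while carrying the result at nanoradian resolution, which is what IIO_VAL_INT_PLUS_NANO with val = 0 reports; keeping the full product in 64 bits is what avoids the precision loss the comment warns about.
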
index 6d71fd905e29d69b0a5b1afa99c5451037333153..c701a45469f6436746b8c7e7b2da0680c45829ec 100644 (file)
@@ -92,6 +92,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
 
        inoutbuf[0] = 0x60; /* write EEPROM */
        inoutbuf[0] |= data->ref_mode << 3;
+       inoutbuf[0] |= data->powerdown ? ((data->powerdown_mode + 1) << 1) : 0;
        inoutbuf[1] = data->dac_value >> 4;
        inoutbuf[2] = (data->dac_value & 0xf) << 4;
 
index 63ca31628a93af8454f5fce9ced15d752f28f16d..92c07ab826eb32c9d4665728159dbd4358cefc80 100644 (file)
@@ -582,11 +582,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
        case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
                return bmg160_get_filter(data, val);
        case IIO_CHAN_INFO_SCALE:
-               *val = 0;
                switch (chan->type) {
                case IIO_TEMP:
-                       *val2 = 500000;
-                       return IIO_VAL_INT_PLUS_MICRO;
+                       *val = 500;
+                       return IIO_VAL_INT;
                case IIO_ANGL_VEL:
                {
                        int i;
@@ -594,6 +593,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
                        for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
                                if (bmg160_scale_table[i].dps_range ==
                                                        data->dps_range) {
+                                       *val = 0;
                                        *val2 = bmg160_scale_table[i].scale;
                                        return IIO_VAL_INT_PLUS_MICRO;
                                }
index 77fac81a3adce2245fe0bf499b60a382c08af98b..5ddebede31a6f6f3625263893ad27a0f3fb68b88 100644 (file)
@@ -29,7 +29,8 @@
 
 #include "mpu3050.h"
 
-#define MPU3050_CHIP_ID                0x69
+#define MPU3050_CHIP_ID                0x68
+#define MPU3050_CHIP_ID_MASK   0x7E
 
 /*
  * Register map: anything suffixed *_H is a big-endian high byte and always
@@ -1176,8 +1177,9 @@ int mpu3050_common_probe(struct device *dev,
                goto err_power_down;
        }
 
-       if (val != MPU3050_CHIP_ID) {
-               dev_err(dev, "unsupported chip id %02x\n", (u8)val);
+       if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) {
+               dev_err(dev, "unsupported chip id %02x\n",
+                               (u8)(val & MPU3050_CHIP_ID_MASK));
                ret = -ENODEV;
                goto err_power_down;
        }
index cd5bfe39591bb2b2d44b3848cc2d84ef2d4a38f2..dadd921a4a30fdb527faf9e0b8e359ba6fa61bc0 100644 (file)
@@ -320,9 +320,8 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
        const unsigned long *mask;
        unsigned long *trialmask;
 
-       trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
-                                 sizeof(*trialmask),
-                                 GFP_KERNEL);
+       trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+                           sizeof(*trialmask), GFP_KERNEL);
        if (trialmask == NULL)
                return -ENOMEM;
        if (!indio_dev->masklength) {
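
The switch to kcalloc() above matters because trialmask is used as a bitmask: a zeroed allocation guarantees that bits never explicitly set read back as clear, whereas kmalloc_array() handed back uninitialized memory whose stray bits could leak into the trial scan mask.
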
index 4700fd5d8c90a6ebaee08659b4bb17ca9db105a4..9c4d92115504ae093b8990ffdd4bb649f2796228 100644 (file)
@@ -1743,10 +1743,10 @@ EXPORT_SYMBOL(__iio_device_register);
  **/
 void iio_device_unregister(struct iio_dev *indio_dev)
 {
-       mutex_lock(&indio_dev->info_exist_lock);
-
        cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
 
+       mutex_lock(&indio_dev->info_exist_lock);
+
        iio_device_unregister_debugfs(indio_dev);
 
        iio_disable_all_buffers(indio_dev);
index ea0bc6885517b30da5c7a20d6e3805c582f3824b..32cc8fe7902f13dd5f6289ca36f5a8aca84a172b 100644 (file)
@@ -160,6 +160,7 @@ struct ib_uverbs_file {
 
        struct mutex umap_lock;
        struct list_head umaps;
+       struct page *disassociate_page;
 
        struct idr              idr;
        /* spinlock protects write access to idr */
index 70b7d80431a9b935b9a7ffa6fa50be6601f9c4a0..7843e89235c34b4e6831dedaebb7d2170986099d 100644 (file)
@@ -208,6 +208,9 @@ void ib_uverbs_release_file(struct kref *ref)
                kref_put(&file->async_file->ref,
                         ib_uverbs_release_async_event_file);
        put_device(&file->device->dev);
+
+       if (file->disassociate_page)
+               __free_pages(file->disassociate_page, 0);
        kfree(file);
 }
 
@@ -877,9 +880,50 @@ static void rdma_umap_close(struct vm_area_struct *vma)
        kfree(priv);
 }
 
+/*
+ * Once zap_vma_ptes() has been called, touches to the VMA will come here and
+ * we return a dummy writable zero page for all the pfns.
+ */
+static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
+{
+       struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
+       struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
+       vm_fault_t ret = 0;
+
+       if (!priv)
+               return VM_FAULT_SIGBUS;
+
+       /* Read-only pages can just use the system zero page. */
+       if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
+               vmf->page = ZERO_PAGE(vmf->vm_start);
+               get_page(vmf->page);
+               return 0;
+       }
+
+       mutex_lock(&ufile->umap_lock);
+       if (!ufile->disassociate_page)
+               ufile->disassociate_page =
+                       alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
+
+       if (ufile->disassociate_page) {
+               /*
+                * This VMA is forced to always be shared so we don't have
+                * to worry about COW.
+                */
+               vmf->page = ufile->disassociate_page;
+               get_page(vmf->page);
+       } else {
+               ret = VM_FAULT_SIGBUS;
+       }
+       mutex_unlock(&ufile->umap_lock);
+
+       return ret;
+}
+
 static const struct vm_operations_struct rdma_umap_ops = {
        .open = rdma_umap_open,
        .close = rdma_umap_close,
+       .fault = rdma_umap_fault,
 };
 
 static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
@@ -889,6 +933,9 @@ static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
        struct ib_uverbs_file *ufile = ucontext->ufile;
        struct rdma_umap_priv *priv;
 
+       if (!(vma->vm_flags & VM_SHARED))
+               return ERR_PTR(-EINVAL);
+
        if (vma->vm_end - vma->vm_start != size)
                return ERR_PTR(-EINVAL);
 
@@ -992,7 +1039,9 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
                 * at a time to get the lock ordering right. Typically there
                 * will only be one mm, so no big deal.
                 */
-               down_write(&mm->mmap_sem);
+               down_read(&mm->mmap_sem);
+               if (!mmget_still_valid(mm))
+                       goto skip_mm;
                mutex_lock(&ufile->umap_lock);
                list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
                                          list) {
@@ -1004,10 +1053,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
 
                        zap_vma_ptes(vma, vma->vm_start,
                                     vma->vm_end - vma->vm_start);
-                       vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
                }
                mutex_unlock(&ufile->umap_lock);
-               up_write(&mm->mmap_sem);
+       skip_mm:
+               up_read(&mm->mmap_sem);
                mmput(mm);
        }
 }
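
The hunk above replaces down_write() with down_read() plus an
mmget_still_valid() check, so an mm that is already being torn down is
skipped rather than touched. A minimal sketch of that locking pattern,
assuming the 5.x-era mm API (hypothetical helper, not the driver's actual
code):

    #include <linux/mm.h>
    #include <linux/sched/mm.h>

    static void zap_all_user_vmas(struct mm_struct *mm)
    {
            struct vm_area_struct *vma;

            down_read(&mm->mmap_sem);
            if (!mmget_still_valid(mm))
                    goto out;               /* exit_mmap() already started */
            for (vma = mm->mmap; vma; vma = vma->vm_next)
                    zap_vma_ptes(vma, vma->vm_start,
                                 vma->vm_end - vma->vm_start);
    out:
            up_read(&mm->mmap_sem);
    }
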
index 66cdf625534ff8901a6efdf90295eaee3cc0145f..60cf9f03e9414e98e97f325cc7f0b937af36bcc0 100644 (file)
@@ -533,7 +533,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 
 static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
 {
-       if (attr->qp_type == IB_QPT_XRC_TGT)
+       if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
                return 0;
 
        return 1;
index 531ff20b32ade6ccb4d0b3533bc1f8ceceed1b26..d3dd290ae1b176d609c14937c4ffedc2fe992a20 100644 (file)
@@ -1119,6 +1119,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                if (MLX5_CAP_GEN(mdev, qp_packet_based))
                        resp.flags |=
                                MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
+
+               resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
        }
 
        if (field_avail(typeof(resp), sw_parsing_caps,
@@ -2066,6 +2068,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
 
        if (vma->vm_flags & VM_WRITE)
                return -EPERM;
+       vma->vm_flags &= ~VM_MAYWRITE;
 
        if (!dev->mdev->clock_info_page)
                return -EOPNOTSUPP;
@@ -2231,19 +2234,18 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
 
                if (vma->vm_flags & VM_WRITE)
                        return -EPERM;
+               vma->vm_flags &= ~VM_MAYWRITE;
 
                /* Don't expose to user-space information it shouldn't have */
                if (PAGE_SIZE > 4096)
                        return -EOPNOTSUPP;
 
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                pfn = (dev->mdev->iseg_base +
                       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
                        PAGE_SHIFT;
-               if (io_remap_pfn_range(vma, vma->vm_start, pfn,
-                                      PAGE_SIZE, vma->vm_page_prot))
-                       return -EAGAIN;
-               break;
+               return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
+                                        PAGE_SIZE,
+                                        pgprot_noncached(vma->vm_page_prot));
        case MLX5_IB_MMAP_CLOCK_INFO:
                return mlx5_ib_mmap_clock_info_page(dev, vma, context);
 
index 7cd006da1daef05cd335dc77cda8281e179630c4..8870c350fda0b109cc4cb98c9787fd0452821865 100644 (file)
@@ -1818,13 +1818,16 @@ static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
 
        rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
 
-       if (rcqe_sz == 128) {
-               MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+       if (init_attr->qp_type == MLX5_IB_QPT_DCT) {
+               if (rcqe_sz == 128)
+                       MLX5_SET(dctc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+
                return;
        }
 
-       if (init_attr->qp_type != MLX5_IB_QPT_DCT)
-               MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
+       MLX5_SET(qpc, qpc, cs_res,
+                rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
+                                 MLX5_RES_SCAT_DATA32_CQE);
 }
 
 static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
index 7287950434969243335e904aa8045d7588ca2d2f..0bb6e39dd03a730783249586d409be6477a6d317 100644 (file)
@@ -608,11 +608,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
        if (unlikely(mapped_segs == mr->mr.max_segs))
                return -ENOMEM;
 
-       if (mr->mr.length == 0) {
-               mr->mr.user_base = addr;
-               mr->mr.iova = addr;
-       }
-
        m = mapped_segs / RVT_SEGSZ;
        n = mapped_segs % RVT_SEGSZ;
        mr->mr.map[m]->segs[n].vaddr = (void *)addr;
@@ -630,17 +625,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
  * @sg_nents: number of entries in sg
  * @sg_offset: offset in bytes into sg
  *
+ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
+ *
  * Return: number of sg elements mapped to the memory region
  */
 int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                  int sg_nents, unsigned int *sg_offset)
 {
        struct rvt_mr *mr = to_imr(ibmr);
+       int ret;
 
        mr->mr.length = 0;
        mr->mr.page_shift = PAGE_SHIFT;
-       return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
-                             rvt_set_page);
+       ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
+       mr->mr.user_base = ibmr->iova;
+       mr->mr.iova = ibmr->iova;
+       mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
+       mr->mr.length = (size_t)ibmr->length;
+       return ret;
 }
 
 /**
@@ -671,6 +673,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
        ibmr->rkey = key;
        mr->mr.lkey = key;
        mr->mr.access_flags = access;
+       mr->mr.iova = ibmr->iova;
        atomic_set(&mr->mr.lkey_invalid, 0);
 
        return 0;
index a878351f16439859e3931ec71506c705eb9a6f6e..52d7f55fca329c09c9788cb8bcd91509a5f07426 100644 (file)
@@ -420,7 +420,7 @@ config KEYBOARD_MPR121
 
 config KEYBOARD_SNVS_PWRKEY
        tristate "IMX SNVS Power Key Driver"
-       depends on SOC_IMX6SX || SOC_IMX7D
+       depends on ARCH_MXC || COMPILE_TEST
        depends on OF
        help
          This is the snvs powerkey driver for the Freescale i.MX application
index effb63205d3d7783e8e4e598407332892ef6aae2..4c67cf30a5d9ab14bff5f5c53d289ba347d241f4 100644 (file)
@@ -148,6 +148,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
                return error;
        }
 
+       pdata->input = input;
+       platform_set_drvdata(pdev, pdata);
+
        error = devm_request_irq(&pdev->dev, pdata->irq,
                               imx_snvs_pwrkey_interrupt,
                               0, pdev->name, pdev);
@@ -163,9 +166,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
                return error;
        }
 
-       pdata->input = input;
-       platform_set_drvdata(pdev, pdata);
-
        device_init_wakeup(&pdev->dev, pdata->wakeup);
 
        return 0;
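
The reorder above closes a probe-time race: the IRQ may fire as soon as
devm_request_irq() returns, and the handler reads the platform drvdata, so
the drvdata must be published first. A hypothetical probe fragment showing
the ordering rule (struct my_pdata and my_irq_handler are assumed names,
not the driver's code):

    static int my_probe(struct platform_device *pdev)
    {
            struct my_pdata *pdata;
            int irq;

            pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
            if (!pdata)
                    return -ENOMEM;

            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;

            /* 1. Publish the state the handler reads... */
            platform_set_drvdata(pdev, pdata);

            /* 2. ...only then allow the IRQ to fire. */
            return devm_request_irq(&pdev->dev, irq, my_irq_handler, 0,
                                    pdev->name, pdev);
    }
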
index 628ef617bb2f7f51301d5b422905140fa53b1c1a..f9525d6f0bfe810c9ab1c2bd0a2a971f2e9695b4 100644 (file)
@@ -1339,21 +1339,46 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN0600", 0 },
        { "ELAN0601", 0 },
        { "ELAN0602", 0 },
+       { "ELAN0603", 0 },
+       { "ELAN0604", 0 },
        { "ELAN0605", 0 },
+       { "ELAN0606", 0 },
+       { "ELAN0607", 0 },
        { "ELAN0608", 0 },
        { "ELAN0609", 0 },
        { "ELAN060B", 0 },
        { "ELAN060C", 0 },
+       { "ELAN060F", 0 },
+       { "ELAN0610", 0 },
        { "ELAN0611", 0 },
        { "ELAN0612", 0 },
+       { "ELAN0615", 0 },
+       { "ELAN0616", 0 },
        { "ELAN0617", 0 },
        { "ELAN0618", 0 },
+       { "ELAN0619", 0 },
+       { "ELAN061A", 0 },
+       { "ELAN061B", 0 },
        { "ELAN061C", 0 },
        { "ELAN061D", 0 },
        { "ELAN061E", 0 },
+       { "ELAN061F", 0 },
        { "ELAN0620", 0 },
        { "ELAN0621", 0 },
        { "ELAN0622", 0 },
+       { "ELAN0623", 0 },
+       { "ELAN0624", 0 },
+       { "ELAN0625", 0 },
+       { "ELAN0626", 0 },
+       { "ELAN0627", 0 },
+       { "ELAN0628", 0 },
+       { "ELAN0629", 0 },
+       { "ELAN062A", 0 },
+       { "ELAN062B", 0 },
+       { "ELAN062C", 0 },
+       { "ELAN062D", 0 },
+       { "ELAN0631", 0 },
+       { "ELAN0632", 0 },
        { "ELAN1000", 0 },
        { }
 };
index fc3ab93b7aea454475ee324eecee91470c4a9dc3..7fb358f961957507969db706c780459b937d2ba0 100644 (file)
@@ -860,7 +860,7 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
 
        error = rmi_register_function(fn);
        if (error)
-               goto err_put_fn;
+               return error;
 
        if (pdt->function_number == 0x01)
                data->f01_container = fn;
@@ -870,10 +870,6 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
        list_add_tail(&fn->node, &data->function_list);
 
        return RMI_SCAN_CONTINUE;
-
-err_put_fn:
-       put_device(&fn->dev);
-       return error;
 }
 
 void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
index df64d6aed4f7e10b8eb78eb78619a15d7bcaaf56..93901ebd122a504e7e96c35a17c100ae1ea607e3 100644 (file)
@@ -1230,7 +1230,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
        }
 
        rc = f11_write_control_regs(fn, &f11->sens_query,
-                          &f11->dev_controls, fn->fd.query_base_addr);
+                          &f11->dev_controls, fn->fd.control_base_addr);
        if (rc)
                dev_warn(&fn->dev, "Failed to write control registers\n");
 
index aa729078463601464e0c0c90d0c319c83f82214a..0390603170b405862ac63b35310f64e3d5743b7a 100644 (file)
 #define AR71XX_RESET_REG_MISC_INT_ENABLE       4
 
 #define ATH79_MISC_IRQ_COUNT                   32
+#define ATH79_MISC_PERF_IRQ                    5
+
+static int ath79_perfcount_irq;
+
+int get_c0_perfcount_int(void)
+{
+       return ath79_perfcount_irq;
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 static void ath79_misc_irq_handler(struct irq_desc *desc)
 {
@@ -113,6 +122,8 @@ static void __init ath79_misc_intc_domain_init(
 {
        void __iomem *base = domain->host_data;
 
+       ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ);
+
        /* Disable and clear all interrupts */
        __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
        __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
index 4ab8b1b6608f7136365f91d713f65647a8271296..a14e35d405387d4dc43bf672139c773bf4b05d2f 100644 (file)
@@ -710,10 +710,10 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
        struct sock *sk = sock->sk;
        int err = 0;
 
-       if (!maddr || maddr->family != AF_ISDN)
+       if (addr_len < sizeof(struct sockaddr_mISDN))
                return -EINVAL;
 
-       if (addr_len < sizeof(struct sockaddr_mISDN))
+       if (!maddr || maddr->family != AF_ISDN)
                return -EINVAL;
 
        lock_sock(sk);
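
The reorder above is the classic "validate addr_len before trusting the
sockaddr" fix: with a short sockaddr, reading maddr->family would touch
bytes the caller never supplied. A generic sketch of the safe ordering
(hypothetical handler, not the mISDN code):

    static int my_bind(struct socket *sock, struct sockaddr *addr,
                       int addr_len)
    {
            struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *)addr;

            /* 1. Bound the buffer before looking inside it. */
            if (addr_len < sizeof(*maddr))
                    return -EINVAL;
            /* 2. Only now is ->family known to be within bounds. */
            if (!maddr || maddr->family != AF_ISDN)
                    return -EINVAL;
            return 0;
    }
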
index 39f832d2728899a8575763834d53bb438ae262bd..36d0d5c9cfbad80a3d191fa680d8e23f3ee908c1 100644 (file)
@@ -1184,6 +1184,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
        struct fastrpc_session_ctx *sess;
        struct device *dev = &pdev->dev;
        int i, sessions = 0;
+       int rc;
 
        cctx = dev_get_drvdata(dev->parent);
        if (!cctx)
@@ -1213,7 +1214,11 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
        }
        cctx->sesscount++;
        spin_unlock(&cctx->lock);
-       dma_set_mask(dev, DMA_BIT_MASK(32));
+       rc = dma_set_mask(dev, DMA_BIT_MASK(32));
+       if (rc) {
+               dev_err(dev, "32-bit DMA enable failed\n");
+               return rc;
+       }
 
        return 0;
 }
index ea979ebd62fb8c5f30d08b052a0e481325470ece..3c509e19d69dc430a668825ab935927f8441b468 100644 (file)
@@ -1688,12 +1688,11 @@ static void goya_init_golden_registers(struct hl_device *hdev)
 
        /*
         * Workaround for H2 #HW-23 bug
-        * Set DMA max outstanding read requests to 240 on DMA CH 1. Set it
-        * to 16 on KMD DMA
-        * We need to limit only these DMAs because the user can only read
+        * Set DMA max outstanding read requests to 240 on DMA CH 1.
+        * This limitation is still large enough to not affect Gen4 bandwidth.
+        * We need to limit only that DMA channel because the user can only read
         * from Host using DMA CH 1
         */
-       WREG32(mmDMA_CH_0_CFG0, 0x0fff0010);
        WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
 
        goya->hw_cap_initialized |= HW_CAP_GOLDEN;
@@ -3693,7 +3692,7 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
         * WA for HW-23.
         * We can't allow user to read from Host using QMANs other than 1.
         */
-       if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 &&
+       if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
                hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
                                le32_to_cpu(user_dma_pkt->tsize),
                                hdev->asic_prop.va_space_host_start_address,
index b59708c35fafe87a926f4b47c81d89ba56f695e4..ee610721098e628d01ad988b1a9cad40aeef6f70 100644 (file)
@@ -3213,8 +3213,12 @@ static int bond_netdev_event(struct notifier_block *this,
                return NOTIFY_DONE;
 
        if (event_dev->flags & IFF_MASTER) {
+               int ret;
+
                netdev_dbg(event_dev, "IFF_MASTER\n");
-               return bond_master_netdev_event(event, event_dev);
+               ret = bond_master_netdev_event(event, event_dev);
+               if (ret != NOTIFY_DONE)
+                       return ret;
        }
 
        if (event_dev->flags & IFF_SLAVE) {
index 9e07b469066a4bbc41119e0801d716a25dce9a55..156fbc5601ca3ece9b07c7ab14314ff4883127f7 100644 (file)
@@ -1721,7 +1721,7 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
        adapter->soft_stats.scc += smb->tx_1_col;
        adapter->soft_stats.mcc += smb->tx_2_col;
        adapter->soft_stats.latecol += smb->tx_late_col;
-       adapter->soft_stats.tx_underun += smb->tx_underrun;
+       adapter->soft_stats.tx_underrun += smb->tx_underrun;
        adapter->soft_stats.tx_trunc += smb->tx_trunc;
        adapter->soft_stats.tx_pause += smb->tx_pause;
 
@@ -3179,7 +3179,7 @@ static struct atl1_stats atl1_gstrings_stats[] = {
        {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
        {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
        {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
-       {"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
+       {"tx_underrun", ATL1_STAT(soft_stats.tx_underrun)},
        {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
        {"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
        {"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
index 34a58cd846a05929f5c22dfa46619f6240e85e5e..eacff19ea05b820bec71fdba222b7a7e263141db 100644 (file)
@@ -681,7 +681,7 @@ struct atl1_sft_stats {
        u64 scc;                /* packets TX after a single collision */
        u64 mcc;                /* packets TX after multiple collisions */
        u64 latecol;            /* TX packets w/ late collisions */
-       u64 tx_underun;         /* TX packets aborted due to TX FIFO underrun
+       u64 tx_underrun;        /* TX packets aborted due to TX FIFO underrun
                                 * or TRD FIFO underrun */
        u64 tx_trunc;           /* TX packets truncated due to size > MTU */
        u64 rx_pause;           /* num Pause packets received. */
index d99317b3d891b0a608aafef51352eea6ba58fbb2..98da0fa27192ddbab7c04651854b0fd94baa6b2a 100644 (file)
@@ -553,7 +553,7 @@ static void atl2_intr_tx(struct atl2_adapter *adapter)
                        netdev->stats.tx_aborted_errors++;
                if (txs->late_col)
                        netdev->stats.tx_window_errors++;
-               if (txs->underun)
+               if (txs->underrun)
                        netdev->stats.tx_fifo_errors++;
        } while (1);
 
index c64a6bdfa7ae4927da9dc3c38ac4ca5b8956354e..25ec84cb48535b1a7dc0180e176af77aabf0bf03 100644 (file)
@@ -260,7 +260,7 @@ struct tx_pkt_status {
        unsigned multi_col:1;
        unsigned late_col:1;
        unsigned abort_col:1;
-       unsigned underun:1;     /* current packet is aborted
+       unsigned underrun:1;    /* current packet is aborted
                                 * due to txram underrun */
        unsigned:3;             /* reserved */
        unsigned update:1;      /* always 1'b1 in tx_status_buf */
index a9bdc21873d32f31620ac169f8aff5b76cd02f7f..10ff37d6dc783b796c690a4d73bc90caa4cad931 100644 (file)
@@ -957,7 +957,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
        bnx2x_sample_bulletin(bp);
 
        if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
-               BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+               BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
                rc = -EINVAL;
                goto out;
        }
index 28eac9056211314504f7c1afdd1a4616bce44e68..c032bef1b776d74ea4886e8fbddca40c8b7dd868 100644 (file)
 #define DRV_NAME       "nicvf"
 #define DRV_VERSION    "1.0"
 
+/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs
+ * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed
+ * this value, keeping headroom for the 14-byte Ethernet header and two
+ * VLAN tags (for QinQ)
+ */
+#define MAX_XDP_MTU    (1530 - ETH_HLEN - VLAN_HLEN * 2)
+
 /* Supported devices */
 static const struct pci_device_id nicvf_id_table[] = {
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
@@ -1582,6 +1589,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
        struct nicvf *nic = netdev_priv(netdev);
        int orig_mtu = netdev->mtu;
 
+       /* For now just support only the usual MTU-sized frames,
+        * plus some headroom for VLAN, QinQ.
+        */
+       if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
+               netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+                           netdev->mtu);
+               return -EINVAL;
+       }
+
        netdev->mtu = new_mtu;
 
        if (!netif_running(netdev))
@@ -1830,8 +1846,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
        bool bpf_attached = false;
        int ret = 0;
 
-       /* For now just support only the usual MTU sized frames */
-       if (prog && (dev->mtu > 1500)) {
+       /* For now just support only the usual MTU-sized frames,
+        * plus some headroom for VLAN, QinQ.
+        */
+       if (prog && dev->mtu > MAX_XDP_MTU) {
                netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
                            dev->mtu);
                return -EOPNOTSUPP;
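
Worked out with the constants from <linux/if_ether.h> and
<linux/if_vlan.h> (ETH_HLEN == 14, VLAN_HLEN == 4), the new bound is:

    MAX_XDP_MTU = 1530 - ETH_HLEN - VLAN_HLEN * 2
                = 1530 - 14 - 8
                = 1508

so both nicvf_change_mtu() and nicvf_xdp_setup() now refuse the
combination once the MTU would exceed 1508 while an XDP program is (or
would be) attached.
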
index 697c2427f2b70c06c87dd00ae23d4e0b06d4fc3d..a96ad20ee4843e9cdd02c55a3e7286a51679efea 100644 (file)
@@ -1840,13 +1840,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
        int ret;
 
        if (enable) {
-               ret = clk_prepare_enable(fep->clk_ahb);
-               if (ret)
-                       return ret;
-
                ret = clk_prepare_enable(fep->clk_enet_out);
                if (ret)
-                       goto failed_clk_enet_out;
+                       return ret;
 
                if (fep->clk_ptp) {
                        mutex_lock(&fep->ptp_clk_mutex);
@@ -1866,7 +1862,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 
                phy_reset_after_clk_enable(ndev->phydev);
        } else {
-               clk_disable_unprepare(fep->clk_ahb);
                clk_disable_unprepare(fep->clk_enet_out);
                if (fep->clk_ptp) {
                        mutex_lock(&fep->ptp_clk_mutex);
@@ -1885,8 +1880,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 failed_clk_ptp:
        if (fep->clk_enet_out)
                clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
-               clk_disable_unprepare(fep->clk_ahb);
 
        return ret;
 }
@@ -3470,6 +3463,9 @@ fec_probe(struct platform_device *pdev)
        ret = clk_prepare_enable(fep->clk_ipg);
        if (ret)
                goto failed_clk_ipg;
+       ret = clk_prepare_enable(fep->clk_ahb);
+       if (ret)
+               goto failed_clk_ahb;
 
        fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
        if (!IS_ERR(fep->reg_phy)) {
@@ -3563,6 +3559,9 @@ fec_probe(struct platform_device *pdev)
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 failed_regulator:
+       clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+       clk_disable_unprepare(fep->clk_ipg);
 failed_clk_ipg:
        fec_enet_clk_enable(ndev, false);
 failed_clk:
@@ -3686,6 +3685,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev)
        struct net_device *ndev = dev_get_drvdata(dev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       clk_disable_unprepare(fep->clk_ahb);
        clk_disable_unprepare(fep->clk_ipg);
 
        return 0;
@@ -3695,8 +3695,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
        struct fec_enet_private *fep = netdev_priv(ndev);
+       int ret;
 
-       return clk_prepare_enable(fep->clk_ipg);
+       ret = clk_prepare_enable(fep->clk_ahb);
+       if (ret)
+               return ret;
+       ret = clk_prepare_enable(fep->clk_ipg);
+       if (ret)
+               goto failed_clk_ipg;
+
+       return 0;
+
+failed_clk_ipg:
+       clk_disable_unprepare(fep->clk_ahb);
+       return ret;
 }
 
 static const struct dev_pm_ops fec_pm_ops = {
index 51cfe95f3e247d9fd025bc474964289337b4c4d0..3dfb2d131eb76f29c6129e8f29f477ab7be840db 100644 (file)
@@ -3762,6 +3762,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
 {
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
+       netdev_features_t old_hw_features = 0;
        union ibmvnic_crq crq;
        int i;
 
@@ -3837,24 +3838,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
        adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
        adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
 
-       adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
+       if (adapter->state != VNIC_PROBING) {
+               old_hw_features = adapter->netdev->hw_features;
+               adapter->netdev->hw_features = 0;
+       }
+
+       adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
 
        if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
-               adapter->netdev->features |= NETIF_F_IP_CSUM;
+               adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
 
        if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
-               adapter->netdev->features |= NETIF_F_IPV6_CSUM;
+               adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
 
        if ((adapter->netdev->features &
            (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
-               adapter->netdev->features |= NETIF_F_RXCSUM;
+               adapter->netdev->hw_features |= NETIF_F_RXCSUM;
 
        if (buf->large_tx_ipv4)
-               adapter->netdev->features |= NETIF_F_TSO;
+               adapter->netdev->hw_features |= NETIF_F_TSO;
        if (buf->large_tx_ipv6)
-               adapter->netdev->features |= NETIF_F_TSO6;
+               adapter->netdev->hw_features |= NETIF_F_TSO6;
 
-       adapter->netdev->hw_features |= adapter->netdev->features;
+       if (adapter->state == VNIC_PROBING) {
+               adapter->netdev->features |= adapter->netdev->hw_features;
+       } else if (old_hw_features != adapter->netdev->hw_features) {
+               netdev_features_t tmp = 0;
+
+               /* disable features no longer supported */
+               adapter->netdev->features &= adapter->netdev->hw_features;
+               /* turn on features now supported if previously enabled */
+               tmp = (old_hw_features ^ adapter->netdev->hw_features) &
+                       adapter->netdev->hw_features;
+               adapter->netdev->features |=
+                               tmp & adapter->netdev->wanted_features;
+       }
 
        memset(&crq, 0, sizeof(crq));
        crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
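
The feature bookkeeping above reduces to plain bit arithmetic; a
standalone sketch with generic names (not the ibmvnic code; the typedef
mirrors the kernel's u64 feature mask):

    typedef unsigned long long netdev_features_t;

    /*
     * After hw_features changed at runtime: drop bits the device no
     * longer offers, then re-enable newly offered bits the user had
     * asked for via wanted_features.
     */
    static netdev_features_t recompute_features(netdev_features_t features,
                                                netdev_features_t old_hw,
                                                netdev_features_t new_hw,
                                                netdev_features_t wanted)
    {
            netdev_features_t newly_offered = (old_hw ^ new_hw) & new_hw;

            features &= new_hw;                 /* disable what vanished */
            return features | (newly_offered & wanted);
    }
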
index 71c65cc1790484b8e24e4d02ec973b830179c7de..d3eaf2ceaa3979b79a65d83beaaf072567d48c00 100644 (file)
@@ -858,6 +858,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs);
  * switching channels
  */
 typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
 int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
                               struct mlx5e_channels *new_chs,
                               mlx5e_fp_hw_modify hw_modify);
index 9d38e62cdf248a2f624b12227133f8132f7591bd..476dd97f7f2f25a4c0697a6ac2b34c0b5985034e 100644 (file)
@@ -186,12 +186,17 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx)
 
 static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv)
 {
-       int err;
+       int err = 0;
 
        rtnl_lock();
        mutex_lock(&priv->state_lock);
-       mlx5e_close_locked(priv->netdev);
-       err = mlx5e_open_locked(priv->netdev);
+
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+               goto out;
+
+       err = mlx5e_safe_reopen_channels(priv);
+
+out:
        mutex_unlock(&priv->state_lock);
        rtnl_unlock();
 
index fa2a3c444cdc604c308999f140a3125becd9c8d3..eec07b34b4ad07c627eebf36f87557296cc1701a 100644 (file)
@@ -39,6 +39,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
                        return -EOPNOTSUPP;
        }
 
+       if (!(mlx5e_eswitch_rep(*out_dev) &&
+             mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
+               return -EOPNOTSUPP;
+
        return 0;
 }
 
index 03b2a9f9c5895af92bcefad0b3525757aa0191c1..cad34d6f5f451b1bfdf4b363ef25cbe50cab9fcd 100644 (file)
 #include <linux/bpf_trace.h>
 #include "en/xdp.h"
 
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params)
+{
+       int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
+
+       /* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
+        * The condition checked in mlx5e_rx_is_linear_skb is:
+        *   SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE         (1)
+        *   (Note that hw_mtu == sw_mtu + hard_mtu.)
+        * What is returned from this function is:
+        *   max_mtu = PAGE_SIZE - S - hr - hard_mtu                         (2)
+        * After assigning sw_mtu := max_mtu, the left side of (1) turns to
+        * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE,
+        * because both PAGE_SIZE and S are already aligned. Any number greater
+        * than max_mtu would make the left side of (1) greater than PAGE_SIZE,
+        * so max_mtu is the maximum MTU allowed.
+        */
+
+       return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr));
+}
+
 static inline bool
 mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
                    struct xdp_buff *xdp)
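
With illustrative x86-64 values -- 4096-byte pages, NET_IP_ALIGN == 0,
XDP_PACKET_HEADROOM == 256, SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
around 320 bytes, and mlx5's 22-byte hard MTU (ETH_HLEN + VLAN_HLEN +
ETH_FCS_LEN) -- formula (2) above comes out to roughly:

    max_mtu = 4096 - 320 - 256 - 22 = 3498

Exact numbers vary with page size and kernel config; the point is that the
bound is now derived from the same expression mlx5e_rx_is_linear_skb()
checks, rather than the looser MLX5E_XDP_MAX_MTU constant it replaces.
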
@@ -304,9 +324,9 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
                                        mlx5e_xdpi_fifo_pop(xdpi_fifo);
 
                                if (is_redirect) {
-                                       xdp_return_frame(xdpi.xdpf);
                                        dma_unmap_single(sq->pdev, xdpi.dma_addr,
                                                         xdpi.xdpf->len, DMA_TO_DEVICE);
+                                       xdp_return_frame(xdpi.xdpf);
                                } else {
                                        /* Recycle RX page */
                                        mlx5e_page_release(rq, &xdpi.di, true);
@@ -345,9 +365,9 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
                                mlx5e_xdpi_fifo_pop(xdpi_fifo);
 
                        if (is_redirect) {
-                               xdp_return_frame(xdpi.xdpf);
                                dma_unmap_single(sq->pdev, xdpi.dma_addr,
                                                 xdpi.xdpf->len, DMA_TO_DEVICE);
+                               xdp_return_frame(xdpi.xdpf);
                        } else {
                                /* Recycle RX page */
                                mlx5e_page_release(rq, &xdpi.di, false);
index ee27a7c8cd87d5121361f22344b53a21a7fb408b..553956cadc8a00d6bed384aa6cdad86759d5b2dd 100644 (file)
 
 #include "en.h"
 
-#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \
-                                MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)))
 #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
 #define MLX5E_XDP_TX_EMPTY_DS_COUNT \
        (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
 #define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
 
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params);
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
                      void *va, u16 *rx_headroom, u32 *len);
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq);
index 5efce4a3ff7965ab6899e8c2521a96c2e8f33043..78dc8fe2a83c3499d290ceffcb273ebcd7ef1932 100644 (file)
@@ -1586,7 +1586,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
                break;
        case MLX5_MODULE_ID_SFP:
                modinfo->type       = ETH_MODULE_SFF_8472;
-               modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+               modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
                break;
        default:
                netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
@@ -1768,7 +1768,8 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
        struct mlx5e_channel *c;
        int i;
 
-       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state) ||
+           priv->channels.params.xdp_prog)
                return 0;
 
        for (i = 0; i < channels->num; i++) {
index b5fdbd3190d9fa99e6207c25e183354b1d5dad01..46157e2a1e5ac36121f8ec96f9f5a09417b5fa67 100644 (file)
@@ -951,7 +951,11 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
        if (params->rx_dim_enabled)
                __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
-       if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE))
+       /* We disable csum_complete when XDP is enabled since
+        * XDP programs might manipulate packets, which would render
+        * skb->checksum incorrect.
+        */
+       if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
                __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
 
        return 0;
@@ -2937,6 +2941,14 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
        return 0;
 }
 
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
+{
+       struct mlx5e_channels new_channels = {};
+
+       new_channels.params = priv->channels.params;
+       return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+}
+
 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 {
        priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
@@ -3765,7 +3777,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
        if (params->xdp_prog &&
            !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
                netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
-                          new_mtu, MLX5E_XDP_MAX_MTU);
+                          new_mtu, mlx5e_xdp_max_mtu(params));
                err = -EINVAL;
                goto out;
        }
@@ -4161,11 +4173,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
        if (!report_failed)
                goto unlock;
 
-       mlx5e_close_locked(priv->netdev);
-       err = mlx5e_open_locked(priv->netdev);
+       err = mlx5e_safe_reopen_channels(priv);
        if (err)
                netdev_err(priv->netdev,
-                          "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+                          "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
                           err);
 
 unlock:
@@ -4201,7 +4212,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
 
        if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
                netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
-                           new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
+                           new_channels.params.sw_mtu,
+                           mlx5e_xdp_max_mtu(&new_channels.params));
                return -EINVAL;
        }
 
@@ -4553,7 +4565,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
 {
        enum mlx5e_traffic_types tt;
 
-       rss_params->hfunc = ETH_RSS_HASH_XOR;
+       rss_params->hfunc = ETH_RSS_HASH_TOP;
        netdev_rss_key_fill(rss_params->toeplitz_hash_key,
                            sizeof(rss_params->toeplitz_hash_key));
        mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
index 3dde5c7e0739afd6d04f874290d5a332c97f68cf..c3b3002ff62f073f8c9fff88ea2fb74693474619 100644 (file)
@@ -692,7 +692,14 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
 {
        *proto = ((struct ethhdr *)skb->data)->h_proto;
        *proto = __vlan_get_protocol(skb, *proto, network_depth);
-       return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6));
+
+       if (*proto == htons(ETH_P_IP))
+               return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
+
+       if (*proto == htons(ETH_P_IPV6))
+               return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
+
+       return false;
 }
 
 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
@@ -712,17 +719,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
        rq->stats->ecn_mark += !!rc;
 }
 
-static u32 mlx5e_get_fcs(const struct sk_buff *skb)
-{
-       const void *fcs_bytes;
-       u32 _fcs_bytes;
-
-       fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
-                                      ETH_FCS_LEN, &_fcs_bytes);
-
-       return __get_unaligned_cpu32(fcs_bytes);
-}
-
 static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
 {
        void *ip_p = skb->data + network_depth;
@@ -733,6 +729,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
 
 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
 
+#define MAX_PADDING 8
+
+static void
+tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
+                      struct mlx5e_rq_stats *stats)
+{
+       stats->csum_complete_tail_slow++;
+       skb->csum = csum_block_add(skb->csum,
+                                  skb_checksum(skb, offset, len, 0),
+                                  offset);
+}
+
+static void
+tail_padding_csum(struct sk_buff *skb, int offset,
+                 struct mlx5e_rq_stats *stats)
+{
+       u8 tail_padding[MAX_PADDING];
+       int len = skb->len - offset;
+       void *tail;
+
+       if (unlikely(len > MAX_PADDING)) {
+               tail_padding_csum_slow(skb, offset, len, stats);
+               return;
+       }
+
+       tail = skb_header_pointer(skb, offset, len, tail_padding);
+       if (unlikely(!tail)) {
+               tail_padding_csum_slow(skb, offset, len, stats);
+               return;
+       }
+
+       stats->csum_complete_tail++;
+       skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
+}
+
+static void
+mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
+                      struct mlx5e_rq_stats *stats)
+{
+       struct ipv6hdr *ip6;
+       struct iphdr   *ip4;
+       int pkt_len;
+
+       switch (proto) {
+       case htons(ETH_P_IP):
+               ip4 = (struct iphdr *)(skb->data + network_depth);
+               pkt_len = network_depth + ntohs(ip4->tot_len);
+               break;
+       case htons(ETH_P_IPV6):
+               ip6 = (struct ipv6hdr *)(skb->data + network_depth);
+               pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
+               break;
+       default:
+               return;
+       }
+
+       if (likely(pkt_len >= skb->len))
+               return;
+
+       tail_padding_csum(skb, pkt_len, stats);
+}
+
 static inline void mlx5e_handle_csum(struct net_device *netdev,
                                     struct mlx5_cqe64 *cqe,
                                     struct mlx5e_rq *rq,
@@ -752,7 +810,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                return;
        }
 
-       if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
+       /* True when explicitly set via priv flag, or XDP prog is loaded */
+       if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
                goto csum_unnecessary;
 
        /* CQE csum doesn't cover padding octets in short ethernet
@@ -780,18 +839,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                        skb->csum = csum_partial(skb->data + ETH_HLEN,
                                                 network_depth - ETH_HLEN,
                                                 skb->csum);
-               if (unlikely(netdev->features & NETIF_F_RXFCS))
-                       skb->csum = csum_block_add(skb->csum,
-                                                  (__force __wsum)mlx5e_get_fcs(skb),
-                                                  skb->len - ETH_FCS_LEN);
+
+               mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
                stats->csum_complete++;
                return;
        }
 
 csum_unnecessary:
        if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
-                  ((cqe->hds_ip_ext & CQE_L4_OK) ||
-                   (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) {
+                  (cqe->hds_ip_ext & CQE_L4_OK))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                if (cqe_is_tunneled(cqe)) {
                        skb->csum_level = 1;
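
A concrete instance of the padding math above: suppose a 68-byte skb
(large enough to escape the short_frame() check) carries an IPv4 header
with tot_len == 46. Then pkt_len = 14 + 46 = 60, the tail padding is
skb->len - pkt_len = 8 bytes, and since 8 <= MAX_PADDING the fast path
copies it into the stack buffer and folds it in with csum_partial() +
csum_block_add(), bumping csum_complete_tail rather than
csum_complete_tail_slow.
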
index 1a78e05cbba8168d919bfd45af3378becd3c9b68..b75aa8b8bf04eac8cac464c0c8550013154f6267 100644 (file)
@@ -59,6 +59,8 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
@@ -151,6 +153,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
                s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_complete += rq_stats->csum_complete;
+               s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
+               s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
                s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_xdp_drop     += rq_stats->xdp_drop;
@@ -1190,6 +1194,8 @@ static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
index 4640d4f986f8c6495bc5c94cf22217fb59a64b34..16c3b785f282b109e9b2bc54bd4c136095be9b3f 100644 (file)
@@ -71,6 +71,8 @@ struct mlx5e_sw_stats {
        u64 rx_csum_unnecessary;
        u64 rx_csum_none;
        u64 rx_csum_complete;
+       u64 rx_csum_complete_tail;
+       u64 rx_csum_complete_tail_slow;
        u64 rx_csum_unnecessary_inner;
        u64 rx_xdp_drop;
        u64 rx_xdp_redirect;
@@ -181,6 +183,8 @@ struct mlx5e_rq_stats {
        u64 packets;
        u64 bytes;
        u64 csum_complete;
+       u64 csum_complete_tail;
+       u64 csum_complete_tail_slow;
        u64 csum_unnecessary;
        u64 csum_unnecessary_inner;
        u64 csum_none;
index 8de64e88c670ccb850d5a84ddfb653a1ef7c300d..22a2ef11151441c3abcfc07e7a6e66e292563cae 100644 (file)
@@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
        return ret;
 }
 
-static void mlx5_fpga_tls_release_swid(struct idr *idr,
-                                      spinlock_t *idr_spinlock, u32 swid)
+static void *mlx5_fpga_tls_release_swid(struct idr *idr,
+                                       spinlock_t *idr_spinlock, u32 swid)
 {
        unsigned long flags;
+       void *ptr;
 
        spin_lock_irqsave(idr_spinlock, flags);
-       idr_remove(idr, swid);
+       ptr = idr_remove(idr, swid);
        spin_unlock_irqrestore(idr_spinlock, flags);
+       return ptr;
 }
 
 static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
@@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
        kfree(buf);
 }
 
-struct mlx5_teardown_stream_context {
-       struct mlx5_fpga_tls_command_context cmd;
-       u32 swid;
-};
-
 static void
 mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
                                  struct mlx5_fpga_device *fdev,
                                  struct mlx5_fpga_tls_command_context *cmd,
                                  struct mlx5_fpga_dma_buf *resp)
 {
-       struct mlx5_teardown_stream_context *ctx =
-                   container_of(cmd, struct mlx5_teardown_stream_context, cmd);
-
        if (resp) {
                u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
 
@@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
                        mlx5_fpga_err(fdev,
                                      "Teardown stream failed with syndrome = %d",
                                      syndrome);
-               else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
-                       mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
-                                                  &fdev->tls->tx_idr_spinlock,
-                                                  ctx->swid);
-               else
-                       mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
-                                                  &fdev->tls->rx_idr_spinlock,
-                                                  ctx->swid);
        }
        mlx5_fpga_tls_put_command_ctx(cmd);
 }
@@ -217,22 +203,22 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
        void *cmd;
        int ret;
 
-       rcu_read_lock();
-       flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
-       rcu_read_unlock();
-
-       if (!flow) {
-               WARN_ONCE(1, "Received NULL pointer for handle\n");
-               return -EINVAL;
-       }
-
        buf = kzalloc(size, GFP_ATOMIC);
        if (!buf)
                return -ENOMEM;
 
        cmd = (buf + 1);
 
+       rcu_read_lock();
+       flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
+       if (unlikely(!flow)) {
+               rcu_read_unlock();
+               WARN_ONCE(1, "Received NULL pointer for handle\n");
+               kfree(buf);
+               return -EINVAL;
+       }
        mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+       rcu_read_unlock();
 
        MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
        MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
@@ -253,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
                                            void *flow, u32 swid, gfp_t flags)
 {
-       struct mlx5_teardown_stream_context *ctx;
+       struct mlx5_fpga_tls_command_context *ctx;
        struct mlx5_fpga_dma_buf *buf;
        void *cmd;
 
@@ -261,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
        if (!ctx)
                return;
 
-       buf = &ctx->cmd.buf;
+       buf = &ctx->buf;
        cmd = (ctx + 1);
        MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
        MLX5_SET(tls_cmd, cmd, swid, swid);
@@ -272,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
        buf->sg[0].data = cmd;
        buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
 
-       ctx->swid = swid;
-       mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
+       mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
                               mlx5_fpga_tls_teardown_completion);
 }
 
@@ -283,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
        struct mlx5_fpga_tls *tls = mdev->fpga->tls;
        void *flow;
 
-       rcu_read_lock();
        if (direction_sx)
-               flow = idr_find(&tls->tx_idr, swid);
+               flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
+                                                 &tls->tx_idr_spinlock,
+                                                 swid);
        else
-               flow = idr_find(&tls->rx_idr, swid);
-
-       rcu_read_unlock();
+               flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
+                                                 &tls->rx_idr_spinlock,
+                                                 swid);
 
        if (!flow) {
                mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
@@ -297,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
                return;
        }
 
+       synchronize_rcu(); /* before kfree(flow) */
        mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
 }
 
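The ownership change above follows the standard IDR-plus-RCU teardown
pattern: the entry is removed under the spinlock (so no new idr_find() can
return it), and synchronize_rcu() runs before the memory is handed to the
teardown command, letting in-flight rcu_read_lock() readers such as
mlx5_fpga_tls_resync_rx() drain first. A generic sketch of the pattern
(hypothetical names):

    static void *detach_and_quiesce(struct idr *idr, spinlock_t *lock,
                                    u32 id)
    {
            unsigned long flags;
            void *obj;

            spin_lock_irqsave(lock, flags);
            obj = idr_remove(idr, id);      /* unpublish the entry */
            spin_unlock_irqrestore(lock, flags);

            if (obj)
                    synchronize_rcu();      /* wait out RCU readers */
            return obj;                     /* now exclusively owned */
    }
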
index 21b7f05b16a5f6053a88c1cdb9067c0f9e26ea10..361468e0435dcc9fbb667716e483c4104ebf7fea 100644 (file)
@@ -317,10 +317,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
                size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
 
        i2c_addr = MLX5_I2C_ADDR_LOW;
-       if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
-               i2c_addr = MLX5_I2C_ADDR_HIGH;
-               offset -= MLX5_EEPROM_PAGE_LENGTH;
-       }
 
        MLX5_SET(mcia_reg, in, l, 0);
        MLX5_SET(mcia_reg, in, module, module_num);
index d23d53c0e2842bc0e28a179d3ef6457a3369cee7..f26a4ca293637b48ccc9aa8b9e55f375f767eedb 100644 (file)
@@ -568,7 +568,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
        if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
                return 0;
 
-       emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
+       emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
        if (!emad_wq)
                return -ENOMEM;
        mlxsw_core->emad_wq = emad_wq;
@@ -1958,10 +1958,10 @@ static int __init mlxsw_core_module_init(void)
 {
        int err;
 
-       mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
+       mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
        if (!mlxsw_wq)
                return -ENOMEM;
-       mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
+       mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
                                            mlxsw_core_driver_name);
        if (!mlxsw_owq) {
                err = -ENOMEM;
index ffee38e36ce8995348f776bbdbb8e4601b36b223..8648ca1712543abf8e6ef7bcabfae895c4009465 100644 (file)
@@ -27,7 +27,7 @@
 
 #define MLXSW_PCI_SW_RESET                     0xF0010
 #define MLXSW_PCI_SW_RESET_RST_BIT             BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS       13000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS       20000
 #define MLXSW_PCI_SW_RESET_WAIT_MSECS          100
 #define MLXSW_PCI_FW_READY                     0xA1844
 #define MLXSW_PCI_FW_READY_MASK                        0xFFFF
index 9eb63300c1d3a712d6377ea2835c5d64f2b32678..6b8aa3761899b03e7c9211b7d3272a1027a57371 100644 (file)
@@ -3126,11 +3126,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
        if (err)
                return err;
 
+       mlxsw_sp_port->link.autoneg = autoneg;
+
        if (!netif_running(dev))
                return 0;
 
-       mlxsw_sp_port->link.autoneg = autoneg;
-
        mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
        mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
 
@@ -3316,7 +3316,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
                                            MLXSW_REG_QEEC_HIERARCY_TC,
                                            i + 8, i,
-                                           false, 0);
+                                           true, 100);
                if (err)
                        return err;
        }
index 9a79b5e1159743a9b619407cd3b9b9af065c97ca..d633bef5f10512269547c00f718f552720dd29a3 100644 (file)
@@ -70,6 +70,7 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
        {MLXSW_REG_SBXX_DIR_EGRESS, 1},
        {MLXSW_REG_SBXX_DIR_EGRESS, 2},
        {MLXSW_REG_SBXX_DIR_EGRESS, 3},
+       {MLXSW_REG_SBXX_DIR_EGRESS, 15},
 };
 
 #define MLXSW_SP_SB_ING_TC_COUNT 8
@@ -428,6 +429,7 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+       MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
 };
 
 static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
@@ -517,14 +519,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
        MLXSW_SP_SB_CM(0, 7, 4),
        MLXSW_SP_SB_CM(0, 7, 4),
        MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
        MLXSW_SP_SB_CM(1, 0xff, 4),
 };
 
@@ -671,6 +673,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
+       MLXSW_SP_SB_PM(10000, 90000),
 };
 
 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
index 52fed8c7bf1edf61aa1c3b61c04f71e8dfc9717f..902e766a8ed33eabbe0b11075284878ba3fb3ad4 100644 (file)
@@ -6781,7 +6781,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
        /* A RIF is not created for macvlan netdevs. Their MAC is used to
         * populate the FDB
         */
-       if (netif_is_macvlan(dev))
+       if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
                return 0;
 
        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
index f6ce386c30367f08a86153b8ac691a1480a1588a..50111f228d77228758d5e0ad634b1848712e11d4 100644 (file)
@@ -1630,7 +1630,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
        u16 fid_index;
        int err = 0;
 
-       if (switchdev_trans_ph_prepare(trans))
+       if (switchdev_trans_ph_commit(trans))
                return 0;
 
        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
index a1d0d6e4253324f702f6eecae804fdd574b5f32e..d715ef4fc92fdb61a89122793133db147c1e4f59 100644 (file)
@@ -613,7 +613,7 @@ static int ocelot_mact_mc_add(struct ocelot_port *port,
                              struct netdev_hw_addr *hw_addr)
 {
        struct ocelot *ocelot = port->ocelot;
-       struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL);
+       struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_ATOMIC);
 
        if (!ha)
                return -ENOMEM;
@@ -959,10 +959,8 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
                       ETH_GSTRING_LEN);
 }
 
-static void ocelot_check_stats(struct work_struct *work)
+static void ocelot_update_stats(struct ocelot *ocelot)
 {
-       struct delayed_work *del_work = to_delayed_work(work);
-       struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work);
        int i, j;
 
        mutex_lock(&ocelot->stats_lock);
@@ -986,11 +984,19 @@ static void ocelot_check_stats(struct work_struct *work)
                }
        }
 
-       cancel_delayed_work(&ocelot->stats_work);
+       mutex_unlock(&ocelot->stats_lock);
+}
+
+static void ocelot_check_stats_work(struct work_struct *work)
+{
+       struct delayed_work *del_work = to_delayed_work(work);
+       struct ocelot *ocelot = container_of(del_work, struct ocelot,
+                                            stats_work);
+
+       ocelot_update_stats(ocelot);
+
        queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
                           OCELOT_STATS_CHECK_DELAY);
-
-       mutex_unlock(&ocelot->stats_lock);
 }
 
 static void ocelot_get_ethtool_stats(struct net_device *dev,
@@ -1001,7 +1007,7 @@ static void ocelot_get_ethtool_stats(struct net_device *dev,
        int i;
 
        /* check and update now */
-       ocelot_check_stats(&ocelot->stats_work.work);
+       ocelot_update_stats(ocelot);
 
        /* Copy all counters */
        for (i = 0; i < ocelot->num_stats; i++)
@@ -1809,7 +1815,7 @@ int ocelot_init(struct ocelot *ocelot)
                                 ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
                                 ANA_CPUQ_8021_CFG, i);
 
-       INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats);
+       INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
        queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
                           OCELOT_STATS_CHECK_DELAY);
        return 0;
index 7cde387e5ec62a0c36f070a163a6e5b9c38a6a4b..51cd57ab3d9584d3d67508cc94aa6c9590aa11d1 100644 (file)
@@ -2366,6 +2366,7 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
                                dma_object->addr))) {
                        vxge_os_dma_free(devh->pdev, memblock,
                                &dma_object->acc_handle);
+                       memblock = NULL;
                        goto exit;
                }
 
index 9852080cf45483c49db22663c7f8caa1f7fe5e3b..ff391308566525cd613acc3a733130b41e7246a9 100644 (file)
@@ -39,7 +39,7 @@ nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
        }
        if (knode->sel->off || knode->sel->offshift || knode->sel->offmask ||
            knode->sel->offoff || knode->fshift) {
-               NL_SET_ERR_MSG_MOD(extack, "variable offseting not supported");
+               NL_SET_ERR_MSG_MOD(extack, "variable offsetting not supported");
                return false;
        }
        if (knode->sel->hoff || knode->sel->hmask) {
@@ -78,7 +78,7 @@ nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
 
        k = &knode->sel->keys[0];
        if (k->offmask) {
-               NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offseting not supported");
+               NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offsetting not supported");
                return false;
        }
        if (k->off) {
index 43a57ec296fd9c6e55ec667f34b6ddd98880e41f..127c89b22ef0da7d60680481467517f83ca7bc72 100644 (file)
@@ -431,12 +431,16 @@ struct qed_qm_info {
        u8 num_pf_rls;
 };
 
+#define QED_OVERFLOW_BIT       1
+
 struct qed_db_recovery_info {
        struct list_head list;
 
        /* Lock to protect the doorbell recovery mechanism list */
        spinlock_t lock;
+       bool dorq_attn;
        u32 db_recovery_counter;
+       unsigned long overflow;
 };
 
 struct storm_stats {
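
The new QED_OVERFLOW_BIT and the unsigned long overflow word added here are driven by atomic bitops in the later hunks: the attention context records an overflow with set_bit(), and the periodic handler consumes it exactly once with test_and_clear_bit(). A minimal sketch, with an illustrative flag word in place of the driver's per-hwfn state:

#include <linux/bitops.h>

#define MY_OVERFLOW_BIT	1	/* mirrors QED_OVERFLOW_BIT above */

static unsigned long overflow_flags;

/* attention (atomic) context: record the event without sleeping */
static void note_overflow(void)
{
	set_bit(MY_OVERFLOW_BIT, &overflow_flags);
}

/* periodic handler: returns true at most once per recorded event */
static bool take_overflow(void)
{
	return test_and_clear_bit(MY_OVERFLOW_BIT, &overflow_flags);
}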
@@ -920,8 +924,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
 
 /* doorbell recovery mechanism */
 void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
-                            enum qed_db_rec_exec db_exec);
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
 bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
 
 /* Other Linux specific common definitions */
index 9df8c4b3b54e3dc71fdca5a759a9d98b303d8ac7..866cdc86a3f27c879d4364089597ea499a0b1714 100644 (file)
@@ -102,11 +102,15 @@ static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
 
 /* Doorbell address sanity (address within doorbell bar range) */
 static bool qed_db_rec_sanity(struct qed_dev *cdev,
-                             void __iomem *db_addr, void *db_data)
+                             void __iomem *db_addr,
+                             enum qed_db_rec_width db_width,
+                             void *db_data)
 {
+       u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;
+
        /* Make sure doorbell address is within the doorbell bar */
        if (db_addr < cdev->doorbells ||
-           (u8 __iomem *)db_addr >
+           (u8 __iomem *)db_addr + width >
            (u8 __iomem *)cdev->doorbells + cdev->db_size) {
                WARN(true,
                     "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
@@ -159,7 +163,7 @@ int qed_db_recovery_add(struct qed_dev *cdev,
        }
 
        /* Sanitize doorbell address */
-       if (!qed_db_rec_sanity(cdev, db_addr, db_data))
+       if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data))
                return -EINVAL;
 
        /* Obtain hwfn from doorbell address */
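
The qed_db_rec_sanity() change makes the bounds check width-aware: a doorbell that starts inside the BAR but whose access would run past the BAR's end is now rejected as well. The general shape of such a check, with illustrative names:

#include <linux/types.h>

/* true iff [db, db + width) lies entirely inside [bar, bar + bar_size) */
static bool db_in_bar(u8 __iomem *db, u32 width,
		      u8 __iomem *bar, u32 bar_size)
{
	return db >= bar && db + width <= bar + bar_size;
}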
@@ -205,10 +209,6 @@ int qed_db_recovery_del(struct qed_dev *cdev,
                return 0;
        }
 
-       /* Sanitize doorbell address */
-       if (!qed_db_rec_sanity(cdev, db_addr, db_data))
-               return -EINVAL;
-
        /* Obtain hwfn from doorbell address */
        p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
 
@@ -300,31 +300,24 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
 
 /* Ring the doorbell of a single doorbell recovery entry */
 static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
-                                struct qed_db_recovery_entry *db_entry,
-                                enum qed_db_rec_exec db_exec)
-{
-       if (db_exec != DB_REC_ONCE) {
-               /* Print according to width */
-               if (db_entry->db_width == DB_REC_WIDTH_32B) {
-                       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-                                  "%s doorbell address %p data %x\n",
-                                  db_exec == DB_REC_DRY_RUN ?
-                                  "would have rung" : "ringing",
-                                  db_entry->db_addr,
-                                  *(u32 *)db_entry->db_data);
-               } else {
-                       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-                                  "%s doorbell address %p data %llx\n",
-                                  db_exec == DB_REC_DRY_RUN ?
-                                  "would have rung" : "ringing",
-                                  db_entry->db_addr,
-                                  *(u64 *)(db_entry->db_data));
-               }
+                                struct qed_db_recovery_entry *db_entry)
+{
+       /* Print according to width */
+       if (db_entry->db_width == DB_REC_WIDTH_32B) {
+               DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                          "ringing doorbell address %p data %x\n",
+                          db_entry->db_addr,
+                          *(u32 *)db_entry->db_data);
+       } else {
+               DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                          "ringing doorbell address %p data %llx\n",
+                          db_entry->db_addr,
+                          *(u64 *)(db_entry->db_data));
        }
 
        /* Sanity */
        if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
-                              db_entry->db_data))
+                              db_entry->db_width, db_entry->db_data))
                return;
 
        /* Flush the write combined buffer. Since there are multiple doorbelling
@@ -334,14 +327,12 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
        wmb();
 
        /* Ring the doorbell */
-       if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
-               if (db_entry->db_width == DB_REC_WIDTH_32B)
-                       DIRECT_REG_WR(db_entry->db_addr,
-                                     *(u32 *)(db_entry->db_data));
-               else
-                       DIRECT_REG_WR64(db_entry->db_addr,
-                                       *(u64 *)(db_entry->db_data));
-       }
+       if (db_entry->db_width == DB_REC_WIDTH_32B)
+               DIRECT_REG_WR(db_entry->db_addr,
+                             *(u32 *)(db_entry->db_data));
+       else
+               DIRECT_REG_WR64(db_entry->db_addr,
+                               *(u64 *)(db_entry->db_data));
 
        /* Flush the write combined buffer. Next doorbell may come from a
         * different entity to the same address...
@@ -350,29 +341,21 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
 }
 
 /* Traverse the doorbell recovery entry list and ring all the doorbells */
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
-                            enum qed_db_rec_exec db_exec)
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
 {
        struct qed_db_recovery_entry *db_entry = NULL;
 
-       if (db_exec != DB_REC_ONCE) {
-               DP_NOTICE(p_hwfn,
-                         "Executing doorbell recovery. Counter was %d\n",
-                         p_hwfn->db_recovery_info.db_recovery_counter);
+       DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n",
+                 p_hwfn->db_recovery_info.db_recovery_counter);
 
-               /* Track amount of times recovery was executed */
-               p_hwfn->db_recovery_info.db_recovery_counter++;
-       }
+       /* Track amount of times recovery was executed */
+       p_hwfn->db_recovery_info.db_recovery_counter++;
 
        /* Protect the list */
        spin_lock_bh(&p_hwfn->db_recovery_info.lock);
        list_for_each_entry(db_entry,
-                           &p_hwfn->db_recovery_info.list, list_entry) {
-               qed_db_recovery_ring(p_hwfn, db_entry, db_exec);
-               if (db_exec == DB_REC_ONCE)
-                       break;
-       }
-
+                           &p_hwfn->db_recovery_info.list, list_entry)
+               qed_db_recovery_ring(p_hwfn, db_entry);
        spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
 }
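
With the dry-run and once-only modes gone, qed_db_recovery_execute() reduces to a plain traversal of the recovery list under the BH-disabling spinlock. A self-contained sketch of that pattern, where ring_one() is a hypothetical per-entry action:

#include <linux/list.h>
#include <linux/spinlock.h>

struct rec_entry {
	struct list_head list_entry;
	/* ... doorbell address and data ... */
};

void ring_one(struct rec_entry *e);	/* hypothetical */

static void ring_all(struct list_head *head, spinlock_t *lock)
{
	struct rec_entry *e;

	spin_lock_bh(lock);	/* _bh: the list is also touched from softirq */
	list_for_each_entry(e, head, list_entry)
		ring_one(e);
	spin_unlock_bh(lock);
}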
 
index e23980e301b6a2be7f015d6a0c6f6aaadbf788b3..8848d5bed6e5c58a188900bf9ad5710529d66b51 100644 (file)
@@ -378,6 +378,9 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
        u32 count = QED_DB_REC_COUNT;
        u32 usage = 1;
 
+       /* Flush any pending (e)dpms as they may never arrive */
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
        /* wait for usage to zero or count to run out. This is necessary since
         * EDPM doorbell transactions can take multiple 64b cycles, and as such
         * can "split" over the pci. Possibly, the doorbell drop can happen with
@@ -406,51 +409,74 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
 
 int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u32 overflow;
+       u32 attn_ovfl, cur_ovfl;
        int rc;
 
-       overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
-       DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow);
-       if (!overflow) {
-               qed_db_recovery_execute(p_hwfn, DB_REC_ONCE);
+       attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
+                                      &p_hwfn->db_recovery_info.overflow);
+       cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+       if (!cur_ovfl && !attn_ovfl)
                return 0;
-       }
 
-       if (qed_edpm_enabled(p_hwfn)) {
+       DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
+                 attn_ovfl, cur_ovfl);
+
+       if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
                rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
                if (rc)
                        return rc;
        }
 
-       /* Flush any pending (e)dpm as they may never arrive */
-       qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
-
        /* Release overflow sticky indication (stop silently dropping everything) */
        qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
 
        /* Repeat all last doorbells (doorbell drop recovery) */
-       qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+       qed_db_recovery_execute(p_hwfn);
 
        return 0;
 }
 
-static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
 {
-       u32 int_sts, first_drop_reason, details, address, all_drops_reason;
        struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+       u32 overflow;
        int rc;
 
-       int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
-       DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+       overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+       if (!overflow)
+               goto out;
+
+       /* Run PF doorbell recovery in next periodic handler */
+       set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);
+
+       if (!p_hwfn->db_bar_no_edpm) {
+               rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
+               if (rc)
+                       goto out;
+       }
+
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+out:
+       /* Schedule the handler even if overflow was not detected */
+       qed_periodic_db_rec_start(p_hwfn);
+}
+
+static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
+{
+       u32 int_sts, first_drop_reason, details, address, all_drops_reason;
+       struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
 
        /* int_sts may be zero since all PFs were interrupted for doorbell
         * overflow but another one already handled it. Can abort here. If
         * this PF also requires overflow recovery, we will be interrupted again.
         * The masked almost full indication may also be set. Ignoring.
         */
+       int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
        if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
                return 0;
 
+       DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+
        /* check if db_drop or overflow happened */
        if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
                       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
@@ -477,11 +503,6 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
                          GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
                          first_drop_reason, all_drops_reason);
 
-               rc = qed_db_rec_handler(p_hwfn, p_ptt);
-               qed_periodic_db_rec_start(p_hwfn);
-               if (rc)
-                       return rc;
-
                /* Clear the doorbell drop details and prepare for next drop */
                qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
 
@@ -507,6 +528,25 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
        return -EINVAL;
 }
 
+static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+{
+       p_hwfn->db_recovery_info.dorq_attn = true;
+       qed_dorq_attn_overflow(p_hwfn);
+
+       return qed_dorq_attn_int_sts(p_hwfn);
+}
+
+static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
+{
+       if (p_hwfn->db_recovery_info.dorq_attn)
+               goto out;
+
+       /* Call DORQ callback if the attention was missed */
+       qed_dorq_attn_cb(p_hwfn);
+out:
+       p_hwfn->db_recovery_info.dorq_attn = false;
+}
+
 /* Instead of major changes to the data-structure, we have some 'special'
  * identifiers for sources that changed meaning between adapters.
  */
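
qed_dorq_attn_handler() is a "missed attention" catch-up: the per-source callback records that it ran, and after all attention sources are processed the callback is invoked by hand if the aggregated bit fired without it. A minimal single-flag sketch (the driver keeps the flag per hwfn, not globally):

static bool dorq_seen;

static int dorq_cb(void)
{
	dorq_seen = true;
	/* ... handle the DORQ attention ... */
	return 0;
}

static void dorq_catchup(void)
{
	if (!dorq_seen)
		dorq_cb();	/* attention fired but our callback never ran */
	dorq_seen = false;	/* reset for the next assertion */
}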
@@ -1080,6 +1120,9 @@ static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
                }
        }
 
+       /* Handle missed DORQ attention */
+       qed_dorq_attn_handler(p_hwfn);
+
        /* Clear IGU indication for the deasserted bits */
        DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
                                    GTT_BAR0_MAP_REG_IGU_CMD +
index 1f356ed4f761e72486df4b57b1994e8f2dd89032..d473b522afc5137f69edece72c535397623ad05d 100644 (file)
@@ -192,8 +192,8 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
 
 /**
  * @brief - Doorbell Recovery handler.
- *          Run DB_REAL_DEAL doorbell recovery in case of PF overflow
- *          (and flush DORQ if needed), otherwise run DB_REC_ONCE.
+ *          Run doorbell recovery in case of PF overflow (and flush DORQ if
+ *          needed).
  *
  * @param p_hwfn
  * @param p_ptt
index f164d4acebcb43a4cd7b2858ad31e95e80467b74..6de23b56b2945c55118cbc3464a46881031583eb 100644 (file)
@@ -970,7 +970,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
        }
 }
 
-#define QED_PERIODIC_DB_REC_COUNT              100
+#define QED_PERIODIC_DB_REC_COUNT              10
 #define QED_PERIODIC_DB_REC_INTERVAL_MS                100
 #define QED_PERIODIC_DB_REC_INTERVAL \
        msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
index 9faaa6df78ed99b8b20b7f78b9efa9d4113b74e3..2f318aaf2b05d8145d4a0a4c45421fbff0bad455 100644 (file)
@@ -1591,7 +1591,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
                        p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
                } else {
                        DP_INFO(p_hwfn,
-                               "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
+                               "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
                                vf->abs_vf_id,
                                req->vfdev_info.eth_fp_hsi_major,
                                req->vfdev_info.eth_fp_hsi_minor,
index 5f3f42a25361679220fcc55224fcb2aa46adec03..bddb2b5982dcfedff2e8139741be978a1fc40e95 100644 (file)
@@ -490,18 +490,17 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
 
        ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
        if (IS_ERR(ptp->clock)) {
-               rc = -EINVAL;
                DP_ERR(edev, "PTP clock registration failed\n");
+               qede_ptp_disable(edev);
+               rc = -EINVAL;
                goto err2;
        }
 
        return 0;
 
-err2:
-       qede_ptp_disable(edev);
-       ptp->clock = NULL;
 err1:
        kfree(ptp);
+err2:
        edev->ptp = NULL;
 
        return rc;
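
The qede_ptp fix reorders the error labels so that teardown happens strictly in reverse order of setup, the invariant that goto-based unwinding relies on. A sketch of the shape being restored, with hypothetical step/teardown functions:

int step_a(void);
int step_b(void);
int step_c(void);
void teardown_a(void);
void teardown_b(void);

static int setup(void)
{
	int rc;

	rc = step_a();
	if (rc)
		return rc;

	rc = step_b();
	if (rc)
		goto undo_a;

	rc = step_c();
	if (rc)
		goto undo_b;

	return 0;

undo_b:
	teardown_b();
undo_a:
	teardown_a();
	return rc;
}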
index a18149720aa2eadcd5ba9690bad3d568f5aeb812..cba5881b2746a36da1b710c5be14ed8ba5a7c080 100644 (file)
@@ -673,7 +673,8 @@ static void netsec_process_tx(struct netsec_priv *priv)
 }
 
 static void *netsec_alloc_rx_data(struct netsec_priv *priv,
-                                 dma_addr_t *dma_handle, u16 *desc_len)
+                                 dma_addr_t *dma_handle, u16 *desc_len,
+                                 bool napi)
 {
        size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        size_t payload_len = NETSEC_RX_BUF_SZ;
@@ -682,7 +683,7 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
 
        total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD);
 
-       buf = napi_alloc_frag(total_len);
+       buf = napi ? napi_alloc_frag(total_len) : netdev_alloc_frag(total_len);
        if (!buf)
                return NULL;
 
@@ -765,7 +766,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
                /* allocate a fresh buffer and map it to the hardware.
                 * This will eventually replace the old buffer in the hardware
                 */
-               buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
+               buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len,
+                                               true);
                if (unlikely(!buf_addr))
                        break;
 
@@ -1069,7 +1071,8 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
                void *buf;
                u16 len;
 
-               buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
+               buf = netsec_alloc_rx_data(priv, &dma_handle, &len,
+                                          false);
                if (!buf) {
                        netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
                        goto err_out;
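
netsec_alloc_rx_data() grows a context flag because napi_alloc_frag() may only be called from softirq (NAPI poll) context, while initial ring setup runs in process context and needs the any-context variant. A sketch of the selector:

#include <linux/skbuff.h>

static void *rx_buf_alloc(unsigned int len, bool napi)
{
	/* napi_alloc_frag() is softirq-only; netdev_alloc_frag()
	 * is safe from any context */
	return napi ? napi_alloc_frag(len) : netdev_alloc_frag(len);
}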
index b7dd4e3c760d82da1439fb0d8dd9d418c34256a2..6d690678c20e11bf8594729524fafa238bb1c4bb 100644 (file)
@@ -140,7 +140,7 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
        p->des0 |= cpu_to_le32(RDES0_OWN);
 
        bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
-       p->des1 |= cpu_to_le32(bfsize & RDES1_BUFFER1_SIZE_MASK);
+       p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
 
        if (mode == STMMAC_CHAIN_MODE)
                ndesc_rx_set_on_chain(p, end);
index a26e36dbb5df0deff58ef23a18df287c0ceef330..48712437d0da8039e74c8cc34d8063ac666c03a8 100644 (file)
@@ -2616,8 +2616,6 @@ static int stmmac_open(struct net_device *dev)
        u32 chan;
        int ret;
 
-       stmmac_check_ether_addr(priv);
-
        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
            priv->hw->pcs != STMMAC_PCS_TBI &&
            priv->hw->pcs != STMMAC_PCS_RTBI) {
@@ -4303,6 +4301,8 @@ int stmmac_dvr_probe(struct device *device,
        if (ret)
                goto error_hw_init;
 
+       stmmac_check_ether_addr(priv);
+
        /* Configure real RX and TX queues */
        netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
        netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
index d819e8eaba1225dc5e9b188e42636721cc66a4c0..cc1e887e47b50f31bba7a53e8f146d9ac7fb4fa7 100644 (file)
@@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = {
                },
                .driver_data = (void *)&galileo_stmmac_dmi_data,
        },
+       /*
+        * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
+        * The asset tag "6ES7647-0AA00-0YA2" is only for the IOT2020, which
+        * has only one PCI network device, while other asset tags are
+        * for the IOT2040, which has two.
+        */
        {
                .matches = {
                        DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
@@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = {
        {
                .matches = {
                        DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
-                       DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
-                                       "6ES7647-0AA00-1YA2"),
                },
                .driver_data = (void *)&iot2040_stmmac_dmi_data,
        },
index 92b64e254b44ed764d7db9039c949cbdb6d66597..7475cef17cf76ca09e59b6987b82010d6ce1076f 100644 (file)
@@ -159,6 +159,14 @@ static const struct spi_device_id ks8995_id[] = {
 };
 MODULE_DEVICE_TABLE(spi, ks8995_id);
 
+static const struct of_device_id ks8895_spi_of_match[] = {
+	{ .compatible = "micrel,ks8995" },
+	{ .compatible = "micrel,ksz8864" },
+	{ .compatible = "micrel,ksz8795" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ks8895_spi_of_match);
+
 static inline u8 get_chip_id(u8 val)
 {
        return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
@@ -526,6 +534,7 @@ static int ks8995_remove(struct spi_device *spi)
 static struct spi_driver ks8995_driver = {
        .driver = {
                .name       = "spi-ks8995",
+               .of_match_table = of_match_ptr(ks8895_spi_of_match),
        },
        .probe    = ks8995_probe,
        .remove   = ks8995_remove,
index f4e93f5fc2043ebb29c5b36e94afe49ec0c7d7ba..ea90db3c77058b6a799245bd5a3ff9f672b5da5e 100644 (file)
@@ -153,7 +153,7 @@ slhc_init(int rslots, int tslots)
 void
 slhc_free(struct slcompress *comp)
 {
-       if ( comp == NULLSLCOMPR )
+       if ( IS_ERR_OR_NULL(comp) )
                return;
 
        if ( comp->tstate != NULLSLSTATE )
index 6ed96fdfd96dd5a858e8416fcfcfa8c78c628e2a..16963f7a88f748fd0946fafef5c5a477e138cf63 100644 (file)
@@ -1156,6 +1156,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
                return -EINVAL;
        }
 
+       if (netdev_has_upper_dev(dev, port_dev)) {
+               NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
+               netdev_err(dev, "Device %s is already an upper device of the team interface\n",
+                          portname);
+               return -EBUSY;
+       }
+
        if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
            vlan_uses_dev(dev)) {
                NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
@@ -1246,6 +1253,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
                goto err_option_port_add;
        }
 
+       /* set promiscuity level to new slave */
+       if (dev->flags & IFF_PROMISC) {
+               err = dev_set_promiscuity(port_dev, 1);
+               if (err)
+                       goto err_set_slave_promisc;
+       }
+
+       /* set allmulti level to new slave */
+       if (dev->flags & IFF_ALLMULTI) {
+               err = dev_set_allmulti(port_dev, 1);
+               if (err) {
+                       if (dev->flags & IFF_PROMISC)
+                               dev_set_promiscuity(port_dev, -1);
+                       goto err_set_slave_promisc;
+               }
+       }
+
        netif_addr_lock_bh(dev);
        dev_uc_sync_multiple(port_dev, dev);
        dev_mc_sync_multiple(port_dev, dev);
@@ -1262,6 +1286,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
 
        return 0;
 
+err_set_slave_promisc:
+       __team_option_inst_del_port(team, port);
+
 err_option_port_add:
        team_upper_dev_unlink(team, port);
 
@@ -1307,6 +1334,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
 
        team_port_disable(team, port);
        list_del_rcu(&port->list);
+
+       if (dev->flags & IFF_PROMISC)
+               dev_set_promiscuity(port_dev, -1);
+       if (dev->flags & IFF_ALLMULTI)
+               dev_set_allmulti(port_dev, -1);
+
        team_upper_dev_unlink(team, port);
        netdev_rx_handler_unregister(port_dev);
        team_port_disable_netpoll(port);
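
The team hunks propagate the master's IFF_PROMISC and IFF_ALLMULTI to a newly enslaved port and undo them on removal; both kernel helpers are reference-counted, hence the +1/-1 deltas, and the allmulti failure path rolls back the promiscuity bump. A sketch under those assumptions:

#include <linux/netdevice.h>

static int propagate_rx_flags(struct net_device *upper,
			      struct net_device *lower)
{
	int err;

	if (upper->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(lower, 1);
		if (err)
			return err;
	}

	if (upper->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(lower, 1);
		if (err) {
			/* roll back the first step before failing */
			if (upper->flags & IFF_PROMISC)
				dev_set_promiscuity(lower, -1);
			return err;
		}
	}

	return 0;
}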
index cd15c32b2e43686925161ad48b080842f00ec19c..9ee4d7402ca23296091939a59a5a6fec459a8472 100644 (file)
@@ -875,6 +875,7 @@ static const struct net_device_ops vrf_netdev_ops = {
        .ndo_init               = vrf_dev_init,
        .ndo_uninit             = vrf_dev_uninit,
        .ndo_start_xmit         = vrf_xmit,
+       .ndo_set_mac_address    = eth_mac_addr,
        .ndo_get_stats64        = vrf_get_stats64,
        .ndo_add_slave          = vrf_add_slave,
        .ndo_del_slave          = vrf_del_slave,
@@ -1274,6 +1275,7 @@ static void vrf_setup(struct net_device *dev)
        /* default to no qdisc; user can add if desired */
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->priv_flags |= IFF_NO_RX_HANDLER;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 
        /* VRF devices do not care about MTU, but if the MTU is set
         * too low then the ipv4 and ipv6 protocols are disabled
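
The vrf change is two cooperating pieces: the generic eth_mac_addr() setter is wired into the netdev ops, and IFF_LIVE_ADDR_CHANGE permits changing the MAC while the interface is up. A minimal sketch of the pairing (example_ops and example_setup are illustrative, not vrf code):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static const struct net_device_ops example_ops = {
	.ndo_set_mac_address	= eth_mac_addr,	/* generic validator + setter */
};

static void example_setup(struct net_device *dev)
{
	dev->netdev_ops = &example_ops;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;	/* no down/up cycle required */
}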
index a20ea270d519be335b9b0086b1d5f9c8ea3d385d..1acc622d218333ac131666536b1077fb1b9ee808 100644 (file)
@@ -2728,7 +2728,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
                        num_msdus++;
                        num_bytes += ret;
                }
-               ieee80211_return_txq(hw, txq);
+               ieee80211_return_txq(hw, txq, false);
                ieee80211_txq_schedule_end(hw, txq->ac);
 
                record->num_msdus = cpu_to_le16(num_msdus);
index b73c23d4ce86d0cd0631a4838b4ce3a150e34f49..41e89db244d20e67f27d3f226bbceba750962fb0 100644 (file)
@@ -4089,7 +4089,7 @@ static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac)
                        if (ret < 0)
                                break;
                }
-               ieee80211_return_txq(hw, txq);
+               ieee80211_return_txq(hw, txq, false);
                ath10k_htt_tx_txq_update(hw, txq);
                if (ret == -EBUSY)
                        break;
@@ -4374,7 +4374,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
                if (ret < 0)
                        break;
        }
-       ieee80211_return_txq(hw, txq);
+       ieee80211_return_txq(hw, txq, false);
        ath10k_htt_tx_txq_update(hw, txq);
 out:
        ieee80211_txq_schedule_end(hw, ac);
index 773d428ff1b03328ca43c1c8db74103d8d846444..b17e1ca40995eab7b0f80c479f0cff7381801e76 100644 (file)
@@ -1938,12 +1938,15 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
                goto out;
 
        while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) {
+               bool force;
+
                tid = (struct ath_atx_tid *)queue->drv_priv;
 
                ret = ath_tx_sched_aggr(sc, txq, tid);
                ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret);
 
-               ieee80211_return_txq(hw, queue);
+               force = !skb_queue_empty(&tid->retry_q);
+               ieee80211_return_txq(hw, queue, force);
        }
 
 out:
index fdc56f821b5ac0961f8b503d5f918999a01e214e..eb6defb6d0cd9b1e61e74c2d3e173c8a9d916da8 100644 (file)
@@ -82,6 +82,7 @@
 #define IWL_22000_HR_A0_FW_PRE         "iwlwifi-QuQnj-a0-hr-a0-"
 #define IWL_22000_SU_Z0_FW_PRE         "iwlwifi-su-z0-"
 #define IWL_QU_B_JF_B_FW_PRE           "iwlwifi-Qu-b0-jf-b0-"
+#define IWL_QUZ_A_HR_B_FW_PRE          "iwlwifi-QuZ-a0-hr-b0-"
 #define IWL_QNJ_B_JF_B_FW_PRE          "iwlwifi-QuQnj-b0-jf-b0-"
 #define IWL_CC_A_FW_PRE                        "iwlwifi-cc-a0-"
 #define IWL_22000_SO_A_JF_B_FW_PRE     "iwlwifi-so-a0-jf-b0-"
        IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
        IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
-#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
-       IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
+       IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
        IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api)            \
@@ -235,8 +236,20 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
        .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };
 
-const struct iwl_cfg iwl22260_2ax_cfg = {
-       .name = "Intel(R) Wireless-AX 22260",
+const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
+       .name = "Intel(R) Wi-Fi 6 AX101",
+       .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+       IWL_DEVICE_22500,
+       /*
+        * This device doesn't support receiving BlockAck with a large bitmap
+        * so we need to restrict the size of transmitted aggregation to the
+        * HT size; mac80211 would otherwise pick the HE max (256) by default.
+        */
+       .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl_ax200_cfg_cc = {
+       .name = "Intel(R) Wi-Fi 6 AX200 160MHz",
        .fw_name_pre = IWL_CC_A_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -249,7 +262,7 @@ const struct iwl_cfg iwl22260_2ax_cfg = {
 };
 
 const struct iwl_cfg killer1650x_2ax_cfg = {
-       .name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (200NGW)",
+       .name = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)",
        .fw_name_pre = IWL_CC_A_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -262,7 +275,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = {
 };
 
 const struct iwl_cfg killer1650w_2ax_cfg = {
-       .name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (200D2W)",
+       .name = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)",
        .fw_name_pre = IWL_CC_A_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -328,7 +341,7 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
 };
 
 const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
-       .name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)",
+       .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
        .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -340,7 +353,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
 };
 
 const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
-       .name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)",
+       .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
        .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -444,6 +457,7 @@ MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
index f119c49cd39cd516c09459f4f145f898bd4fe38b..d7380016f1c0d4f4d85fd9b063f7344f5bd954c0 100644 (file)
@@ -1614,6 +1614,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
        if (!range) {
                IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n",
                        le32_to_cpu(reg->region_id), type);
+               memset(*data, 0, le32_to_cpu((*data)->len));
                return;
        }
 
@@ -1623,6 +1624,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
                if (range_size < 0) {
                        IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n",
                                le32_to_cpu(reg->region_id), type);
+                       memset(*data, 0, le32_to_cpu((*data)->len));
                        return;
                }
                range = range + range_size;
@@ -1807,12 +1809,12 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
 
        trigger = fwrt->dump.active_trigs[id].trig;
 
-       size = sizeof(*dump_file);
-       size += iwl_fw_ini_get_trigger_len(fwrt, trigger);
-
+       size = iwl_fw_ini_get_trigger_len(fwrt, trigger);
        if (!size)
                return NULL;
 
+       size += sizeof(*dump_file);
+
        dump_file = vzalloc(size);
        if (!dump_file)
                return NULL;
@@ -1942,14 +1944,10 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
        iwl_dump_error_desc->len = 0;
 
        ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0);
-       if (ret) {
+       if (ret)
                kfree(iwl_dump_error_desc);
-       } else {
-               set_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
-
-               /* trigger nmi to halt the fw */
-               iwl_force_nmi(fwrt->trans);
-       }
+       else
+               iwl_trans_sync_nmi(fwrt->trans);
 
        return ret;
 }
@@ -2489,22 +2487,6 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
 
 void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt)
 {
-       /* if the wait event timeout elapses instead of wake up then
-        * the driver did not receive NMI interrupt and can not assume the FW
-        * is halted
-        */
-       int ret = wait_event_timeout(fwrt->trans->fw_halt_waitq,
-                                    !test_bit(STATUS_FW_WAIT_DUMP,
-                                              &fwrt->trans->status),
-                                    msecs_to_jiffies(2000));
-       if (!ret) {
-               /* failed to receive NMI interrupt, assuming the FW is stuck */
-               set_bit(STATUS_FW_ERROR, &fwrt->trans->status);
-
-               clear_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
-       }
-
-       /* Assuming the op mode mutex is held at this point */
        iwl_fw_dbg_collect_sync(fwrt);
 
        iwl_trans_stop_device(fwrt->trans);
index 7adf4e4e841a92f3ae98534b011175e3cfb00ce7..12310e3d2fc5aa7b544b08c95ad3086c3de799c1 100644 (file)
@@ -76,7 +76,6 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
        fwrt->ops_ctx = ops_ctx;
        INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
        iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
-       init_waitqueue_head(&fwrt->trans->fw_halt_waitq);
 }
 IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
 
index f5f87773667b0bd2b68ccadcddcf3184777c04f9..93070848280a4e425f95de9842b5bbc42000fcfb 100644 (file)
@@ -549,8 +549,9 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
 extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
 extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
 extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
+extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
 extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
-extern const struct iwl_cfg iwl22260_2ax_cfg;
+extern const struct iwl_cfg iwl_ax200_cfg_cc;
 extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
 extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
 extern const struct iwl_cfg killer1650x_2ax_cfg;
index aea6d03e545a1db063f795c49a2b2c123b954452..e539bc94eff7fdcee8e4c979fe0581710768b275 100644 (file)
@@ -327,6 +327,7 @@ enum {
 #define CSR_HW_REV_TYPE_NONE           (0x00001F0)
 #define CSR_HW_REV_TYPE_QNJ            (0x0000360)
 #define CSR_HW_REV_TYPE_QNJ_B0         (0x0000364)
+#define CSR_HW_REV_TYPE_QUZ            (0x0000354)
 #define CSR_HW_REV_TYPE_HR_CDB         (0x0000340)
 #define CSR_HW_REV_TYPE_SO             (0x0000370)
 #define CSR_HW_REV_TYPE_TY             (0x0000420)
index bbebbf3efd57db1a2101a5e6cc02cf2095666dd7..d8690acee40c0c45668f6480b10daff295633500 100644 (file)
@@ -338,7 +338,6 @@ enum iwl_d3_status {
  *     are sent
  * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
  * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
- * @STATUS_FW_WAIT_DUMP: if set, wait until cleared before collecting dump
  */
 enum iwl_trans_status {
        STATUS_SYNC_HCMD_ACTIVE,
@@ -351,7 +350,6 @@ enum iwl_trans_status {
        STATUS_TRANS_GOING_IDLE,
        STATUS_TRANS_IDLE,
        STATUS_TRANS_DEAD,
-       STATUS_FW_WAIT_DUMP,
 };
 
 static inline int
@@ -618,6 +616,7 @@ struct iwl_trans_ops {
        struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
                                                 u32 dump_mask);
        void (*debugfs_cleanup)(struct iwl_trans *trans);
+       void (*sync_nmi)(struct iwl_trans *trans);
 };
 
 /**
@@ -831,7 +830,6 @@ struct iwl_trans {
        u32 lmac_error_event_table[2];
        u32 umac_error_event_table;
        unsigned int error_event_table_tlv_status;
-       wait_queue_head_t fw_halt_waitq;
 
        /* pointer to trans specific struct */
        /*Ensure that this pointer will always be aligned to sizeof pointer */
@@ -1239,10 +1237,12 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
        /* prevent double restarts due to the same erroneous FW */
        if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
                iwl_op_mode_nic_error(trans->op_mode);
+}
 
-       if (test_and_clear_bit(STATUS_FW_WAIT_DUMP, &trans->status))
-               wake_up(&trans->fw_halt_waitq);
-
+static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
+{
+       if (trans->ops->sync_nmi)
+               trans->ops->sync_nmi(trans);
 }
 
 /*****************************************************
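
iwl_trans_sync_nmi() follows the usual optional-ops-callback pattern: a static inline wrapper guards a possibly-NULL function pointer, so transports that do not implement ->sync_nmi need no stub. A generic sketch with illustrative names:

struct example_ops {
	void (*sync_nmi)(void *ctx);	/* optional; may be NULL */
};

struct example {
	const struct example_ops *ops;
	void *ctx;
};

static inline void example_sync_nmi(struct example *e)
{
	if (e->ops->sync_nmi)
		e->ops->sync_nmi(e->ctx);
}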
index 3a92c09d46926fa6d2d565df9e1479e7988ccde1..6a3b11dd2edf53cf4352178a56189c1ebe99de66 100644 (file)
@@ -2714,9 +2714,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
 
        iwl_mvm_mac_ctxt_remove(mvm, vif);
 
-       kfree(mvmvif->ap_wep_key);
-       mvmvif->ap_wep_key = NULL;
-
        mutex_unlock(&mvm->mutex);
 }
 
@@ -3183,24 +3180,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                ret = iwl_mvm_update_sta(mvm, vif, sta);
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTHORIZED) {
-               /* if wep is used, need to set the key for the station now */
-               if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
-                       mvm_sta->wep_key =
-                               kmemdup(mvmvif->ap_wep_key,
-                                       sizeof(*mvmvif->ap_wep_key) +
-                                       mvmvif->ap_wep_key->keylen,
-                                       GFP_KERNEL);
-                       if (!mvm_sta->wep_key) {
-                               ret = -ENOMEM;
-                               goto out_unlock;
-                       }
-
-                       ret = iwl_mvm_set_sta_key(mvm, vif, sta,
-                                                 mvm_sta->wep_key,
-                                                 STA_KEY_IDX_INVALID);
-               } else {
-                       ret = 0;
-               }
+               ret = 0;
 
                /* we don't support TDLS during DCM */
                if (iwl_mvm_phy_ctx_count(mvm) > 1)
@@ -3242,17 +3222,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                                                   NL80211_TDLS_DISABLE_LINK);
                }
 
-               /* Remove STA key if this is an AP using WEP */
-               if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
-                       int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta,
-                                                           mvm_sta->wep_key);
-
-                       if (!ret)
-                               ret = rm_ret;
-                       kfree(mvm_sta->wep_key);
-                       mvm_sta->wep_key = NULL;
-               }
-
                if (unlikely(ret &&
                             test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
                                      &mvm->status)))
@@ -3289,6 +3258,13 @@ static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
                                  struct ieee80211_sta *sta, u32 changed)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (changed & (IEEE80211_RC_BW_CHANGED |
+                      IEEE80211_RC_SUPP_RATES_CHANGED |
+                      IEEE80211_RC_NSS_CHANGED))
+               iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+                                    true);
 
        if (vif->type == NL80211_IFTYPE_STATION &&
            changed & IEEE80211_RC_NSS_CHANGED)
@@ -3439,20 +3415,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
                break;
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
-               if (vif->type == NL80211_IFTYPE_AP) {
-                       struct iwl_mvm_vif *mvmvif =
-                               iwl_mvm_vif_from_mac80211(vif);
-
-                       mvmvif->ap_wep_key = kmemdup(key,
-                                                    sizeof(*key) + key->keylen,
-                                                    GFP_KERNEL);
-                       if (!mvmvif->ap_wep_key)
-                               return -ENOMEM;
-               }
-
-               if (vif->type != NL80211_IFTYPE_STATION)
-                       return 0;
-               break;
+               if (vif->type == NL80211_IFTYPE_STATION)
+                       break;
+               if (iwl_mvm_has_new_tx_api(mvm))
+                       return -EOPNOTSUPP;
+               /* support HW crypto on TX */
+               return 0;
        default:
                /* currently FW supports only one optional cipher scheme */
                if (hw->n_cipher_schemes &&
@@ -3540,12 +3508,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
                ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
                if (ret) {
                        IWL_WARN(mvm, "set key failed\n");
+                       key->hw_key_idx = STA_KEY_IDX_INVALID;
                        /*
                         * can't add key for RX, but we don't need it
-                        * in the device for TX so still return 0
+                        * in the device for TX so still return 0,
+                        * unless we have new TX API where we cannot
+                        * put key material into the TX_CMD
                         */
-                       key->hw_key_idx = STA_KEY_IDX_INVALID;
-                       ret = 0;
+                       if (iwl_mvm_has_new_tx_api(mvm))
+                               ret = -EOPNOTSUPP;
+                       else
+                               ret = 0;
                }
 
                break;
index bca6f6b536d9754133c9ac8ab00e271c1f2bb06f..a50dc53df08698ff0afafbef8f74692007afc17b 100644 (file)
@@ -498,7 +498,6 @@ struct iwl_mvm_vif {
        netdev_features_t features;
 
        struct iwl_probe_resp_data __rcu *probe_resp_data;
-       struct ieee80211_key_conf *ap_wep_key;
 };
 
 static inline struct iwl_mvm_vif *
index 498c315291cfac599bd23df37fad6c3e28541201..98d123dd7177845ff1676df2dbbdb2c492d89f6f 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1399,7 +1399,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 
                iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
                list_del_init(&mvmtxq->list);
+               local_bh_disable();
                iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+               local_bh_enable();
        }
 
        mutex_unlock(&mvm->mutex);
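
The added local_bh_disable()/local_bh_enable() pair exists because the TX path being called was written for softirq context, while this worker runs in process context. A generic sketch of that bracketing:

#include <linux/bottom_half.h>

/* run a softirq-context-only callback safely from a worker */
static void xmit_from_worker(void (*xmit)(void *arg), void *arg)
{
	local_bh_disable();
	xmit(arg);
	local_bh_enable();
}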
@@ -2333,21 +2335,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
                                   timeout);
 
-       if (mvmvif->ap_wep_key) {
-               u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
-
-               __set_bit(key_offset, mvm->fw_key_table);
-
-               if (key_offset == STA_KEY_IDX_INVALID)
-                       return -ENOSPC;
-
-               ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
-                                          mvmvif->ap_wep_key, true, 0, NULL, 0,
-                                          key_offset, 0);
-               if (ret)
-                       return ret;
-       }
-
        return 0;
 }
 
@@ -2419,28 +2406,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
 
-       if (mvmvif->ap_wep_key) {
-               int i;
-
-               if (!__test_and_clear_bit(mvmvif->ap_wep_key->hw_key_idx,
-                                         mvm->fw_key_table)) {
-                       IWL_ERR(mvm, "offset %d not used in fw key table.\n",
-                               mvmvif->ap_wep_key->hw_key_idx);
-                       return -ENOENT;
-               }
-
-               /* track which key was deleted last */
-               for (i = 0; i < STA_KEY_MAX_NUM; i++) {
-                       if (mvm->fw_key_deleted[i] < U8_MAX)
-                               mvm->fw_key_deleted[i]++;
-               }
-               mvm->fw_key_deleted[mvmvif->ap_wep_key->hw_key_idx] = 0;
-               ret = __iwl_mvm_remove_sta_key(mvm, mvmvif->mcast_sta.sta_id,
-                                              mvmvif->ap_wep_key, true);
-               if (ret)
-                       return ret;
-       }
-
        ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
        if (ret)
                IWL_WARN(mvm, "Failed sending remove station\n");
index 79700c7310a1a3cf38162d6ed3c582fe7c6ed67c..b4d4071b865db90dc81fd8c2db7d410b66686f30 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -394,7 +394,6 @@ struct iwl_mvm_rxq_dup_data {
  *     the BA window. To be used for UAPSD only.
  * @ptk_pn: per-queue PTK PN data structures
  * @dup_data: per queue duplicate packet detection data
- * @wep_key: used in AP mode. Is a duplicate of the WEP key.
  * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
  * @tx_ant: the index of the antenna to use for data tx to this station. Only
  *     used during connection establishment (e.g. for the 4 way handshake
@@ -426,8 +425,6 @@ struct iwl_mvm_sta {
        struct iwl_mvm_key_pn __rcu *ptk_pn[4];
        struct iwl_mvm_rxq_dup_data *dup_data;
 
-       struct ieee80211_key_conf *wep_key;
-
        u8 reserved_queue;
 
        /* Temporary, until the new TLC will control the Tx protection */
index 2b94e4cef56cfc5fd25a0343f189116ca0e78c96..9f1af8da9dc181eb1dcc48b2f0fb1d1b7ffa9836 100644 (file)
@@ -953,14 +953,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
        {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)},
 
-       {IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)},
+       {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x0088, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x008C, iwl_ax200_cfg_cc)},
        {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)},
        {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)},
+       {IWL_PCI_DEVICE(0x2723, 0x2080, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},
 
        {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)},
        {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)},
index bf8b61a476c5b017fac5a94e6cd7eb894116d169..59213164f35e3814cd0d7618cf8f6f54fd873f5b 100644 (file)
@@ -1043,7 +1043,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
 
 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
 void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
-void iwl_trans_sync_nmi(struct iwl_trans *trans);
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
index fe8269d023def832e97701bc7c1bd0c57df05a96..79c1dc05f9488ddae52b2d820897b0d255afb1d3 100644 (file)
@@ -3318,7 +3318,8 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
        .unref = iwl_trans_pcie_unref,                                  \
        .dump_data = iwl_trans_pcie_dump_data,                          \
        .d3_suspend = iwl_trans_pcie_d3_suspend,                        \
-       .d3_resume = iwl_trans_pcie_d3_resume
+       .d3_resume = iwl_trans_pcie_d3_resume,                          \
+       .sync_nmi = iwl_trans_pcie_sync_nmi
 
 #ifdef CONFIG_PM_SLEEP
 #define IWL_TRANS_PM_OPS                                               \
@@ -3542,6 +3543,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                }
        } else if (cfg == &iwl_ax101_cfg_qu_hr) {
                if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+                   trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
+                       trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+               } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
                    CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
                        trans->cfg = &iwl_ax101_cfg_qu_hr;
                } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
@@ -3560,7 +3565,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                }
        } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
-                  (trans->cfg != &iwl22260_2ax_cfg ||
+                  (trans->cfg != &iwl_ax200_cfg_cc ||
                    trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
                u32 hw_status;
 
@@ -3637,7 +3642,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        return ERR_PTR(ret);
 }
 
-void iwl_trans_sync_nmi(struct iwl_trans *trans)
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
 {
        unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
 
index 88530d9f4a54ced4e6c8d081cedaf7b0354cde8b..38d11033898716b3e9c1c5fae581c692d4ae44fe 100644 (file)
@@ -965,7 +965,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
                               cmd_str);
                ret = -ETIMEDOUT;
 
-               iwl_trans_sync_nmi(trans);
+               iwl_trans_pcie_sync_nmi(trans);
                goto cancel;
        }
 
index 9fbd37d23e851caf0042ef2861263815893969cb..7be73e2c4681cadc48ed5a838068d419196b413f 100644 (file)
@@ -1960,7 +1960,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
                               iwl_get_cmd_string(trans, cmd->id));
                ret = -ETIMEDOUT;
 
-               iwl_trans_sync_nmi(trans);
+               iwl_trans_pcie_sync_nmi(trans);
                goto cancel;
        }
 
index 0838af04d681a3e37f71b8d47cb58435ffbac5fb..524eb580599571c9c4e4ee782d760e0287389ddd 100644 (file)
@@ -2644,7 +2644,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
        enum nl80211_band band;
        const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
        struct net *net;
-       int idx;
+       int idx, i;
        int n_limits = 0;
 
        if (WARN_ON(param->channels > 1 && !param->use_chanctx))
@@ -2768,12 +2768,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                goto failed_hw;
        }
 
+       data->if_combination.max_interfaces = 0;
+       for (i = 0; i < n_limits; i++)
+               data->if_combination.max_interfaces +=
+                       data->if_limits[i].max;
+
        data->if_combination.n_limits = n_limits;
-       data->if_combination.max_interfaces = 2048;
        data->if_combination.limits = data->if_limits;
 
-       hw->wiphy->iface_combinations = &data->if_combination;
-       hw->wiphy->n_iface_combinations = 1;
+       /*
+        * If we actually were asked to support combinations,
+        * advertise them - if there's only a single thing like
+        * only IBSS then don't advertise it as combinations.
+        */
+       if (data->if_combination.max_interfaces > 1) {
+               hw->wiphy->iface_combinations = &data->if_combination;
+               hw->wiphy->n_iface_combinations = 1;
+       }
 
        if (param->ciphers) {
                memcpy(data->ciphers, param->ciphers,
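
mac80211_hwsim now derives max_interfaces by summing the per-type limits instead of hardcoding 2048, and only advertises the combination when more than one interface is actually possible. A sketch of the summation:

#include <net/mac80211.h>

static u32 sum_iface_limits(const struct ieee80211_iface_limit *limits,
			    int n_limits)
{
	u32 total = 0;
	int i;

	for (i = 0; i < n_limits; i++)
		total += limits[i].max;

	return total;
}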
index d54dda67d036c19cffce6bc30765c39dc93ee326..3af45949e868909e3073335cc302411c5e6c9761 100644 (file)
@@ -510,6 +510,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
        bus_ops->rmw = mt7603_rmw;
        dev->mt76.bus = bus_ops;
 
+       spin_lock_init(&dev->ps_lock);
+
        INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work);
        tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
                     (unsigned long)dev);
index 5e31d7da96fc88e5fab246c61ec1d37a328a8700..5abc02b578185a6467571f549987dd147e2b3d3b 100644 (file)
@@ -343,7 +343,7 @@ void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
                 MT_BA_CONTROL_1_RESET));
 }
 
-void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
                            int ba_size)
 {
        u32 addr = mt7603_wtbl2_addr(wcid);
@@ -358,43 +358,6 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
                mt76_clear(dev, addr + (15 * 4), tid_mask);
                return;
        }
-       mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
-
-       mt7603_mac_stop(dev);
-       switch (tid) {
-       case 0:
-               mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn);
-               break;
-       case 1:
-               mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn);
-               break;
-       case 2:
-               mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO,
-                              ssn);
-               mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI,
-                              ssn >> 8);
-               break;
-       case 3:
-               mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn);
-               break;
-       case 4:
-               mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn);
-               break;
-       case 5:
-               mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO,
-                              ssn);
-               mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI,
-                              ssn >> 4);
-               break;
-       case 6:
-               mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn);
-               break;
-       case 7:
-               mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn);
-               break;
-       }
-       mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2);
-       mt7603_mac_start(dev);
 
        for (i = 7; i > 0; i--) {
                if (ba_size >= MT_AGG_SIZE_LIMIT(i))
@@ -827,6 +790,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_rate *rate = &info->control.rates[0];
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
        struct ieee80211_vif *vif = info->control.vif;
        struct mt7603_vif *mvif;
        int wlan_idx;
@@ -834,6 +798,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
        int tx_count = 8;
        u8 frame_type, frame_subtype;
        u16 fc = le16_to_cpu(hdr->frame_control);
+       u16 seqno = 0;
        u8 vif_idx = 0;
        u32 val;
        u8 bw;
@@ -919,7 +884,17 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
                tx_count = 0x1f;
 
        val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
-             FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl));
+                 MT_TXD3_SN_VALID;
+
+       if (ieee80211_is_data_qos(hdr->frame_control))
+               seqno = le16_to_cpu(hdr->seq_ctrl);
+       else if (ieee80211_is_back_req(hdr->frame_control))
+               seqno = le16_to_cpu(bar->start_seq_num);
+       else
+               val &= ~MT_TXD3_SN_VALID;
+
+       val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);
+
        txwi[3] = cpu_to_le32(val);
 
        if (key) {
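
The txwi change above picks the sequence number source by frame type:
the frame's own seq_ctrl for QoS data, the BAR's start_seq_num for
block-ack requests, and no valid seqno otherwise, in which case the
MT_TXD3_SN_VALID flag is cleared so the hardware assigns one. A
condensed stand-alone rendering of that decision; the flag and mask
values here are illustrative stand-ins, not the real MT_TXD3_*
definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define SN_VALID (1u << 27)     /* stand-in for MT_TXD3_SN_VALID */
    #define SEQ_MASK 0x0fffu        /* stand-in for MT_TXD3_SEQ */

    /* seq_ctrl packs the fragment number in bits 0-3 and the sequence
     * number in bits 4-15, hence the >> 4 before writing the field. */
    uint32_t build_txd3(int is_qos_data, int is_back_req,
                        uint16_t seq_ctrl, uint16_t bar_start_seq)
    {
            uint32_t val = SN_VALID;
            uint16_t seqno = 0;

            if (is_qos_data)
                    seqno = seq_ctrl;
            else if (is_back_req)
                    seqno = bar_start_seq;
            else
                    val &= ~SN_VALID;       /* hardware picks the seqno */

            return val | ((seqno >> 4) & SEQ_MASK);
    }

    int main(void)
    {
            printf("qos:  %#x\n", (unsigned)build_txd3(1, 0, 0x1230, 0));
            printf("mgmt: %#x\n", (unsigned)build_txd3(0, 0, 0, 0));
            return 0;
    }
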
index cc0fe0933b2d8043e622f1b513817b6528bbcaae..a3c4ef198bfeea965fb3f8d71e9d622cc546bb1a 100644 (file)
@@ -372,7 +372,7 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
        struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
        struct sk_buff_head list;
 
-       mt76_stop_tx_queues(&dev->mt76, sta, false);
+       mt76_stop_tx_queues(&dev->mt76, sta, true);
        mt7603_wtbl_set_ps(dev, msta, ps);
        if (ps)
                return;
@@ -584,13 +584,13 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        case IEEE80211_AMPDU_TX_OPERATIONAL:
                mtxq->aggr = true;
                mtxq->send_bar = false;
-               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size);
+               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, ba_size);
                break;
        case IEEE80211_AMPDU_TX_STOP_FLUSH:
        case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
                mtxq->aggr = false;
                ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
-               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
                break;
        case IEEE80211_AMPDU_TX_START:
                mtxq->agg_ssn = *ssn << 4;
@@ -598,7 +598,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                break;
        case IEEE80211_AMPDU_TX_STOP_CONT:
                mtxq->aggr = false;
-               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                break;
        }
index 79f3324294328b0f5b842a98ea23115b96470ca2..6049f3b7c8fec429de86329d35662c4659f711ee 100644 (file)
@@ -200,7 +200,7 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval);
 int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb);
 void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data);
 void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid);
-void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
                            int ba_size);
 
 void mt7603_pse_client_reset(struct mt7603_dev *dev);
index 9ed231abe91676119d751b06cfa995a7f5dd716c..4fe5a83ca5a41713d894a4210fe5ef0d68e47e17 100644 (file)
@@ -466,7 +466,6 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
                return;
 
        rcu_read_lock();
-       mt76_tx_status_lock(mdev, &list);
 
        if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
                wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
@@ -479,6 +478,8 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
                                          drv_priv);
        }
 
+       mt76_tx_status_lock(mdev, &list);
+
        if (wcid) {
                if (stat->pktid >= MT_PACKET_ID_FIRST)
                        status.skb = mt76_tx_status_skb_get(mdev, wcid,
@@ -498,7 +499,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
                if (*update == 0 && stat_val == stat_cache &&
                    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
                        msta->n_frames++;
-                       goto out;
+                       mt76_tx_status_unlock(mdev, &list);
+                       rcu_read_unlock();
+                       return;
                }
 
                mt76x02_mac_fill_tx_status(dev, status.info, &msta->status,
@@ -514,11 +517,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
 
        if (status.skb)
                mt76_tx_status_skb_done(mdev, status.skb, &list);
-       else
-               ieee80211_tx_status_ext(mt76_hw(dev), &status);
-
-out:
        mt76_tx_status_unlock(mdev, &list);
+
+       if (!status.skb)
+               ieee80211_tx_status_ext(mt76_hw(dev), &status);
        rcu_read_unlock();
 }
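
The reordering above narrows the tx-status lock to the list and queue
manipulation and calls back into mac80211 (ieee80211_tx_status_ext())
only after dropping it. The same shape in plain pthreads, as a sketch
of the pattern rather than the driver itself:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;
    static int queue[16];
    static int queue_len;

    static void report(int v)       /* may sleep or take other locks */
    {
            printf("status %d\n", v);
    }

    static void send_tx_status(void)
    {
            int v = -1;

            pthread_mutex_lock(&status_lock);
            if (queue_len > 0)
                    v = queue[--queue_len];
            pthread_mutex_unlock(&status_lock);

            /* Call out only after the unlock, mirroring how the hunk
             * moves ieee80211_tx_status_ext() outside the lock. */
            if (v >= 0)
                    report(v);
    }

    int main(void)
    {
            queue[queue_len++] = 42;
            send_tx_status();
            return 0;
    }
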
 
index 4b1744e9fb78a08c59fe0ac71d0d9962ae6761be..50b92ca92bd75c33d783ed9bfdf0f01f7d5ce0ae 100644 (file)
@@ -673,7 +673,6 @@ enum rt2x00_state_flags {
        CONFIG_CHANNEL_HT40,
        CONFIG_POWERSAVING,
        CONFIG_HT_DISABLED,
-       CONFIG_QOS_DISABLED,
        CONFIG_MONITORING,
 
        /*
index 2825560e2424dbc766c5d5489491ff7dc67c5211..e8462f25d2522c4dbe95215b3de0279213cdc2b4 100644 (file)
@@ -642,18 +642,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
                        rt2x00dev->intf_associated--;
 
                rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
-
-               clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
        }
 
-       /*
-        * Check for access point which do not support 802.11e . We have to
-        * generate data frames sequence number in S/W for such AP, because
-        * of H/W bug.
-        */
-       if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
-               set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
-
        /*
         * When the erp information has changed, we should perform
         * additional configuration steps. For all other changes we are done.
index 92ddc19e7bf747a23d0eb24c15b05ff111751754..4834b4eb0206408093a54d47b2a6a5831aa75674 100644 (file)
@@ -201,15 +201,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
        if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
                /*
                 * rt2800 has a H/W (or F/W) bug, device incorrectly increase
-                * seqno on retransmited data (non-QOS) frames. To workaround
-                * the problem let's generate seqno in software if QOS is
-                * disabled.
+                * seqno on retransmitted data (non-QOS) and management frames.
+                * To work around the problem, generate the seqno in software,
+                * except for beacons, which are transmitted periodically by
+                * H/W and hence need their seqno assigned by hardware.
                 */
-               if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
-                       __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
-               else
+               if (ieee80211_is_beacon(hdr->frame_control)) {
+                       __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
                        /* H/W will generate sequence number */
                        return;
+               }
+
+               __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
        }
 
        /*
index 2b26f762fbc3b3f5f837e27267d0de9fc1b9e5c8..01acb6e533655d6b6041cbbde43af8c1364aec60 100644 (file)
@@ -1074,6 +1074,12 @@ static const struct spi_device_id st95hf_id[] = {
 };
 MODULE_DEVICE_TABLE(spi, st95hf_id);
 
+static const struct of_device_id st95hf_spi_of_match[] = {
+        { .compatible = "st,st95hf" },
+        { },
+};
+MODULE_DEVICE_TABLE(of, st95hf_spi_of_match);
+
 static int st95hf_probe(struct spi_device *nfc_spi_dev)
 {
        int ret;
@@ -1260,6 +1266,7 @@ static struct spi_driver st95hf_driver = {
        .driver = {
                .name = "st95hf",
                .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(st95hf_spi_of_match),
        },
        .id_table = st95hf_id,
        .probe = st95hf_probe,
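
The st95hf hunks add an OF match table and hook it up via
of_match_ptr(), which lets udev autoload the module when a device tree
node with compatible = "st,st95hf" appears. The wrapper itself reduces
to the following idea (a simplification of the real definition in
<linux/of.h>), so the table reference compiles away entirely on
kernels built without CONFIG_OF:

    #ifdef CONFIG_OF
    #define of_match_ptr(ptr) (ptr)
    #else
    #define of_match_ptr(ptr) NULL
    #endif
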
index b72a303176c70962e04f8304a816c78f812512c1..9486acc08402db3a17079c0ec2589ce445bb23d2 100644 (file)
@@ -198,14 +198,15 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
                return NULL;
 
        nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL);
-       if (nd_btt->id < 0) {
-               kfree(nd_btt);
-               return NULL;
-       }
+       if (nd_btt->id < 0)
+               goto out_nd_btt;
 
        nd_btt->lbasize = lbasize;
-       if (uuid)
+       if (uuid) {
                uuid = kmemdup(uuid, 16, GFP_KERNEL);
+               if (!uuid)
+                       goto out_put_id;
+       }
        nd_btt->uuid = uuid;
        dev = &nd_btt->dev;
        dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id);
@@ -220,6 +221,13 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
                return NULL;
        }
        return dev;
+
+out_put_id:
+       ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
+
+out_nd_btt:
+       kfree(nd_btt);
+       return NULL;
 }
 
 struct device *nd_btt_create(struct nd_region *nd_region)
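
The __nd_btt_create() rework above converts an inline error return
into the kernel's standard goto-unwind shape: each later failure jumps
to a label that releases everything acquired so far, in reverse order,
so the cleanup exists in exactly one place. A stand-alone sketch of
the pattern, with toy resources standing in for ida_simple_get() and
friends:

    #include <stdlib.h>
    #include <string.h>

    static int next_id;
    static int acquire_id(void)    { return next_id++; } /* cf. ida_simple_get() */
    static void release_id(int id) { (void)id; }         /* cf. ida_simple_remove() */

    struct widget { int id; char *name; };

    struct widget *widget_create(const char *name)
    {
            struct widget *w = calloc(1, sizeof(*w));

            if (!w)
                    return NULL;

            w->id = acquire_id();
            if (w->id < 0)
                    goto out_free;

            w->name = strdup(name);
            if (!w->name)
                    goto out_put_id;        /* undo the id, then the alloc */

            return w;

    out_put_id:
            release_id(w->id);
    out_free:
            free(w);
            return NULL;
    }

    int main(void)
    {
            struct widget *w = widget_create("btt0.0");

            if (w) {
                    release_id(w->id);
                    free(w->name);
                    free(w);
            }
            return 0;
    }
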
index 7849bf1812c47e64f76e16c0ccf8f0ccc6f3bc25..f293556cbbf6d747004b132a23c440296ec760f7 100644 (file)
@@ -2249,9 +2249,12 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
        if (!nsblk->uuid)
                goto blk_err;
        memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
-       if (name[0])
+       if (name[0]) {
                nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
                                GFP_KERNEL);
+               if (!nsblk->alt_name)
+                       goto blk_err;
+       }
        res = nsblk_add_resource(nd_region, ndd, nsblk,
                        __le64_to_cpu(nd_label->dpa));
        if (!res)
index bc2f700feef8abdad873197237f34f765055c22f..0279eb1da3ef5ae40c5ab80ef6940732dca03bf0 100644 (file)
@@ -113,13 +113,13 @@ static void write_pmem(void *pmem_addr, struct page *page,
 
        while (len) {
                mem = kmap_atomic(page);
-               chunk = min_t(unsigned int, len, PAGE_SIZE);
+               chunk = min_t(unsigned int, len, PAGE_SIZE - off);
                memcpy_flushcache(pmem_addr, mem + off, chunk);
                kunmap_atomic(mem);
                len -= chunk;
                off = 0;
                page++;
-               pmem_addr += PAGE_SIZE;
+               pmem_addr += chunk;
        }
 }
 
@@ -132,7 +132,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
 
        while (len) {
                mem = kmap_atomic(page);
-               chunk = min_t(unsigned int, len, PAGE_SIZE);
+               chunk = min_t(unsigned int, len, PAGE_SIZE - off);
                rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
                kunmap_atomic(mem);
                if (rem)
@@ -140,7 +140,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
                len -= chunk;
                off = 0;
                page++;
-               pmem_addr += PAGE_SIZE;
+               pmem_addr += chunk;
        }
        return BLK_STS_OK;
 }
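
Both loops above had the same pair of bugs: the first chunk ignored
the initial page offset, so a copy starting mid-page could run past
the end of the mapped page, and the media-side pointer advanced by a
full page even when fewer bytes were copied. The corrected loop shape,
as a stand-alone sketch over an array of page-sized buffers:

    #include <stddef.h>
    #include <string.h>

    #define PAGE_SZ 4096u

    /* Copy len bytes beginning at offset off into pages[0]; only the
     * first iteration can start mid-page, so off is zeroed after it,
     * and the flat destination advances by the bytes actually copied. */
    void copy_from_pages(char *dst, char *const pages[],
                         size_t off, size_t len)
    {
            while (len) {
                    size_t avail = PAGE_SZ - off;
                    size_t chunk = len < avail ? len : avail;

                    memcpy(dst, pages[0] + off, chunk);
                    len -= chunk;
                    off = 0;
                    pages++;
                    dst += chunk;   /* was: dst += PAGE_SZ (the bug) */
            }
    }
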
index f8bb746a549f7b993dcf61f052acde8303d11cae..a570f2263a424e96908c559750454a086a3df3e2 100644 (file)
@@ -22,6 +22,8 @@ static bool key_revalidate = true;
 module_param(key_revalidate, bool, 0444);
 MODULE_PARM_DESC(key_revalidate, "Require key validation at init.");
 
+static const char zero_key[NVDIMM_PASSPHRASE_LEN];
+
 static void *key_data(struct key *key)
 {
        struct encrypted_key_payload *epayload = dereference_key_locked(key);
@@ -75,6 +77,16 @@ static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
        return key;
 }
 
+static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
+               struct key **key)
+{
+       *key = nvdimm_request_key(nvdimm);
+       if (!*key)
+               return zero_key;
+
+       return key_data(*key);
+}
+
 static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
                key_serial_t id, int subclass)
 {
@@ -105,36 +117,57 @@ static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
        return key;
 }
 
-static struct key *nvdimm_key_revalidate(struct nvdimm *nvdimm)
+static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
+               key_serial_t id, int subclass, struct key **key)
+{
+       *key = NULL;
+       if (id == 0) {
+               if (subclass == NVDIMM_BASE_KEY)
+                       return zero_key;
+               else
+                       return NULL;
+       }
+
+       *key = nvdimm_lookup_user_key(nvdimm, id, subclass);
+       if (!*key)
+               return NULL;
+
+       return key_data(*key);
+}
+
+
+static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
 {
        struct key *key;
        int rc;
+       const void *data;
 
        if (!nvdimm->sec.ops->change_key)
-               return NULL;
+               return -EOPNOTSUPP;
 
-       key = nvdimm_request_key(nvdimm);
-       if (!key)
-               return NULL;
+       data = nvdimm_get_key_payload(nvdimm, &key);
 
        /*
         * Send the same key to the hardware as new and old key to
         * verify that the key is good.
         */
-       rc = nvdimm->sec.ops->change_key(nvdimm, key_data(key),
-                       key_data(key), NVDIMM_USER);
+       rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER);
        if (rc < 0) {
                nvdimm_put_key(key);
-               key = NULL;
+               return rc;
        }
-       return key;
+
+       nvdimm_put_key(key);
+       nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
+       return 0;
 }
 
 static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
 {
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-       struct key *key = NULL;
+       struct key *key;
+       const void *data;
        int rc;
 
        /* The bus lock should be held at the top level of the call stack */
@@ -160,16 +193,11 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
                if (!key_revalidate)
                        return 0;
 
-               key = nvdimm_key_revalidate(nvdimm);
-               if (!key)
-                       return nvdimm_security_freeze(nvdimm);
+               return nvdimm_key_revalidate(nvdimm);
        } else
-               key = nvdimm_request_key(nvdimm);
+               data = nvdimm_get_key_payload(nvdimm, &key);
 
-       if (!key)
-               return -ENOKEY;
-
-       rc = nvdimm->sec.ops->unlock(nvdimm, key_data(key));
+       rc = nvdimm->sec.ops->unlock(nvdimm, data);
        dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");
 
@@ -195,6 +223,7 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct key *key;
        int rc;
+       const void *data;
 
        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -214,11 +243,12 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
                return -EBUSY;
        }
 
-       key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
-       if (!key)
+       data = nvdimm_get_user_key_payload(nvdimm, keyid,
+                       NVDIMM_BASE_KEY, &key);
+       if (!data)
                return -ENOKEY;
 
-       rc = nvdimm->sec.ops->disable(nvdimm, key_data(key));
+       rc = nvdimm->sec.ops->disable(nvdimm, data);
        dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");
 
@@ -235,6 +265,7 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct key *key, *newkey;
        int rc;
+       const void *data, *newdata;
 
        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -249,22 +280,19 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
                return -EIO;
        }
 
-       if (keyid == 0)
-               key = NULL;
-       else {
-               key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
-               if (!key)
-                       return -ENOKEY;
-       }
+       data = nvdimm_get_user_key_payload(nvdimm, keyid,
+                       NVDIMM_BASE_KEY, &key);
+       if (!data)
+               return -ENOKEY;
 
-       newkey = nvdimm_lookup_user_key(nvdimm, new_keyid, NVDIMM_NEW_KEY);
-       if (!newkey) {
+       newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid,
+                       NVDIMM_NEW_KEY, &newkey);
+       if (!newdata) {
                nvdimm_put_key(key);
                return -ENOKEY;
        }
 
-       rc = nvdimm->sec.ops->change_key(nvdimm, key ? key_data(key) : NULL,
-                       key_data(newkey), pass_type);
+       rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type);
        dev_dbg(dev, "key: %d %d update%s: %s\n",
                        key_serial(key), key_serial(newkey),
                        pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
@@ -286,8 +314,9 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
 {
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-       struct key *key;
+       struct key *key = NULL;
        int rc;
+       const void *data;
 
        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -319,11 +348,12 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
                return -EOPNOTSUPP;
        }
 
-       key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
-       if (!key)
+       data = nvdimm_get_user_key_payload(nvdimm, keyid,
+                       NVDIMM_BASE_KEY, &key);
+       if (!data)
                return -ENOKEY;
 
-       rc = nvdimm->sec.ops->erase(nvdimm, key_data(key), pass_type);
+       rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
        dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
                        pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
                        rc == 0 ? "success" : "fail");
@@ -337,8 +367,9 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
 {
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-       struct key *key;
+       struct key *key = NULL;
        int rc;
+       const void *data;
 
        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -368,15 +399,12 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
                return -EBUSY;
        }
 
-       if (keyid == 0)
-               key = NULL;
-       else {
-               key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
-               if (!key)
-                       return -ENOKEY;
-       }
+       data = nvdimm_get_user_key_payload(nvdimm, keyid,
+                       NVDIMM_BASE_KEY, &key);
+       if (!data)
+               return -ENOKEY;
 
-       rc = nvdimm->sec.ops->overwrite(nvdimm, key ? key_data(key) : NULL);
+       rc = nvdimm->sec.ops->overwrite(nvdimm, data);
        dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");
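
After this refactor every security op follows one calling shape:
nvdimm_get_user_key_payload() hands back either the user key's
payload, the static all-zero passphrase for keyid 0, or NULL on lookup
failure. A hypothetical caller, assembled only from names visible in
the hunk above (kernel context assumed, not a standalone program):

    struct key *key = NULL;
    const void *data;
    int rc;

    data = nvdimm_get_user_key_payload(nvdimm, keyid,
                                       NVDIMM_BASE_KEY, &key);
    if (!data)
            return -ENOKEY;

    rc = nvdimm->sec.ops->disable(nvdimm, data);

    /* key stays NULL in the zero-key case; the callers above rely on
     * nvdimm_put_key() tolerating that. */
    nvdimm_put_key(key);
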
 
index 810ab0fbcccbf844d6af9fe6eb40e155355d9731..d820f3edd4311821696e6045d843024166b83dfc 100644 (file)
@@ -7,7 +7,6 @@
  */
 #include <linux/etherdevice.h>
 #include <linux/kernel.h>
-#include <linux/nvmem-consumer.h>
 #include <linux/of_net.h>
 #include <linux/phy.h>
 #include <linux/export.h>
index 0420f7e8ad5b0f926b56e5fe33adf82753aa70a5..0e7703fe733fc79cf672687d610d1a50f25aa6fb 100644 (file)
@@ -526,6 +526,60 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
 
+/**
+ * dev_pm_opp_find_freq_ceil_by_volt() - Find OPP with highest frequency for
+ *                                      target voltage.
+ * @dev:       Device for which we do this operation.
+ * @u_volt:    Target voltage.
+ *
+ * Search for the OPP with the highest (ceil) frequency whose voltage is
+ * less than or equal to u_volt.
+ *
+ * Return: the matching *opp, else an ERR_PTR on error; check with IS_ERR.
+ *
+ * Error return values can be:
+ * EINVAL:     bad parameters
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
+                                                    unsigned long u_volt)
+{
+       struct opp_table *opp_table;
+       struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+       if (!dev || !u_volt) {
+               dev_err(dev, "%s: Invalid argument volt=%lu\n", __func__,
+                       u_volt);
+               return ERR_PTR(-EINVAL);
+       }
+
+       opp_table = _find_opp_table(dev);
+       if (IS_ERR(opp_table))
+               return ERR_CAST(opp_table);
+
+       mutex_lock(&opp_table->lock);
+
+       list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+               if (temp_opp->available) {
+                       if (temp_opp->supplies[0].u_volt > u_volt)
+                               break;
+                       opp = temp_opp;
+               }
+       }
+
+       /* Increment the reference count of OPP */
+       if (!IS_ERR(opp))
+               dev_pm_opp_get(opp);
+
+       mutex_unlock(&opp_table->lock);
+       dev_pm_opp_put_opp_table(opp_table);
+
+       return opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt);
+
 static int _set_opp_voltage(struct device *dev, struct regulator *reg,
                            struct dev_pm_opp_supply *supply)
 {
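
A hypothetical caller of the new helper, showing the reference-count
contract the kerneldoc spells out (the voltage cap is illustrative;
kernel context assumed):

    struct dev_pm_opp *opp;
    unsigned long freq;

    opp = dev_pm_opp_find_freq_ceil_by_volt(dev, 950000 /* uV */);
    if (IS_ERR(opp))
            return PTR_ERR(opp);

    freq = dev_pm_opp_get_freq(opp);
    dev_pm_opp_put(opp);    /* drop the reference the lookup took */
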
index ad969d9fc9815a173385588e034a1c650ba6c868..c2644a9fe80f1f1432e6e62ce6cbb7d8fbf0986b 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Power supply driver for the goldfish emulator
  *
index 6e294b4d3635fe399586f05045297646d9c8c574..f89f9d02e7884f321f858f18a020e122d83c8a03 100644 (file)
@@ -2004,14 +2004,14 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
        blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
 
 raw:
-       block->blocks = (private->real_cyl *
+       block->blocks = ((unsigned long) private->real_cyl *
                          private->rdc_data.trk_per_cyl *
                          blk_per_trk);
 
        dev_info(&device->cdev->dev,
-                "DASD with %d KB/block, %d KB total size, %d KB/track, "
+                "DASD with %u KB/block, %lu KB total size, %u KB/track, "
                 "%s\n", (block->bp_block >> 10),
-                ((private->real_cyl *
+                (((unsigned long) private->real_cyl *
                   private->rdc_data.trk_per_cyl *
                   blk_per_trk * (block->bp_block >> 9)) >> 1),
                 ((blk_per_trk * block->bp_block) >> 10),
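
The casts matter because all three factors are 32-bit, so the product
wraps at 2^32 before any implicit widening to the 64-bit destination.
A stand-alone demonstration with illustrative geometry values; it
assumes an LP64 target such as s390x, where unsigned long is 64 bits:

    #include <stdio.h>

    int main(void)
    {
            unsigned int real_cyl = 100000, trk_per_cyl = 15,
                         blk_per_trk = 3390;

            /* The 32-bit multiply wraps despite the wider lvalue: */
            unsigned long wrong = real_cyl * trk_per_cyl * blk_per_trk;

            /* Casting the first factor forces 64-bit arithmetic
             * throughout, as in the hunk: */
            unsigned long right =
                    (unsigned long)real_cyl * trk_per_cyl * blk_per_trk;

            printf("wrong=%lu right=%lu\n", wrong, right);
            return 0;
    }
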
index fd2146bcc0add9aae3b71ba4cc88b788b7702591..e17364e13d2f71ec289a47f6a79f7c56ae85b264 100644 (file)
@@ -629,7 +629,7 @@ con3270_init(void)
                     (void (*)(unsigned long)) con3270_read_tasklet,
                     (unsigned long) condev->read);
 
-       raw3270_add_view(&condev->view, &con3270_fn, 1);
+       raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ);
 
        INIT_LIST_HEAD(&condev->freemem);
        for (i = 0; i < CON3270_STRING_PAGES; i++) {
index 8f3a2eeb28dca0b579d2d773057296e92f379342..8b48ba9c598ecedcac5ca78c86f97d3587e71c7d 100644 (file)
@@ -463,7 +463,8 @@ fs3270_open(struct inode *inode, struct file *filp)
 
        init_waitqueue_head(&fp->wait);
        fp->fs_pid = get_pid(task_pid(current));
-       rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
+       rc = raw3270_add_view(&fp->view, &fs3270_fn, minor,
+                             RAW3270_VIEW_LOCK_BH);
        if (rc) {
                fs3270_free_view(&fp->view);
                goto out;
index f8cd2935fbfd48c5aef1ad980457cc55433b6db4..63a41b16876102a8f1210396f1970d0d5e77df18 100644 (file)
@@ -920,7 +920,7 @@ raw3270_deactivate_view(struct raw3270_view *view)
  * Add view to device with minor "minor".
  */
 int
-raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
+raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass)
 {
        unsigned long flags;
        struct raw3270 *rp;
@@ -942,6 +942,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
                view->cols = rp->cols;
                view->ascebc = rp->ascebc;
                spin_lock_init(&view->lock);
+               lockdep_set_subclass(&view->lock, subclass);
                list_add(&view->list, &rp->view_list);
                rc = 0;
                spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
index 114ca7cbf8897dce734e59cb283923e2c160b3bf..3afaa35f73513cba47566e9601b775339e6cdf78 100644 (file)
@@ -150,6 +150,8 @@ struct raw3270_fn {
 struct raw3270_view {
        struct list_head list;
        spinlock_t lock;
+#define RAW3270_VIEW_LOCK_IRQ  0
+#define RAW3270_VIEW_LOCK_BH   1
        atomic_t ref_count;
        struct raw3270 *dev;
        struct raw3270_fn *fn;
@@ -158,7 +160,7 @@ struct raw3270_view {
        unsigned char *ascebc;          /* ascii -> ebcdic table */
 };
 
-int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
+int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int);
 int raw3270_activate_view(struct raw3270_view *);
 void raw3270_del_view(struct raw3270_view *);
 void raw3270_deactivate_view(struct raw3270_view *);
index 2b0c36c2c5688ebf6ef0266d66cad52793b7ae1b..98d7fc152e32f85e8e53e1e56b26244753c67a00 100644 (file)
@@ -980,7 +980,8 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
                return PTR_ERR(tp);
 
        rc = raw3270_add_view(&tp->view, &tty3270_fn,
-                             tty->index + RAW3270_FIRSTMINOR);
+                             tty->index + RAW3270_FIRSTMINOR,
+                             RAW3270_VIEW_LOCK_BH);
        if (rc) {
                tty3270_free_view(tp);
                return rc;
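
All three raw3270_add_view() callers now pass a lock subclass. Every
view lock is initialized by the same spin_lock_init() and therefore
shares one lockdep class, but the console view is taken from IRQ
context while the fs and tty views are taken from bh context; distinct
subclasses let lockdep track the two usage patterns separately instead
of reporting a false inversion. The init shape, as a sketch (kernel
context assumed):

    static void view_lock_init(spinlock_t *lock, int subclass)
    {
            spin_lock_init(lock);
            /* Same class key, distinct subclass per usage context:
             * RAW3270_VIEW_LOCK_IRQ vs RAW3270_VIEW_LOCK_BH. */
            lockdep_set_subclass(lock, subclass);
    }
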
index 6a340f2c355693170776992c6a1d018e78d6ee96..5ea83dc4f1d740e9db1288ed9d8f70423312d5ba 100644 (file)
@@ -751,8 +751,8 @@ void ap_queue_prepare_remove(struct ap_queue *aq)
        __ap_flush_queue(aq);
        /* set REMOVE state to prevent new messages are queued in */
        aq->state = AP_STATE_REMOVE;
-       del_timer_sync(&aq->timeout);
        spin_unlock_bh(&aq->lock);
+       del_timer_sync(&aq->timeout);
 }
 
 void ap_queue_remove(struct ap_queue *aq)
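
Moving del_timer_sync() after the unlock avoids a classic deadlock:
del_timer_sync() waits for a running timer handler to finish, so
calling it while holding a lock the handler itself takes can never
complete. The safe ordering from the hunk, restated with the
reasoning inline (kernel context assumed):

    spin_lock_bh(&aq->lock);
    aq->state = AP_STATE_REMOVE;    /* no new messages get queued */
    spin_unlock_bh(&aq->lock);
    /* Only now wait out the handler, which may take aq->lock: */
    del_timer_sync(&aq->timeout);
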
index 3e85d665c572957aa491917b1433b5254812b0f6..45eb0c14b8807d17c228ef563506e82b6a50d533 100644 (file)
@@ -51,7 +51,8 @@ static debug_info_t *debug_info;
 
 static void __init pkey_debug_init(void)
 {
-       debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long));
+       /* 5 arguments per dbf entry (including the format string ptr) */
+       debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
        debug_register_view(debug_info, &debug_sprintf_view);
        debug_set_level(debug_info, 3);
 }
index 7617d21cb2960618cbc097bbf85cb8515234aa14..f63c5c871d3ddf48f4a88fe3c2b2db684394c7b3 100644 (file)
@@ -1595,6 +1595,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
                if (priv->channel[direction] == NULL) {
                        if (direction == CTCM_WRITE)
                                channel_free(priv->channel[CTCM_READ]);
+                       result = -ENODEV;
                        goto out_dev;
                }
                priv->channel[direction]->netdev = dev;
index 3d401d02c01955bc02304fe4f761993e73eaad46..bdd177e3d76229bae1fb67003a9045a3f3e26c87 100644 (file)
@@ -91,6 +91,7 @@ aic7770_probe(struct device *dev)
        ahc = ahc_alloc(&aic7xxx_driver_template, name);
        if (ahc == NULL)
                return (ENOMEM);
+       ahc->dev = dev;
        error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data,
                               eisaBase);
        if (error != 0) {
index 5614921b4041acf4a10c004646d93fa1aa0ebff6..88b90f9806c99d04cd07fa632e243e5b5528d651 100644 (file)
@@ -943,6 +943,7 @@ struct ahc_softc {
         * Platform specific device information.
         */
        ahc_dev_softc_t           dev_softc;
+       struct device             *dev;
 
        /*
         * Bus specific device information.
index 3c9c17450bb399b0a9885270c2070bfb7fa6b24c..d5c4a0d2370620afe5a0fe3ad39bd44025c14429 100644 (file)
@@ -860,8 +860,8 @@ int
 ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
                 int flags, bus_dmamap_t *mapp)
 {
-       *vaddr = pci_alloc_consistent(ahc->dev_softc,
-                                     dmat->maxsize, mapp);
+       /* XXX: check if we really need the GFP_ATOMIC and unwind this mess! */
+       *vaddr = dma_alloc_coherent(ahc->dev, dmat->maxsize, mapp, GFP_ATOMIC);
        if (*vaddr == NULL)
                return ENOMEM;
        return 0;
@@ -871,8 +871,7 @@ void
 ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
                void* vaddr, bus_dmamap_t map)
 {
-       pci_free_consistent(ahc->dev_softc, dmat->maxsize,
-                           vaddr, map);
+       dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map);
 }
 
 int
@@ -1123,8 +1122,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
 
        host->transportt = ahc_linux_transport_template;
 
-       retval = scsi_add_host(host,
-                       (ahc->dev_softc ? &ahc->dev_softc->dev : NULL));
+       retval = scsi_add_host(host, ahc->dev);
        if (retval) {
                printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n");
                scsi_host_put(host);
index 0fc14dac7070ce6bab629c08a2998638fc26553e..717d8d1082ce18ae9899870e43238c1443fdb36f 100644 (file)
@@ -250,6 +250,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        }
        ahc->dev_softc = pci;
+       ahc->dev = &pci->dev;
        error = ahc_pci_config(ahc, entry);
        if (error != 0) {
                ahc_free(ahc);
index dfba4921b265a0fa7fdf2409295483a68c5e181c..5bf61431434be73a381fc9d59b6fee3b9441cab7 100644 (file)
@@ -2162,7 +2162,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
                FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
                             fc_rport_state(rdata));
 
-               rdata->flags &= ~FC_RP_STARTED;
                fc_rport_enter_delete(rdata, RPORT_EV_STOP);
                mutex_unlock(&rdata->rp_mutex);
                kref_put(&rdata->kref, fc_rport_destroy);
index 601b9f1de26758a1d078a69de36469ad4318d39d..07dfc17d48246551a63966444172b4197e30def4 100644 (file)
@@ -1706,8 +1706,12 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
                        ret = BLK_STS_DEV_RESOURCE;
                break;
        default:
+               if (unlikely(!scsi_device_online(sdev)))
+                       scsi_req(req)->result = DID_NO_CONNECT << 16;
+               else
+                       scsi_req(req)->result = DID_ERROR << 16;
                /*
-                * Make sure to release all allocated ressources when
+                * Make sure to release all allocated resources when
                 * we hit an error, as we will never see this command
                 * again.
                 */
index 808ed92ed66fe4bedfbbba500452d86771e8162e..1bb1cb6513491b805075456e41c45a253e77c83a 100644 (file)
@@ -463,10 +463,8 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev)
 
        size = usb_endpoint_maxp(devpriv->ep_tx);
        devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
-       if (!devpriv->usb_tx_buf) {
-               kfree(devpriv->usb_rx_buf);
+       if (!devpriv->usb_tx_buf)
                return -ENOMEM;
-       }
 
        return 0;
 }
@@ -518,6 +516,9 @@ static int ni6501_auto_attach(struct comedi_device *dev,
        if (!devpriv)
                return -ENOMEM;
 
+       mutex_init(&devpriv->mut);
+       usb_set_intfdata(intf, devpriv);
+
        ret = ni6501_find_endpoints(dev);
        if (ret)
                return ret;
@@ -526,9 +527,6 @@ static int ni6501_auto_attach(struct comedi_device *dev,
        if (ret)
                return ret;
 
-       mutex_init(&devpriv->mut);
-       usb_set_intfdata(intf, devpriv);
-
        ret = comedi_alloc_subdevices(dev, 2);
        if (ret)
                return ret;
index 6234b649d887ccb3abac4c73dcb38aa095768600..65dc6c51037e30edf30b1ad7e0d6eea7c0390e86 100644 (file)
@@ -682,10 +682,8 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
 
        size = usb_endpoint_maxp(devpriv->ep_tx);
        devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
-       if (!devpriv->usb_tx_buf) {
-               kfree(devpriv->usb_rx_buf);
+       if (!devpriv->usb_tx_buf)
                return -ENOMEM;
-       }
 
        return 0;
 }
@@ -800,6 +798,8 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
 
        devpriv->model = board->model;
 
+       sema_init(&devpriv->limit_sem, 8);
+
        ret = vmk80xx_find_usb_endpoints(dev);
        if (ret)
                return ret;
@@ -808,8 +808,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
        if (ret)
                return ret;
 
-       sema_init(&devpriv->limit_sem, 8);
-
        usb_set_intfdata(intf, devpriv);
 
        if (devpriv->model == VMK8055_MODEL)
index 526e0dbea5b5714618b463cb3eab98b0895e99f6..81af768e7248e514699541552e2eb2cd99e1bc5e 100644 (file)
@@ -298,7 +298,7 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio,
        *last_block = current_block;
 
        /* shift in advance in case of it followed by too many gaps */
-       if (unlikely(bio->bi_vcnt >= bio->bi_max_vecs)) {
+       if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
                /* err should reassign to 0 after submitting */
                err = 0;
                goto submit_bio_out;
index acdbc07fd2592c03084a0c6fb6e89aee073f58fd..2fc8bc22b57baa39a3d4a8cd56ae0f3d2d0a0af2 100644 (file)
 #define AD7192_CH_AIN3         BIT(6) /* AIN3 - AINCOM */
 #define AD7192_CH_AIN4         BIT(7) /* AIN4 - AINCOM */
 
-#define AD7193_CH_AIN1P_AIN2M  0x000  /* AIN1(+) - AIN2(-) */
-#define AD7193_CH_AIN3P_AIN4M  0x001  /* AIN3(+) - AIN4(-) */
-#define AD7193_CH_AIN5P_AIN6M  0x002  /* AIN5(+) - AIN6(-) */
-#define AD7193_CH_AIN7P_AIN8M  0x004  /* AIN7(+) - AIN8(-) */
+#define AD7193_CH_AIN1P_AIN2M  0x001  /* AIN1(+) - AIN2(-) */
+#define AD7193_CH_AIN3P_AIN4M  0x002  /* AIN3(+) - AIN4(-) */
+#define AD7193_CH_AIN5P_AIN6M  0x004  /* AIN5(+) - AIN6(-) */
+#define AD7193_CH_AIN7P_AIN8M  0x008  /* AIN7(+) - AIN8(-) */
 #define AD7193_CH_TEMP         0x100 /* Temp senseor */
 #define AD7193_CH_AIN2P_AIN2M  0x200 /* AIN2(+) - AIN2(-) */
 #define AD7193_CH_AIN1         0x401 /* AIN1 - AINCOM */
index 029c3bf42d4d942f2e58c81cfb03292fc8eae0d3..07774c000c5a68db9f7f6c1e93eae1840fac23ec 100644 (file)
@@ -269,7 +269,7 @@ static IIO_DEV_ATTR_VPEAK(0644,
 static IIO_DEV_ATTR_IPEAK(0644,
                ade7854_read_32bit,
                ade7854_write_32bit,
-               ADE7854_VPEAK);
+               ADE7854_IPEAK);
 static IIO_DEV_ATTR_APHCAL(0644,
                ade7854_read_16bit,
                ade7854_write_16bit,
index 18936cdb10830ae4506435377a3342bb0c2e076e..956daf8c3bd24f9b1ccce2a254c26ea5e75e7ba9 100644 (file)
@@ -1431,7 +1431,7 @@ int most_register_interface(struct most_interface *iface)
 
        INIT_LIST_HEAD(&iface->p->channel_list);
        iface->p->dev_id = id;
-       snprintf(iface->p->name, STRING_SIZE, "mdev%d", id);
+       strcpy(iface->p->name, iface->description);
        iface->dev.init_name = iface->p->name;
        iface->dev.bus = &mc.bus;
        iface->dev.parent = &mc.dev;
index 09a183dfc52640027bf571184ee4e69e819c5951..a31db15cd7c0d36bf2e4dee32d7b1201bc2674c5 100644 (file)
@@ -1520,11 +1520,13 @@ static int __init sc16is7xx_init(void)
 #endif
        return ret;
 
+#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
 err_spi:
+#endif
 #ifdef CONFIG_SERIAL_SC16IS7XX_I2C
        i2c_del_driver(&sc16is7xx_i2c_uart_driver);
-#endif
 err_i2c:
+#endif
        uart_unregister_driver(&sc16is7xx_uart);
        return ret;
 }
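
The fix above puts each error label under the same #ifdef as the goto
statements that target it, so every Kconfig combination compiles
without unused-label errors and unwinds exactly what was registered.
A stand-alone model of the pattern; HAVE_I2C/HAVE_SPI and the undo_*()
helpers are stand-ins:

    #include <stdio.h>

    #define HAVE_I2C
    #define HAVE_SPI

    static void undo_i2c(void)  { puts("i2c_del_driver"); }
    static void undo_uart(void) { puts("uart_unregister_driver"); }

    static int demo_init(int i2c_fail, int spi_fail)
    {
            int ret = 0;

    #ifdef HAVE_I2C
            if (i2c_fail) { ret = -1; goto err_i2c; }
    #endif
    #ifdef HAVE_SPI
            if (spi_fail) { ret = -1; goto err_spi; }
    #endif
            return 0;

    #ifdef HAVE_SPI
    err_spi:
    #endif
    #ifdef HAVE_I2C
            undo_i2c();             /* I2C succeeded before SPI failed */
    err_i2c:
    #endif
            undo_uart();
            return ret;
    }

    int main(void)
    {
            demo_init(0, 1);
            return 0;
    }
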
index 2d1c626312cd8892d5eae0fa65e03d3347a09e81..3cd139752d3f70f9dfce1fe2c43f3eab03cf433a 100644 (file)
@@ -2512,14 +2512,16 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
                         * center of the last stop bit in sampling clocks.
                         */
                        int last_stop = bits * 2 - 1;
-                       int deviation = min_err * srr * last_stop / 2 / baud;
+                       int deviation = DIV_ROUND_CLOSEST(min_err * last_stop *
+                                                         (int)(srr + 1),
+                                                         2 * (int)baud);
 
                        if (abs(deviation) >= 2) {
                                /* At least two sampling clocks off at the
                                 * last stop bit; we can increase the error
                                 * margin by shifting the sampling point.
                                 */
-                               int shift = min(-8, max(7, deviation / 2));
+                               int shift = clamp(deviation / 2, -8, 7);
 
                                hssrr |= (shift << HSCIF_SRHP_SHIFT) &
                                         HSCIF_SRHP_MASK;
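
The replaced expression min(-8, max(7, deviation / 2)) is an inverted
clamp: max(7, x) can never be below 7, so the outer min(-8, ...)
always evaluates to -8. clamp() nests the comparisons the right way
round. A stand-alone demonstration with local macros matching the
kernel semantics for these int arguments:

    #include <stdio.h>

    #define min(a, b)        ((a) < (b) ? (a) : (b))
    #define max(a, b)        ((a) > (b) ? (a) : (b))
    #define clamp(v, lo, hi) min(max(v, lo), hi)

    int main(void)
    {
            int d;

            for (d = -24; d <= 24; d += 8) {
                    int wrong = min(-8, max(7, d / 2)); /* always -8 */
                    int right = clamp(d / 2, -8, 7);

                    printf("d=%3d wrong=%3d right=%3d\n",
                           d, wrong, right);
            }
            return 0;
    }
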
index d34984aa646dc4d30813fdfb91290fbef958d0fb..650c66886c80f5d1c9770321949251af17e112a6 100644 (file)
@@ -1520,7 +1520,8 @@ static void csi_J(struct vc_data *vc, int vpar)
                        return;
        }
        scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
-       update_region(vc, (unsigned long) start, count);
+       if (con_should_update(vc))
+               do_update_region(vc, (unsigned long) start, count);
        vc->vc_need_wrap = 0;
 }
 
index 5ace833de74620bf1a089186057d766e7e3def63..351af88231ada1145bfb72326f905bfaac3819ca 100644 (file)
@@ -911,8 +911,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem,
                                u64 start, u64 size, u64 end,
                                u64 userspace_addr, int perm)
 {
-       struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+       struct vhost_umem_node *tmp, *node;
 
+       if (!size)
+               return -EFAULT;
+
+       node = kmalloc(sizeof(*node), GFP_ATOMIC);
        if (!node)
                return -ENOMEM;
 
index 1c7955f5cdaf2e776026390f615806f3e6ce535c..128f2dbe256a4eb0f6124294f883b29d8a57e10e 100644 (file)
@@ -203,8 +203,7 @@ void afs_put_cb_interest(struct afs_net *net, struct afs_cb_interest *cbi)
  */
 void afs_init_callback_state(struct afs_server *server)
 {
-       if (!test_and_clear_bit(AFS_SERVER_FL_NEW, &server->flags))
-               server->cb_s_break++;
+       server->cb_s_break++;
 }
 
 /*
index 8ee5972893ed5a75583bfb2821a42636403ee086..2f8acb4c556d28c77ec6d8c130eaaf3916a38403 100644 (file)
@@ -34,7 +34,7 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *);
 static int afs_deliver_yfs_cb_callback(struct afs_call *);
 
 #define CM_NAME(name) \
-       const char afs_SRXCB##name##_name[] __tracepoint_string =       \
+       char afs_SRXCB##name##_name[] __tracepoint_string =     \
                "CB." #name
 
 /*
index 1a4ce07fb406da8e3a4e0d12c6fda605636ccb47..9cedc3fc1b7744679010f4aae412c92925cd3b3a 100644 (file)
@@ -216,9 +216,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
        set_nlink(inode, 2);
        inode->i_uid            = GLOBAL_ROOT_UID;
        inode->i_gid            = GLOBAL_ROOT_GID;
-       inode->i_ctime.tv_sec   = get_seconds();
-       inode->i_ctime.tv_nsec  = 0;
-       inode->i_atime          = inode->i_mtime = inode->i_ctime;
+       inode->i_ctime = inode->i_atime = inode->i_mtime = current_time(inode);
        inode->i_blocks         = 0;
        inode_set_iversion_raw(inode, 0);
        inode->i_generation     = 0;
index bb1f244b2b3ac2ff4a8428a8f72132a6ed230e01..3904ab0b95632af35c4db8fcad36ee6e1f277b47 100644 (file)
@@ -474,7 +474,6 @@ struct afs_server {
        time64_t                put_time;       /* Time at which last put */
        time64_t                update_at;      /* Time at which to next update the record */
        unsigned long           flags;
-#define AFS_SERVER_FL_NEW      0               /* New server, don't inc cb_s_break */
 #define AFS_SERVER_FL_NOT_READY        1               /* The record is not ready for use */
 #define AFS_SERVER_FL_NOT_FOUND        2               /* VL server says no such server */
 #define AFS_SERVER_FL_VL_FAIL  3               /* Failed to access VL server */
@@ -827,7 +826,7 @@ static inline struct afs_cb_interest *afs_get_cb_interest(struct afs_cb_interest
 
 static inline unsigned int afs_calc_vnode_cb_break(struct afs_vnode *vnode)
 {
-       return vnode->cb_break + vnode->cb_s_break + vnode->cb_v_break;
+       return vnode->cb_break + vnode->cb_v_break;
 }
 
 static inline bool afs_cb_is_broken(unsigned int cb_break,
@@ -835,7 +834,6 @@ static inline bool afs_cb_is_broken(unsigned int cb_break,
                                    const struct afs_cb_interest *cbi)
 {
        return !cbi || cb_break != (vnode->cb_break +
-                                   cbi->server->cb_s_break +
                                    vnode->volume->cb_v_break);
 }
 
index 2c588f9bbbda226ec64fa0670e9c92c700f259e6..15c7e82d80cb30c0358db68f416b8d88b06dbc7c 100644 (file)
@@ -572,13 +572,17 @@ static void afs_deliver_to_call(struct afs_call *call)
                case -ENODATA:
                case -EBADMSG:
                case -EMSGSIZE:
-               default:
                        abort_code = RXGEN_CC_UNMARSHAL;
                        if (state != AFS_CALL_CL_AWAIT_REPLY)
                                abort_code = RXGEN_SS_UNMARSHAL;
                        rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
                                                abort_code, ret, "KUM");
                        goto local_abort;
+               default:
+                       abort_code = RX_USER_ABORT;
+                       rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+                                               abort_code, ret, "KER");
+                       goto local_abort;
                }
        }
 
@@ -610,6 +614,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
        bool stalled = false;
        u64 rtt;
        u32 life, last_life;
+       bool rxrpc_complete = false;
 
        DECLARE_WAITQUEUE(myself, current);
 
@@ -621,7 +626,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
                rtt2 = 2;
 
        timeout = rtt2;
-       last_life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
+       rxrpc_kernel_check_life(call->net->socket, call->rxcall, &last_life);
 
        add_wait_queue(&call->waitq, &myself);
        for (;;) {
@@ -639,7 +644,12 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
                if (afs_check_call_state(call, AFS_CALL_COMPLETE))
                        break;
 
-               life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
+               if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall, &life)) {
+                       /* rxrpc terminated the call. */
+                       rxrpc_complete = true;
+                       break;
+               }
+
                if (timeout == 0 &&
                    life == last_life && signal_pending(current)) {
                        if (stalled)
@@ -663,12 +673,16 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
        remove_wait_queue(&call->waitq, &myself);
        __set_current_state(TASK_RUNNING);
 
-       /* Kill off the call if it's still live. */
        if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) {
-               _debug("call interrupted");
-               if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-                                           RX_USER_ABORT, -EINTR, "KWI"))
-                       afs_set_call_complete(call, -EINTR, 0);
+               if (rxrpc_complete) {
+                       afs_set_call_complete(call, call->error, call->abort_code);
+               } else {
+                       /* Kill off the call if it's still live. */
+                       _debug("call interrupted");
+                       if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+                                                   RX_USER_ABORT, -EINTR, "KWI"))
+                               afs_set_call_complete(call, -EINTR, 0);
+               }
        }
 
        spin_lock_bh(&call->state_lock);
index 642afa2e9783c4f95284980dd8054610fa4d49cf..65b33b6da48b9411c8385a27869785d5076713b1 100644 (file)
@@ -226,7 +226,6 @@ static struct afs_server *afs_alloc_server(struct afs_net *net,
        RCU_INIT_POINTER(server->addresses, alist);
        server->addr_version = alist->version;
        server->uuid = *uuid;
-       server->flags = (1UL << AFS_SERVER_FL_NEW);
        server->update_at = ktime_get_real_seconds() + afs_server_update_delay;
        rwlock_init(&server->fs_lock);
        INIT_HLIST_HEAD(&server->cb_volumes);
index 72efcfcf9f95efd2b5cae1257a8d01247367ebeb..0122d7445fba1e07eaf62b4be1d1e69c66e5f7c4 100644 (file)
@@ -264,6 +264,7 @@ static void afs_kill_pages(struct address_space *mapping,
                                first = page->index + 1;
                        lock_page(page);
                        generic_error_remove_page(mapping, page);
+                       unlock_page(page);
                }
 
                __pagevec_release(&pv);
index 920bf3b4b0ef5e5296d3cec2c2e82a8ab78dc0ac..cccc75d15970cbc61e8e1bcd1735fb28dc549123 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/highmem.h>
+#include <linux/sched/mm.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -427,9 +428,13 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
        unsigned long this_sum_bytes = 0;
        int i;
        u64 offset;
+       unsigned nofs_flag;
+
+       nofs_flag = memalloc_nofs_save();
+       sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
+                      GFP_KERNEL);
+       memalloc_nofs_restore(nofs_flag);
 
-       sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
-                      GFP_NOFS);
        if (!sums)
                return BLK_STS_RESOURCE;
 
@@ -472,8 +477,10 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
 
                                bytes_left = bio->bi_iter.bi_size - total_bytes;
 
-                               sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
-                                              GFP_NOFS);
+                               nofs_flag = memalloc_nofs_save();
+                               sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
+                                                     bytes_left), GFP_KERNEL);
+                               memalloc_nofs_restore(nofs_flag);
                                BUG_ON(!sums); /* -ENOMEM */
                                sums->len = bytes_left;
                                ordered = btrfs_lookup_ordered_extent(inode,
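
Two things change in tandem here: the checksum arrays move to
kvzalloc() so large allocations can fall back to vmalloc(), and the
calls are bracketed by memalloc_nofs_save()/restore() because a plain
GFP_NOFS request cannot take the vmalloc path, while an unscoped
GFP_KERNEL one could recurse into the filesystem from reclaim on the
writeback path. The resulting idiom, restated (kernel context
assumed):

    unsigned int nofs_flag;
    void *sums;

    /* Scoped NOFS: reclaim triggered by this allocation will not
     * re-enter the filesystem, yet kvzalloc() still sees GFP_KERNEL
     * and may fall back to vmalloc() for large sizes. */
    nofs_flag = memalloc_nofs_save();
    sums = kvzalloc(size, GFP_KERNEL);
    memalloc_nofs_restore(nofs_flag);
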
index 6fde2b2741ef13b2bdabfac4ad29937d7e784afa..45e3cfd1198bc29265d28a360f3d261a70144953 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/writeback.h>
+#include <linux/sched/mm.h>
 #include "ctree.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
@@ -442,7 +443,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
-                       kfree(sum);
+                       kvfree(sum);
                }
                kmem_cache_free(btrfs_ordered_extent_cache, entry);
        }
index a8f429882249476303868e4b68f272653ad72ebb..0637149fb9f9a7d26b383a2abedc08bb026d301e 100644 (file)
@@ -1766,6 +1766,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
 unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
 {
        struct ceph_inode_info *dci = ceph_inode(dir);
+       unsigned hash;
 
        switch (dci->i_dir_layout.dl_dir_hash) {
        case 0: /* for backward compat */
@@ -1773,8 +1774,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
                return dn->d_name.hash;
 
        default:
-               return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
+               spin_lock(&dn->d_lock);
+               hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
                                     dn->d_name.name, dn->d_name.len);
+               spin_unlock(&dn->d_lock);
+               return hash;
        }
 }
 
index 2d61ddda9bf5653fb559fb320422fd84ec470419..c2feb310ac1e0d7bd0c263f4d5944cc2c22852ac 100644 (file)
@@ -1163,6 +1163,19 @@ static int splice_dentry(struct dentry **pdn, struct inode *in)
        return 0;
 }
 
+static int d_name_cmp(struct dentry *dentry, const char *name, size_t len)
+{
+       int ret;
+
+       /* take d_lock to ensure dentry->d_name stability */
+       spin_lock(&dentry->d_lock);
+       ret = dentry->d_name.len - len;
+       if (!ret)
+               ret = memcmp(dentry->d_name.name, name, len);
+       spin_unlock(&dentry->d_lock);
+       return ret;
+}
+
 /*
  * Incorporate results into the local cache.  This is either just
  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
@@ -1412,7 +1425,8 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
                err = splice_dentry(&req->r_dentry, in);
                if (err < 0)
                        goto done;
-       } else if (rinfo->head->is_dentry) {
+       } else if (rinfo->head->is_dentry &&
+                  !d_name_cmp(req->r_dentry, rinfo->dname, rinfo->dname_len)) {
                struct ceph_vino *ptvino = NULL;
 
                if ((le32_to_cpu(rinfo->diri.in->cap.caps) & CEPH_CAP_FILE_SHARED) ||
index 21c33ed048ed7095aa2347159ab472d8550b9721..9049c2a3e972f499ea1371e8c4b8112ead98a6e3 100644 (file)
@@ -1414,6 +1414,15 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                        list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
                        ci->i_prealloc_cap_flush = NULL;
                }
+
+               if (drop &&
+                   ci->i_wrbuffer_ref_head == 0 &&
+                   ci->i_wr_ref == 0 &&
+                   ci->i_dirty_caps == 0 &&
+                   ci->i_flushing_caps == 0) {
+                       ceph_put_snap_context(ci->i_head_snapc);
+                       ci->i_head_snapc = NULL;
+               }
        }
        spin_unlock(&ci->i_ceph_lock);
        while (!list_empty(&to_remove)) {
@@ -2161,10 +2170,39 @@ char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
        return path;
 }
 
+/* Duplicate the dentry->d_name.name safely */
+static int clone_dentry_name(struct dentry *dentry, const char **ppath,
+                            int *ppathlen)
+{
+       u32 len;
+       char *name;
+
+retry:
+       len = READ_ONCE(dentry->d_name.len);
+       name = kmalloc(len + 1, GFP_NOFS);
+       if (!name)
+               return -ENOMEM;
+
+       spin_lock(&dentry->d_lock);
+       if (dentry->d_name.len != len) {
+               spin_unlock(&dentry->d_lock);
+               kfree(name);
+               goto retry;
+       }
+       memcpy(name, dentry->d_name.name, len);
+       spin_unlock(&dentry->d_lock);
+
+       name[len] = '\0';
+       *ppath = name;
+       *ppathlen = len;
+       return 0;
+}
+
 static int build_dentry_path(struct dentry *dentry, struct inode *dir,
                             const char **ppath, int *ppathlen, u64 *pino,
-                            int *pfreepath)
+                            bool *pfreepath, bool parent_locked)
 {
+       int ret;
        char *path;
 
        rcu_read_lock();
@@ -2173,8 +2211,15 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
        if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
                *pino = ceph_ino(dir);
                rcu_read_unlock();
-               *ppath = dentry->d_name.name;
-               *ppathlen = dentry->d_name.len;
+               if (parent_locked) {
+                       *ppath = dentry->d_name.name;
+                       *ppathlen = dentry->d_name.len;
+               } else {
+                       ret = clone_dentry_name(dentry, ppath, ppathlen);
+                       if (ret)
+                               return ret;
+                       *pfreepath = true;
+               }
                return 0;
        }
        rcu_read_unlock();
@@ -2182,13 +2227,13 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
        if (IS_ERR(path))
                return PTR_ERR(path);
        *ppath = path;
-       *pfreepath = 1;
+       *pfreepath = true;
        return 0;
 }
 
 static int build_inode_path(struct inode *inode,
                            const char **ppath, int *ppathlen, u64 *pino,
-                           int *pfreepath)
+                           bool *pfreepath)
 {
        struct dentry *dentry;
        char *path;
@@ -2204,7 +2249,7 @@ static int build_inode_path(struct inode *inode,
        if (IS_ERR(path))
                return PTR_ERR(path);
        *ppath = path;
-       *pfreepath = 1;
+       *pfreepath = true;
        return 0;
 }
 
@@ -2215,7 +2260,7 @@ static int build_inode_path(struct inode *inode,
 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
                                  struct inode *rdiri, const char *rpath,
                                  u64 rino, const char **ppath, int *pathlen,
-                                 u64 *ino, int *freepath)
+                                 u64 *ino, bool *freepath, bool parent_locked)
 {
        int r = 0;
 
@@ -2225,7 +2270,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
                     ceph_snap(rinode));
        } else if (rdentry) {
                r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
-                                       freepath);
+                                       freepath, parent_locked);
                dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
                     *ppath);
        } else if (rpath || rino) {
@@ -2251,7 +2296,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
        const char *path2 = NULL;
        u64 ino1 = 0, ino2 = 0;
        int pathlen1 = 0, pathlen2 = 0;
-       int freepath1 = 0, freepath2 = 0;
+       bool freepath1 = false, freepath2 = false;
        int len;
        u16 releases;
        void *p, *end;
@@ -2259,16 +2304,19 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
 
        ret = set_request_path_attr(req->r_inode, req->r_dentry,
                              req->r_parent, req->r_path1, req->r_ino1.ino,
-                             &path1, &pathlen1, &ino1, &freepath1);
+                             &path1, &pathlen1, &ino1, &freepath1,
+                             test_bit(CEPH_MDS_R_PARENT_LOCKED,
+                                       &req->r_req_flags));
        if (ret < 0) {
                msg = ERR_PTR(ret);
                goto out;
        }
 
+       /* If r_old_dentry is set, then assume that its parent is locked */
        ret = set_request_path_attr(NULL, req->r_old_dentry,
                              req->r_old_dentry_dir,
                              req->r_path2, req->r_ino2.ino,
-                             &path2, &pathlen2, &ino2, &freepath2);
+                             &path2, &pathlen2, &ino2, &freepath2, true);
        if (ret < 0) {
                msg = ERR_PTR(ret);
                goto out_free1;
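
Both callers above key off parent_locked: with the parent directory's i_rwsem held a concurrent rename cannot change the dentry's name, so it is safe to point *ppath at d_name in place; without it, the name must be snapshotted under d_lock, which is what clone_dentry_name() (its tail is visible at the top of this hunk) does. A plausible reconstruction of the full helper around that visible tail, with the allocation size and GFP flags assumed rather than taken from the patch:

static int clone_dentry_name(struct dentry *dentry, const char **ppath,
			     int *ppathlen)
{
	int len;
	char *name;

	/* Allocate before taking d_lock: spinlocks forbid sleeping. */
	name = kmalloc(NAME_MAX + 1, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	/* d_lock stabilizes d_name against a concurrent rename. */
	spin_lock(&dentry->d_lock);
	len = dentry->d_name.len;
	memcpy(name, dentry->d_name.name, len);
	spin_unlock(&dentry->d_lock);

	name[len] = '\0';
	*ppath = name;
	*ppathlen = len;
	return 0;
}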
index 89aa37fa0f84c55fe3324e50b554f6fcef5b5be5..b26e12cd8ec3317f44e04baaca375826a974e632 100644 (file)
@@ -572,7 +572,12 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
        old_snapc = NULL;
 
 update_snapc:
-       if (ci->i_head_snapc) {
+       if (ci->i_wrbuffer_ref_head == 0 &&
+           ci->i_wr_ref == 0 &&
+           ci->i_dirty_caps == 0 &&
+           ci->i_flushing_caps == 0) {
+               ci->i_head_snapc = NULL;
+       } else {
                ci->i_head_snapc = ceph_get_snap_context(new_snapc);
                dout(" new snapc is %p\n", new_snapc);
        }
index 5b18d45857409eb06624a894e2308e6728c39ba3..585ad3207cb120a34c3da24e418dd20a6daf04cf 100644 (file)
@@ -1333,6 +1333,7 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
 }
 
 struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler);
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
 
 #define CIFS_CACHE_READ_FLG    1
@@ -1855,6 +1856,7 @@ GLOBAL_EXTERN spinlock_t gidsidlock;
 #endif /* CONFIG_CIFS_ACL */
 
 void cifs_oplock_break(struct work_struct *work);
+void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
 
 extern const struct slow_work_ops cifs_oplock_break_ops;
 extern struct workqueue_struct *cifsiod_wq;
index 89006e044973ec2d97ad784236f66cb447431693..7037a137fa5330c807da19a91acd054d76ad031a 100644 (file)
@@ -360,12 +360,30 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
        return cifs_file;
 }
 
-/*
- * Release a reference on the file private data. This may involve closing
- * the filehandle out on the server. Must be called without holding
- * tcon->open_file_lock and cifs_file->file_info_lock.
+/**
+ * cifsFileInfo_put - release a reference to the file private data
+ *
+ * Always potentially waits for the oplock handler. See _cifsFileInfo_put().
  */
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+{
+       _cifsFileInfo_put(cifs_file, true);
+}
+
+/**
+ * _cifsFileInfo_put - release a reference to the file private data
+ *
+ * This may involve closing the filehandle @cifs_file out on the
+ * server. Must be called without holding tcon->open_file_lock and
+ * cifs_file->file_info_lock.
+ *
+ * If @wait_oplock_handler is true and we are releasing the last
+ * reference, wait for any running oplock break handler of the file
+ * and cancel any pending one. When calling this function from the
+ * oplock break handler itself, pass false to avoid waiting on our
+ * own work item.
+ *
+ */
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
 {
        struct inode *inode = d_inode(cifs_file->dentry);
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
@@ -414,7 +432,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 
        spin_unlock(&tcon->open_file_lock);
 
-       oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
+       oplock_break_cancelled = wait_oplock_handler ?
+               cancel_work_sync(&cifs_file->oplock_break) : false;
 
        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
@@ -2858,7 +2877,6 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
        struct cifs_tcon *tcon;
        struct cifs_sb_info *cifs_sb;
        struct dentry *dentry = ctx->cfile->dentry;
-       unsigned int i;
        int rc;
 
        tcon = tlink_tcon(ctx->cfile->tlink);
@@ -2922,10 +2940,6 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
                kref_put(&wdata->refcount, cifs_uncached_writedata_release);
        }
 
-       if (!ctx->direct_io)
-               for (i = 0; i < ctx->npages; i++)
-                       put_page(ctx->bv[i].bv_page);
-
        cifs_stats_bytes_written(tcon, ctx->total_len);
        set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
 
@@ -3563,7 +3577,6 @@ collect_uncached_read_data(struct cifs_aio_ctx *ctx)
        struct iov_iter *to = &ctx->iter;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
-       unsigned int i;
        int rc;
 
        tcon = tlink_tcon(ctx->cfile->tlink);
@@ -3647,15 +3660,8 @@ collect_uncached_read_data(struct cifs_aio_ctx *ctx)
                kref_put(&rdata->refcount, cifs_uncached_readdata_release);
        }
 
-       if (!ctx->direct_io) {
-               for (i = 0; i < ctx->npages; i++) {
-                       if (ctx->should_dirty)
-                               set_page_dirty(ctx->bv[i].bv_page);
-                       put_page(ctx->bv[i].bv_page);
-               }
-
+       if (!ctx->direct_io)
                ctx->total_len = ctx->len - iov_iter_count(to);
-       }
 
        /* mask nodata case */
        if (rc == -ENODATA)
@@ -4603,6 +4609,7 @@ void cifs_oplock_break(struct work_struct *work)
                                                             cinode);
                cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
        }
+       _cifsFileInfo_put(cfile, false /* do not wait for ourselves */);
        cifs_done_oplock_break(cinode);
 }
 
index 53fdb5df0d2ebd67b2687b76d98441ee7faa41be..538fd7d807e476f9998820b2abc93ae7f6a7c127 100644 (file)
@@ -1735,6 +1735,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
        if (rc == 0 || rc != -EBUSY)
                goto do_rename_exit;
 
+       /* Don't fall back to SMB1 open-file rename on an SMB2+ mount */
+       if (server->vals->protocol_id != 0)
+               goto do_rename_exit;
+
        /* open-file renames don't work across directories */
        if (to_dentry->d_parent != from_dentry->d_parent)
                goto do_rename_exit;
index bee203055b300b1d13a284e9571225f7ba4888c8..0dc6f08020acbc81dbc99966cb229842c688acb2 100644 (file)
@@ -501,8 +501,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
                                           CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
                                           &pCifsInode->flags);
 
-                               queue_work(cifsoplockd_wq,
-                                          &netfile->oplock_break);
+                               cifs_queue_oplock_break(netfile);
                                netfile->oplock_break_cancelled = false;
 
                                spin_unlock(&tcon->open_file_lock);
@@ -607,6 +606,28 @@ void cifs_put_writer(struct cifsInodeInfo *cinode)
        spin_unlock(&cinode->writers_lock);
 }
 
+/**
+ * cifs_queue_oplock_break - queue the oplock break handler for cfile
+ *
+ * This function is called from the demultiplex thread when it
+ * receives an oplock break for @cfile.
+ *
+ * Assumes the tcon->open_file_lock is held.
+ * Assumes cfile->file_info_lock is NOT held.
+ */
+void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
+{
+       /*
+        * Bump the handle refcount now while we hold the
+        * open_file_lock, to guarantee the handle stays valid for the
+        * oplock break handler. The matching put is done at the end
+        * of the handler.
+        */
+       cifsFileInfo_get(cfile);
+
+       queue_work(cifsoplockd_wq, &cfile->oplock_break);
+}
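
The reference taken here is the one dropped by the _cifsFileInfo_put(cfile, false) call added at the end of cifs_oplock_break() above; passing false keeps the handler from cancel_work_sync()-ing its own work item. Stripped of the cifs specifics, this is the usual get-before-queue / put-in-handler pairing; a minimal sketch with hypothetical names:

/* Hypothetical sketch: pin an object for the lifetime of queued work. */
struct obj {
	struct kref ref;
	struct work_struct work;
};

static void obj_release(struct kref *ref)
{
	kfree(container_of(ref, struct obj, ref));
}

static void obj_work_fn(struct work_struct *work)
{
	struct obj *o = container_of(work, struct obj, work);

	/* ... handle the event ... */

	kref_put(&o->ref, obj_release);	/* matches the get in obj_queue() */
}

static void obj_queue(struct obj *o)
{
	kref_get(&o->ref);	/* keep o alive until the handler has run */
	queue_work(system_wq, &o->work);
}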
+
 void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
 {
        clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
@@ -768,6 +789,11 @@ cifs_aio_ctx_alloc(void)
 {
        struct cifs_aio_ctx *ctx;
 
+       /*
+        * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
+        * to false so that we know when we have to drop the page references
+        * in cifs_aio_ctx_release()
+        */
        ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;
@@ -786,7 +812,23 @@ cifs_aio_ctx_release(struct kref *refcount)
                                        struct cifs_aio_ctx, refcount);
 
        cifsFileInfo_put(ctx->cfile);
-       kvfree(ctx->bv);
+
+       /*
+        * ctx->bv is only set if setup_aio_ctx_iter() was called successfully,
+        * which means that iov_iter_get_pages() succeeded and thus that
+        * we have taken references on the pages.
+        */
+       if (ctx->bv) {
+               unsigned i;
+
+               for (i = 0; i < ctx->npages; i++) {
+                       if (ctx->should_dirty)
+                               set_page_dirty(ctx->bv[i].bv_page);
+                       put_page(ctx->bv[i].bv_page);
+               }
+               kvfree(ctx->bv);
+       }
+
        kfree(ctx);
 }
 
index 0e3570e40ff8e8d233389063290ccdecdfb44a25..e311f58dc1c82809de0283e434a9aff09356825f 100644 (file)
@@ -555,7 +555,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
                        clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
                                  &cinode->flags);
 
-               queue_work(cifsoplockd_wq, &cfile->oplock_break);
+               cifs_queue_oplock_break(cfile);
                kfree(lw);
                return true;
        }
@@ -712,8 +712,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
                                           CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
                                           &cinode->flags);
                                spin_unlock(&cfile->file_info_lock);
-                               queue_work(cifsoplockd_wq,
-                                          &cfile->oplock_break);
+
+                               cifs_queue_oplock_break(cfile);
 
                                spin_unlock(&tcon->open_file_lock);
                                spin_unlock(&cifs_tcp_ses_lock);
index 00225e699d036c079441d53ee896e7abcdda1149..c36ff0d1fe2a8b7b2668466464fc9da9e45a774f 100644 (file)
@@ -2389,6 +2389,8 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
 
        rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov,
                       &resp_buftype);
+       if (!rc)
+               SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
        if (!rc || !err_iov.iov_base) {
                rc = -ENOENT;
                goto free_path;
index 21ad01d55ab2d32f0a4406c4cd0704cfab80026a..a37774a55f3aa1b8598ebd30b063fff67d4cb32f 100644 (file)
@@ -832,8 +832,11 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
                } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
                        /* ops are set to 3.0 by default, so update them */
                        ses->server->ops = &smb21_operations;
-               } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
+                       ses->server->vals = &smb21_values;
+               } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
                        ses->server->ops = &smb311_operations;
+                       ses->server->vals = &smb311_values;
+               }
        } else if (le16_to_cpu(rsp->DialectRevision) !=
                                ses->server->vals->protocol_id) {
                /* if requested single dialect ensure returned dialect matched */
@@ -3448,8 +3451,6 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
        rqst.rq_nvec = 1;
 
        rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
-       cifs_small_buf_release(req);
-
        rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
 
        if (rc) {
@@ -3465,12 +3466,15 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
                                    io_parms->tcon->tid, ses->Suid,
                                    io_parms->offset, 0);
                free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+               cifs_small_buf_release(req);
                return rc == -ENODATA ? 0 : rc;
        } else
                trace_smb3_read_done(xid, req->PersistentFileId,
                                    io_parms->tcon->tid, ses->Suid,
                                    io_parms->offset, io_parms->length);
 
+       cifs_small_buf_release(req);
+
        *nbytes = le32_to_cpu(rsp->DataLength);
        if ((*nbytes > CIFS_MAX_MSGSIZE) ||
            (*nbytes > io_parms->length)) {
@@ -3769,7 +3773,6 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
 
        rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
                            &resp_buftype, flags, &rsp_iov);
-       cifs_small_buf_release(req);
        rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
 
        if (rc) {
@@ -3787,6 +3790,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
                                     io_parms->offset, *nbytes);
        }
 
+       cifs_small_buf_release(req);
        free_rsp_buf(resp_buftype, rsp);
        return rc;
 }
index ca0671d55aa699df6723ffb897706b6579c68780..e5e54da1715f630cf1471e625e72045b6f31e112 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -33,6 +33,7 @@
 #include <linux/sizes.h>
 #include <linux/mmu_notifier.h>
 #include <linux/iomap.h>
+#include <asm/pgalloc.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -1407,7 +1408,9 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        unsigned long pmd_addr = vmf->address & PMD_MASK;
+       struct vm_area_struct *vma = vmf->vma;
        struct inode *inode = mapping->host;
+       pgtable_t pgtable = NULL;
        struct page *zero_page;
        spinlock_t *ptl;
        pmd_t pmd_entry;
@@ -1422,12 +1425,22 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
        *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
                        DAX_PMD | DAX_ZERO_PAGE, false);
 
+       if (arch_needs_pgtable_deposit()) {
+               pgtable = pte_alloc_one(vma->vm_mm);
+               if (!pgtable)
+                       return VM_FAULT_OOM;
+       }
+
        ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
        if (!pmd_none(*(vmf->pmd))) {
                spin_unlock(ptl);
                goto fallback;
        }
 
+       if (pgtable) {
+               pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+               mm_inc_nr_ptes(vma->vm_mm);
+       }
        pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
        pmd_entry = pmd_mkhuge(pmd_entry);
        set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
@@ -1436,6 +1449,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
        return VM_FAULT_NOPAGE;
 
 fallback:
+       if (pgtable)
+               pte_free(vma->vm_mm, pgtable);
        trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
        return VM_FAULT_FALLBACK;
 }
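
The hunk follows the usual deposit protocol for architectures where arch_needs_pgtable_deposit() returns true: preallocate the pte page before taking the pmd lock (pte_alloc_one() may sleep), deposit it only once the pmd is confirmed empty, and free it on any path that does not install the huge entry. Condensed into a sketch (hypothetical function, not the dax code itself):

static vm_fault_t install_huge_entry(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr, pmd_t entry)
{
	pgtable_t pgtable = NULL;
	spinlock_t *ptl;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(mm);	/* may sleep: before pmd_lock */
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		/* Lost a race: someone else populated the pmd. */
		spin_unlock(ptl);
		if (pgtable)
			pte_free(mm, pgtable);
		return VM_FAULT_FALLBACK;
	}

	if (pgtable) {
		/* Stash the pte page so a later pmd split can withdraw it. */
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
	}
	set_pmd_at(mm, addr, pmd, entry);
	spin_unlock(ptl);
	return VM_FAULT_NOPAGE;
}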
index 89aa8412b5f5972466e79c711cee475d8d42ebfe..0e9fb2cb1984b5c1d04623847e5fb523565dc856 100644 (file)
@@ -338,7 +338,7 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
        tail = ctx->cached_cq_tail;
        /* See comment at the top of the file */
        smp_rmb();
-       if (tail + 1 == READ_ONCE(ring->r.head))
+       if (tail - READ_ONCE(ring->r.head) == ring->ring_entries)
                return NULL;
 
        ctx->cached_cq_tail++;
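
The rewritten test works on free-running unsigned indices: head and tail are only ever incremented, so tail - head is the exact occupancy (unsigned wraparound keeps the subtraction correct), and the ring is full when that difference equals ring_entries. The old tail + 1 == head form wasted a slot and is wrong once the indices run free. The same arithmetic as a standalone sketch:

#include <stdbool.h>
#include <stdint.h>

/* Free-running indices; entries is a power of two, mask = entries - 1. */
struct ring {
	uint32_t head;		/* consumer increments */
	uint32_t tail;		/* producer increments */
	uint32_t entries;
	uint32_t mask;		/* index into the backing array: i & mask */
};

static bool ring_full(const struct ring *r)
{
	/* Occupancy is exact modulo 2^32, even across index wraparound. */
	return r->tail - r->head == r->entries;
}

static bool ring_empty(const struct ring *r)
{
	return r->tail == r->head;
}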
@@ -682,11 +682,9 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
                list_add_tail(&req->list, &ctx->poll_list);
 }
 
-static void io_file_put(struct io_submit_state *state, struct file *file)
+static void io_file_put(struct io_submit_state *state)
 {
-       if (!state) {
-               fput(file);
-       } else if (state->file) {
+       if (state->file) {
                int diff = state->has_refs - state->used_refs;
 
                if (diff)
@@ -711,7 +709,7 @@ static struct file *io_file_get(struct io_submit_state *state, int fd)
                        state->ios_left--;
                        return state->file;
                }
-               io_file_put(state, NULL);
+               io_file_put(state);
        }
        state->file = fget_many(fd, state->ios_left);
        if (!state->file)
@@ -742,7 +740,7 @@ static bool io_file_supports_async(struct file *file)
 }
 
 static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
-                     bool force_nonblock, struct io_submit_state *state)
+                     bool force_nonblock)
 {
        const struct io_uring_sqe *sqe = s->sqe;
        struct io_ring_ctx *ctx = req->ctx;
@@ -940,7 +938,7 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
 }
 
 static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
-                  bool force_nonblock, struct io_submit_state *state)
+                  bool force_nonblock)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct kiocb *kiocb = &req->rw;
@@ -949,7 +947,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
        size_t iov_count;
        int ret;
 
-       ret = io_prep_rw(req, s, force_nonblock, state);
+       ret = io_prep_rw(req, s, force_nonblock);
        if (ret)
                return ret;
        file = kiocb->ki_filp;
@@ -987,7 +985,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 }
 
 static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
-                   bool force_nonblock, struct io_submit_state *state)
+                   bool force_nonblock)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct kiocb *kiocb = &req->rw;
@@ -996,7 +994,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
        size_t iov_count;
        int ret;
 
-       ret = io_prep_rw(req, s, force_nonblock, state);
+       ret = io_prep_rw(req, s, force_nonblock);
        if (ret)
                return ret;
 
@@ -1338,8 +1336,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 }
 
 static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-                          const struct sqe_submit *s, bool force_nonblock,
-                          struct io_submit_state *state)
+                          const struct sqe_submit *s, bool force_nonblock)
 {
        int ret, opcode;
 
@@ -1355,18 +1352,18 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
        case IORING_OP_READV:
                if (unlikely(s->sqe->buf_index))
                        return -EINVAL;
-               ret = io_read(req, s, force_nonblock, state);
+               ret = io_read(req, s, force_nonblock);
                break;
        case IORING_OP_WRITEV:
                if (unlikely(s->sqe->buf_index))
                        return -EINVAL;
-               ret = io_write(req, s, force_nonblock, state);
+               ret = io_write(req, s, force_nonblock);
                break;
        case IORING_OP_READ_FIXED:
-               ret = io_read(req, s, force_nonblock, state);
+               ret = io_read(req, s, force_nonblock);
                break;
        case IORING_OP_WRITE_FIXED:
-               ret = io_write(req, s, force_nonblock, state);
+               ret = io_write(req, s, force_nonblock);
                break;
        case IORING_OP_FSYNC:
                ret = io_fsync(req, s->sqe, force_nonblock);
@@ -1459,7 +1456,7 @@ static void io_sq_wq_submit_work(struct work_struct *work)
                        s->has_user = cur_mm != NULL;
                        s->needs_lock = true;
                        do {
-                               ret = __io_submit_sqe(ctx, req, s, false, NULL);
+                               ret = __io_submit_sqe(ctx, req, s, false);
                                /*
                                 * We can get EAGAIN for polled IO even though
                                 * we're forcing a sync submission from here,
@@ -1625,7 +1622,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
        if (unlikely(ret))
                goto out;
 
-       ret = __io_submit_sqe(ctx, req, s, true, state);
+       ret = __io_submit_sqe(ctx, req, s, true);
        if (ret == -EAGAIN) {
                struct io_uring_sqe *sqe_copy;
 
@@ -1671,7 +1668,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
 static void io_submit_state_end(struct io_submit_state *state)
 {
        blk_finish_plug(&state->plug);
-       io_file_put(state, NULL);
+       io_file_put(state);
        if (state->free_reqs)
                kmem_cache_free_bulk(req_cachep, state->free_reqs,
                                        &state->reqs[state->cur_req]);
@@ -1741,7 +1738,8 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
        head = ctx->cached_sq_head;
        /* See comment at the top of this file */
        smp_rmb();
-       if (head == READ_ONCE(ring->r.tail))
+       /* make sure SQ entry isn't read before tail */
+       if (head == smp_load_acquire(&ring->r.tail))
                return false;
 
        head = READ_ONCE(ring->array[head & ctx->sq_mask]);
@@ -1866,7 +1864,8 @@ static int io_sq_thread(void *data)
 
                        /* Tell userspace we may need a wakeup call */
                        ctx->sq_ring->flags |= IORING_SQ_NEED_WAKEUP;
-                       smp_wmb();
+                       /* make sure to read SQ tail after writing flags */
+                       smp_mb();
 
                        if (!io_get_sqring(ctx, &sqes[0])) {
                                if (kthread_should_stop()) {
@@ -1920,6 +1919,10 @@ static int io_sq_thread(void *data)
                unuse_mm(cur_mm);
                mmput(cur_mm);
        }
+
+       if (kthread_should_park())
+               kthread_parkme();
+
        return 0;
 }
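
The upgrade from smp_wmb() to smp_mb() matters because the critical sequence is a store followed by a load: store the IORING_SQ_NEED_WAKEUP flag, then load the SQ tail in io_get_sqring(). A write barrier orders only stores, so the tail load could still be performed before the flag became visible; a submitter that missed the flag and a kernel thread that missed the tail would then both go idle. A self-contained C11 analogue of the two sides (illustrative, not the kernel's or liburing's code):

#include <stdatomic.h>
#include <stdbool.h>

#define NEED_WAKEUP 1u

struct sq {
	atomic_uint flags;	/* kernel publishes NEED_WAKEUP here */
	atomic_uint tail;	/* userspace publishes new entries here */
};

/* Kernel-thread side: set the flag, then re-check the tail. */
static bool may_sleep(struct sq *sq, unsigned int cached_head)
{
	atomic_fetch_or(&sq->flags, NEED_WAKEUP);
	/* Full fence: the tail load must not pass the flag store. */
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&sq->tail, memory_order_relaxed) ==
	       cached_head;	/* still empty: safe to sleep */
}

/* Submitter side: publish the tail, then check whether to wake. */
static bool need_wakeup(struct sq *sq, unsigned int new_tail)
{
	atomic_store_explicit(&sq->tail, new_tail, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&sq->flags, memory_order_relaxed) &
	       NEED_WAKEUP;
}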
 
@@ -2054,6 +2057,7 @@ static void io_sq_thread_stop(struct io_ring_ctx *ctx)
        if (ctx->sqo_thread) {
                ctx->sqo_stop = 1;
                mb();
+               kthread_park(ctx->sqo_thread);
                kthread_stop(ctx->sqo_thread);
                ctx->sqo_thread = NULL;
        }
@@ -2236,10 +2240,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
        mmgrab(current->mm);
        ctx->sqo_mm = current->mm;
 
-       ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
-       if (!ctx->sq_thread_idle)
-               ctx->sq_thread_idle = HZ;
-
        ret = -EINVAL;
        if (!cpu_possible(p->sq_thread_cpu))
                goto err;
@@ -2249,10 +2249,18 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
                if (!capable(CAP_SYS_ADMIN))
                        goto err;
 
+               ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
+               if (!ctx->sq_thread_idle)
+                       ctx->sq_thread_idle = HZ;
+
                if (p->flags & IORING_SETUP_SQ_AFF) {
                        int cpu;
 
                        cpu = array_index_nospec(p->sq_thread_cpu, NR_CPUS);
+                       ret = -EINVAL;
+                       if (!cpu_possible(p->sq_thread_cpu))
+                               goto err;
+
                        ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
                                                        ctx, cpu,
                                                        "io_uring-sq");
@@ -2567,7 +2575,8 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
        poll_wait(file, &ctx->cq_wait, wait);
        /* See comment at the top of this file */
        smp_rmb();
-       if (READ_ONCE(ctx->sq_ring->r.tail) + 1 != ctx->cached_sq_head)
+       if (READ_ONCE(ctx->sq_ring->r.tail) - ctx->cached_sq_head !=
+           ctx->sq_ring->ring_entries)
                mask |= EPOLLOUT | EPOLLWRNORM;
        if (READ_ONCE(ctx->cq_ring->r.head) != ctx->cached_cq_tail)
                mask |= EPOLLIN | EPOLLRDNORM;
@@ -2922,11 +2931,31 @@ SYSCALL_DEFINE2(io_uring_setup, u32, entries,
 
 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
                               void __user *arg, unsigned nr_args)
+       __releases(ctx->uring_lock)
+       __acquires(ctx->uring_lock)
 {
        int ret;
 
+       /*
+        * We're inside the ring mutex; if the ref is already dying, then
+        * someone else has killed the ctx or is already going through
+        * io_uring_register().
+        */
+       if (percpu_ref_is_dying(&ctx->refs))
+               return -ENXIO;
+
        percpu_ref_kill(&ctx->refs);
+
+       /*
+        * Drop uring mutex before waiting for references to exit. If another
+        * thread is currently inside io_uring_enter() it might need to grab
+        * the uring_lock to make progress. If we hold it here across the drain
+        * wait, then we can deadlock. It's safe to drop the mutex here, since
+        * no new references will come in after we've killed the percpu ref.
+        */
+       mutex_unlock(&ctx->uring_lock);
        wait_for_completion(&ctx->ctx_done);
+       mutex_lock(&ctx->uring_lock);
 
        switch (opcode) {
        case IORING_REGISTER_BUFFERS:
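
The comment above states the rule this hunk enforces: never sleep in wait_for_completion() while holding a mutex that the threads being waited for may need. The shape, reduced to a sketch with hypothetical names:

/* Hypothetical sketch: drop a mutex across a percpu-ref drain. */
struct drain_ctx {
	struct mutex lock;
	struct percpu_ref refs;
	struct completion done;	/* completed by the ref release callback */
};

static void quiesce(struct drain_ctx *ctx)
	__releases(ctx->lock) __acquires(ctx->lock)
{
	percpu_ref_kill(&ctx->refs);	/* no new references after this */

	/*
	 * Reference holders may need ctx->lock to finish and drop their
	 * references, so the mutex must not be held while sleeping.
	 */
	mutex_unlock(&ctx->lock);
	wait_for_completion(&ctx->done);
	mutex_lock(&ctx->lock);
}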
index 8f933e84cec18221f4645b769ea9d1914bc3e627..9bc32af4e2daff14817c4306833009c1d9ab92aa 100644 (file)
@@ -442,7 +442,9 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
        struct nfsd3_readdirargs *argp = rqstp->rq_argp;
        struct nfsd3_readdirres  *resp = rqstp->rq_resp;
        __be32          nfserr;
-       int             count;
+       int             count = 0;
+       struct page     **p;
+       caddr_t         page_addr = NULL;
 
        dprintk("nfsd: READDIR(3)  %s %d bytes at %d\n",
                                SVCFH_fmt(&argp->fh),
@@ -462,7 +464,18 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
        nfserr = nfsd_readdir(rqstp, &resp->fh, (loff_t*) &argp->cookie, 
                                        &resp->common, nfs3svc_encode_entry);
        memcpy(resp->verf, argp->verf, 8);
-       resp->count = resp->buffer - argp->buffer;
+       count = 0;
+       for (p = rqstp->rq_respages + 1; p < rqstp->rq_next_page; p++) {
+               page_addr = page_address(*p);
+
+               if (((caddr_t)resp->buffer >= page_addr) &&
+                   ((caddr_t)resp->buffer < page_addr + PAGE_SIZE)) {
+                       count += (caddr_t)resp->buffer - page_addr;
+                       break;
+               }
+               count += PAGE_SIZE;
+       }
+       resp->count = count >> 2;
        if (resp->offset) {
                loff_t offset = argp->cookie;
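
Since the readdir reply may now span several response pages, the old resp->buffer - argp->buffer subtraction no longer works: the two pointers can live in different, non-adjacent pages. The replacement walks rq_respages, adding PAGE_SIZE for each fully consumed page and the in-page offset for the page holding the final buffer pointer; the trailing >> 2 converts bytes into the 32-bit XDR words that resp->count is expressed in. A simplified standalone model of that walk:

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

/*
 * Bytes consumed in a scattered page array up to `cursor`, which points
 * into one of the pages; `pages` holds each page's start address.
 */
static size_t bytes_used(char *const *pages, size_t npages, const char *cursor)
{
	size_t count = 0;

	for (size_t i = 0; i < npages; i++) {
		const char *base = pages[i];

		if (cursor >= base && cursor < base + PAGE_SIZE)
			return count + (size_t)(cursor - base);
		count += PAGE_SIZE;	/* page fully consumed */
	}
	return count;
}

/* XDR encodes in 32-bit words, hence the final shift by two. */
static uint32_t bytes_to_xdr_words(size_t bytes)
{
	return (uint32_t)(bytes >> 2);
}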
 
index 93fea246f676ebec32213bbf3a023ea395fc01a3..8d789124ed3c18d187eea569e350e6d40a43ad7a 100644 (file)
@@ -573,6 +573,7 @@ int
 nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p)
 {
        struct nfsd3_readdirargs *args = rqstp->rq_argp;
+       int len;
        u32 max_blocksize = svc_max_payload(rqstp);
 
        p = decode_fh(p, &args->fh);
@@ -582,8 +583,14 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p)
        args->verf   = p; p += 2;
        args->dircount = ~0;
        args->count  = ntohl(*p++);
-       args->count  = min_t(u32, args->count, max_blocksize);
-       args->buffer = page_address(*(rqstp->rq_next_page++));
+       len = args->count  = min_t(u32, args->count, max_blocksize);
+
+       while (len > 0) {
+               struct page *p = *(rqstp->rq_next_page++);
+               if (!args->buffer)
+                       args->buffer = page_address(p);
+               len -= PAGE_SIZE;
+       }
 
        return xdr_argsize_check(rqstp, p);
 }
index d219159b98afc54bda6d2efee824b41487db17c0..7caa3801ce72b70de75802f0a5c1b78b1087ebb5 100644 (file)
@@ -1010,8 +1010,9 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
        cb->cb_seq_status = 1;
        cb->cb_status = 0;
        if (minorversion) {
-               if (!nfsd41_cb_get_slot(clp, task))
+               if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
                        return;
+               cb->cb_holds_slot = true;
        }
        rpc_call_start(task);
 }
@@ -1038,6 +1039,9 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
                return true;
        }
 
+       if (!cb->cb_holds_slot)
+               goto need_restart;
+
        switch (cb->cb_seq_status) {
        case 0:
                /*
@@ -1076,6 +1080,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
                        cb->cb_seq_status);
        }
 
+       cb->cb_holds_slot = false;
        clear_bit(0, &clp->cl_cb_slot_busy);
        rpc_wake_up_next(&clp->cl_cb_waitq);
        dprintk("%s: freed slot, new seqid=%d\n", __func__,
@@ -1283,6 +1288,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
        cb->cb_seq_status = 1;
        cb->cb_status = 0;
        cb->cb_need_restart = false;
+       cb->cb_holds_slot = false;
 }
 
 void nfsd4_run_cb(struct nfsd4_callback *cb)
index 6a45fb00c5fcdccabdb142096270aa6035fe2f32..f056b1d3fecd6e1d0db44b56978c23cb93300ce8 100644 (file)
@@ -265,6 +265,7 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
 static void
 free_blocked_lock(struct nfsd4_blocked_lock *nbl)
 {
+       locks_delete_block(&nbl->nbl_lock);
        locks_release_private(&nbl->nbl_lock);
        kfree(nbl);
 }
@@ -293,11 +294,18 @@ remove_blocked_locks(struct nfs4_lockowner *lo)
                nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
                                        nbl_lru);
                list_del_init(&nbl->nbl_lru);
-               locks_delete_block(&nbl->nbl_lock);
                free_blocked_lock(nbl);
        }
 }
 
+static void
+nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
+{
+       struct nfsd4_blocked_lock       *nbl = container_of(cb,
+                                               struct nfsd4_blocked_lock, nbl_cb);
+       locks_delete_block(&nbl->nbl_lock);
+}
+
 static int
 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
 {
@@ -325,6 +333,7 @@ nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
 }
 
 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
+       .prepare        = nfsd4_cb_notify_lock_prepare,
        .done           = nfsd4_cb_notify_lock_done,
        .release        = nfsd4_cb_notify_lock_release,
 };
@@ -4863,7 +4872,6 @@ nfs4_laundromat(struct nfsd_net *nn)
                nbl = list_first_entry(&reaplist,
                                        struct nfsd4_blocked_lock, nbl_lru);
                list_del_init(&nbl->nbl_lru);
-               locks_delete_block(&nbl->nbl_lock);
                free_blocked_lock(nbl);
        }
 out:
index 396c76755b03b7cf08237b0bcd1b8a3f5de2b17d..9d6cb246c6c55737967011a919023fc2dad9c861 100644 (file)
@@ -70,6 +70,7 @@ struct nfsd4_callback {
        int cb_seq_status;
        int cb_status;
        bool cb_need_restart;
+       bool cb_holds_slot;
 };
 
 struct nfsd4_callback_ops {
index d653907275419435e4bad20de2f1a704b5c9d6c3..7325baa8f9d474f166c1bbef54b584a028b287fb 100644 (file)
@@ -1626,9 +1626,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
        if (--header->nreg)
                return;
 
-       if (parent)
+       if (parent) {
                put_links(header);
-       start_unregistering(header);
+               start_unregistering(header);
+       }
+
        if (!--header->count)
                kfree_rcu(header, rcu);
 
index 92a91e7816d8472c3451a99a456f6f5b7b84c5b5..95ca1fe7283cff265247c6f3a84e5fa573299fca 100644 (file)
@@ -1143,6 +1143,24 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                        count = -EINTR;
                                        goto out_mm;
                                }
+                               /*
+                                * Avoid modifying vma->vm_flags
+                                * without locked ops while the
+                                * coredump reads the vm_flags.
+                                */
+                               if (!mmget_still_valid(mm)) {
+                                       /*
+                                        * Silently return "count"
+                                        * as if get_task_mm() had
+                                        * failed. FIXME: should this
+                                        * function return -ESRCH
+                                        * when get_task_mm() fails,
+                                        * as it does when
+                                        * get_proc_task() fails?
+                                        */
+                                       up_write(&mm->mmap_sem);
+                                       goto out_mm;
+                               }
                                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                        vma->vm_flags &= ~VM_SOFTDIRTY;
                                        vma_set_page_prot(vma);
index 98943d9b219c0cea1037770cbc6578970fbe69b6..25212dcca2dfd6b43dc51cd8887f93038c753515 100644 (file)
@@ -330,8 +330,8 @@ const struct pipe_buf_operations default_pipe_buf_ops = {
        .get = generic_pipe_buf_get,
 };
 
-static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
-                                   struct pipe_buffer *buf)
+int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
+                            struct pipe_buffer *buf)
 {
        return 1;
 }
index 89800fc7dc9d562cd3557988adc766fa41c51209..f5de1e726356a51c27ff529f98d99032650eb839 100644 (file)
@@ -629,6 +629,8 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
 
                /* the various vma->vm_userfaultfd_ctx still points to it */
                down_write(&mm->mmap_sem);
+               /* no task can run (and in turn coredump) yet */
+               VM_WARN_ON(!mmget_still_valid(mm));
                for (vma = mm->mmap; vma; vma = vma->vm_next)
                        if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
                                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
@@ -883,6 +885,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
         * taking the mmap_sem for writing.
         */
        down_write(&mm->mmap_sem);
+       if (!mmget_still_valid(mm))
+               goto skip_mm;
        prev = NULL;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                cond_resched();
@@ -905,6 +909,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
                vma->vm_flags = new_flags;
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
        }
+skip_mm:
        up_write(&mm->mmap_sem);
        mmput(mm);
 wakeup:
@@ -1333,6 +1338,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
                goto out;
 
        down_write(&mm->mmap_sem);
+       if (!mmget_still_valid(mm))
+               goto out_unlock;
        vma = find_vma_prev(mm, start, &prev);
        if (!vma)
                goto out_unlock;
@@ -1520,6 +1527,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                goto out;
 
        down_write(&mm->mmap_sem);
+       if (!mmget_still_valid(mm))
+               goto out_unlock;
        vma = find_vma_prev(mm, start, &prev);
        if (!vma)
                goto out_unlock;
index cbf3180cb612ed7e54b9aecf79b2690c5a48aed6..668ad971cd7b26828e2d95a4813ec3888d0abdac 100644 (file)
@@ -420,7 +420,6 @@ extern struct ttm_bo_global {
        /**
         * Protected by ttm_global_mutex.
         */
-       unsigned int use_count;
        struct list_head device_list;
 
        /**
diff --git a/include/dt-bindings/clock/sifive-fu540-prci.h b/include/dt-bindings/clock/sifive-fu540-prci.h
new file mode 100644 (file)
index 0000000..6a0b70a
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H
+#define __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H
+
+/* Clock indexes for use by Device Tree data and the PRCI driver */
+
+#define PRCI_CLK_COREPLL              0
+#define PRCI_CLK_DDRPLL                       1
+#define PRCI_CLK_GEMGXLPLL            2
+#define PRCI_CLK_TLCLK                3
+
+#endif
index 5c58a3b2bf0038083b9dc2b88293349fa8afb22b..317ab30d29046baaa29d13d0213ef9c63c4bf95c 100644 (file)
@@ -548,7 +548,6 @@ struct request_queue {
        struct rcu_head         rcu_head;
        wait_queue_head_t       mq_freeze_wq;
        struct percpu_ref       q_usage_counter;
-       struct list_head        all_q_node;
 
        struct blk_mq_tag_set   *tag_set;
        struct list_head        tag_set_list;
index 3bc91879e1e2b09ced375d4540e97094ae593f66..ff13cbc1887db7a3e2d4fe9f64916c34a8a44a49 100644 (file)
@@ -160,8 +160,9 @@ static inline void bvec_advance(const struct bio_vec *bvec,
                bv->bv_page = nth_page(bv->bv_page, 1);
                bv->bv_offset = 0;
        } else {
-               bv->bv_page = bvec->bv_page;
-               bv->bv_offset = bvec->bv_offset;
+               bv->bv_page = bvec_nth_page(bvec->bv_page, bvec->bv_offset /
+                                           PAGE_SIZE);
+               bv->bv_offset = bvec->bv_offset & ~PAGE_MASK;
        }
        bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,
                           bvec->bv_len - iter_all->done);
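
The else branch previously assumed bv_offset always fell inside the first page; with multi-page bvecs it can exceed PAGE_SIZE, so the current page is bv_page advanced by bv_offset / PAGE_SIZE pages and the in-page offset is the remainder. The arithmetic in isolation, as a runnable model:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Split a byte offset within a multi-page segment into (page, offset). */
static void split_offset(uint32_t bv_offset, uint32_t *page_idx,
			 uint32_t *in_page)
{
	*page_idx = bv_offset / PAGE_SIZE;	/* whole pages to skip */
	*in_page = bv_offset & ~PAGE_MASK;	/* remainder inside that page */
}

int main(void)
{
	uint32_t page, off;

	split_offset(9000, &page, &off);
	printf("page %u offset %u\n", page, off);	/* page 2 offset 808 */
	return 0;
}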
index b160e98076e3b896fabffd6a78fcfb62501e1fdb..684caf067003b0b3c45424f914e821f6797531ca 100644 (file)
@@ -178,6 +178,11 @@ static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
 #endif
 
+static inline bool policy_is_inactive(struct cpufreq_policy *policy)
+{
+       return cpumask_empty(policy->cpus);
+}
+
 static inline bool policy_is_shared(struct cpufreq_policy *policy)
 {
        return cpumask_weight(policy->cpus) > 1;
@@ -193,8 +198,14 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu);
 void disable_cpufreq(void);
 
 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
+
+struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
+void cpufreq_cpu_release(struct cpufreq_policy *policy);
 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
+int cpufreq_set_policy(struct cpufreq_policy *policy,
+                      struct cpufreq_policy *new_policy);
 void cpufreq_update_policy(unsigned int cpu);
+void cpufreq_update_limits(unsigned int cpu);
 bool have_governor_per_policy(void);
 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
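
The new cpufreq_cpu_acquire()/cpufreq_cpu_release() pair complements cpufreq_cpu_get()/cpufreq_cpu_put(): as used elsewhere in this series, acquire appears to return the policy with a reference held and its lock taken, or NULL when there is no active policy, and release undoes both. A sketch of the expected calling pattern under that assumption:

/* Sketch of the assumed acquire/release calling convention. */
static void inspect_limits(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)	/* no policy for this CPU, or policy inactive */
		return;

	/* The policy lock is assumed held here; limits are stable. */
	pr_debug("cpu%u: %u..%u kHz\n", cpu, policy->min, policy->max);

	cpufreq_cpu_release(policy);	/* drops the lock and the reference */
}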
@@ -322,6 +333,9 @@ struct cpufreq_driver {
        /* should be defined, if possible */
        unsigned int    (*get)(unsigned int cpu);
 
+       /* Called to update policy limits on firmware notifications. */
+       void            (*update_limits)(unsigned int cpu);
+
        /* optional */
        int             (*bios_limit)(int cpu, unsigned int *limit);
 
index e78281d07b70b39ac80de10bf8ef8c6f1cdd3605..dbfdd0fadbeff2013df4419e46895c76af383ea3 100644 (file)
@@ -147,6 +147,7 @@ enum cpuhp_state {
        CPUHP_AP_X86_VDSO_VMA_ONLINE,
        CPUHP_AP_IRQ_AFFINITY_ONLINE,
        CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
+       CPUHP_AP_X86_INTEL_EPB_ONLINE,
        CPUHP_AP_PERF_ONLINE,
        CPUHP_AP_PERF_X86_ONLINE,
        CPUHP_AP_PERF_X86_UNCORE_ONLINE,
index 3b39472324a31c4580746300f348fa69ed8efdc3..bb9a0db89f1ab8e8d5387c93e4a98e81cba5da60 100644 (file)
@@ -83,6 +83,7 @@ struct cpuidle_device {
        unsigned int            use_deepest_state:1;
        unsigned int            poll_time_limit:1;
        unsigned int            cpu;
+       ktime_t                 next_hrtimer;
 
        int                     last_residency;
        struct cpuidle_state_usage      states_usage[CPUIDLE_STATE_MAX];
index 54357a258b358aa3961151c025cfef621ce5cbef..6ebc2098cfe1719a16ba177c567ed5a916955bb4 100644 (file)
@@ -1611,7 +1611,12 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
                           struct screen_info *si, efi_guid_t *proto,
                           unsigned long size);
 
-bool efi_runtime_disabled(void);
+#ifdef CONFIG_EFI
+extern bool efi_runtime_disabled(void);
+#else
+static inline bool efi_runtime_disabled(void) { return true; }
+#endif
+
 extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
 extern unsigned long efi_call_virt_save_flags(void);
 
index 2e9e2763bf47dbea239976034e32fd11f14826f7..6e8bc53740f050f63883ea6b7d077e5911bdca9f 100644 (file)
@@ -31,6 +31,7 @@ struct elevator_mq_ops {
        void (*exit_sched)(struct elevator_queue *);
        int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
        void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+       void (*depth_updated)(struct blk_mq_hw_ctx *);
 
        bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
        bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
index e2f3b21cd72a28d16cf2324d308e13dc64c86f59..aa8bfd6f738c7fac838b31de87ec390cf0a81d3c 100644 (file)
@@ -448,6 +448,18 @@ static inline void eth_addr_dec(u8 *addr)
        u64_to_ether_addr(u, addr);
 }
 
+/**
+ * eth_addr_inc() - Increment the given MAC address.
+ * @addr: Pointer to a six-byte array containing Ethernet address to increment.
+ */
+static inline void eth_addr_inc(u8 *addr)
+{
+       u64 u = ether_addr_to_u64(addr);
+
+       u++;
+       u64_to_ether_addr(u, addr);
+}
+
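
eth_addr_inc() mirrors eth_addr_dec() above: the six-byte address is widened to a u64, incremented, and written back, so the carry propagates across octet boundaries. A quick illustrative use, deriving sequential MACs from a base address (hypothetical helper, not from this patch):

/* Hypothetical: fill out[0..n) with consecutive MACs starting at base. */
static void fill_mac_range(u8 base[ETH_ALEN], u8 (*out)[ETH_ALEN], int n)
{
	int i;

	for (i = 0; i < n; i++) {
		ether_addr_copy(out[i], base);
		eth_addr_inc(base);	/* carry ripples across octets */
	}
}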
 /**
  * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
  * @dev: Pointer to a device structure
index 201f0f2683f25bd382267042f9f7dbec8460f093..9a897256e481f311a1de41e448c52710d2c2f247 100644 (file)
@@ -173,6 +173,7 @@ struct kretprobe_instance {
        struct kretprobe *rp;
        kprobe_opcode_t *ret_addr;
        struct task_struct *task;
+       void *fp;
        char data[0];
 };
 
index 9d55c63db09b5dcb9ac997d802cb00ff356d4353..640a03642766bb4ae02c86e3606318c80adaf81d 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/irqbypass.h>
 #include <linux/swait.h>
 #include <linux/refcount.h>
+#include <linux/nospec.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -513,10 +514,10 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
 
 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 {
-       /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
-        * the caller has read kvm->online_vcpus before (as is the case
-        * for kvm_for_each_vcpu, for example).
-        */
+       int num_vcpus = atomic_read(&kvm->online_vcpus);
+       i = array_index_nospec(i, num_vcpus);
+
+       /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
        smp_rmb();
        return kvm->vcpus[i];
 }
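
array_index_nospec() is the standard Spectre-v1 mitigation: even when the bounds check is speculatively mispredicted, the returned index is clamped to the valid range (to 0, in practice), so the following kvm->vcpus[i] load cannot be steered to attacker-controlled memory. The canonical check-then-clamp pattern, with a hypothetical table type:

#include <linux/nospec.h>

struct table {			/* hypothetical example container */
	unsigned long nr_entries;
	long entries[64];
};

static long get_entry(struct table *t, unsigned long idx)
{
	if (idx >= t->nr_entries)
		return -EINVAL;
	/* Clamp idx under speculation before it is used to index memory. */
	idx = array_index_nospec(idx, t->nr_entries);
	return t->entries[idx];
}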
@@ -600,6 +601,7 @@ void kvm_put_kvm(struct kvm *kvm);
 
 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
 {
+       as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
        return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
                        lockdep_is_held(&kvm->slots_lock) ||
                        !refcount_read(&kvm->users_count));
index 26f69cf763f43dd1e0d61c0692f649aab21baeea..324e872c91d15407b6804c297de56cd480b215f8 100644 (file)
@@ -1500,6 +1500,7 @@ struct net_device_ops {
  * @IFF_FAILOVER: device is a failover master device
  * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
  * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
+ * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
  */
 enum netdev_priv_flags {
        IFF_802_1Q_VLAN                 = 1<<0,
@@ -1532,6 +1533,7 @@ enum netdev_priv_flags {
        IFF_FAILOVER                    = 1<<27,
        IFF_FAILOVER_SLAVE              = 1<<28,
        IFF_L3MDEV_RX_HANDLER           = 1<<29,
+       IFF_LIVE_RENAME_OK              = 1<<30,
 };
 
 #define IFF_802_1Q_VLAN                        IFF_802_1Q_VLAN
@@ -1563,6 +1565,7 @@ enum netdev_priv_flags {
 #define IFF_FAILOVER                   IFF_FAILOVER
 #define IFF_FAILOVER_SLAVE             IFF_FAILOVER_SLAVE
 #define IFF_L3MDEV_RX_HANDLER          IFF_L3MDEV_RX_HANDLER
+#define IFF_LIVE_RENAME_OK             IFF_LIVE_RENAME_OK
 
 /**
  *     struct net_device - The DEVICE structure.
index abb2dac3da9b9cf69b477c2d3726e019a0352b7a..5c626fdc10dbd27d6f87f290cf5dbd50d0244528 100644 (file)
@@ -176,6 +176,7 @@ void free_pipe_info(struct pipe_inode_info *);
 bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
 void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
 void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
 
index 1ed5874bcee09afef28c505574bbd2d213abd785..0e8e356bed6a406725dbc55e5d0dae72e419105c 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/of.h>
 #include <linux/notifier.h>
 #include <linux/spinlock.h>
+#include <linux/cpumask.h>
 
 /*
  * Flags to control the behaviour of a genpd.
  * GENPD_FLAG_ACTIVE_WAKEUP:   Instructs genpd to keep the PM domain powered
  *                             on, in case any of its attached devices is used
  *                             in the wakeup path to serve system wakeups.
+ *
+ * GENPD_FLAG_CPU_DOMAIN:      Instructs genpd that it should expect to get
+ *                             devices attached, which may belong to CPUs or
+ *                             possibly have subdomains with CPUs attached.
+ *                             This flag enables the genpd backend driver to
+ *                             deploy idle power management support for CPUs
+ *                             and groups of CPUs. Note that the backend
+ *                             driver must then comply with the so-called
+ *                             last-man-standing algorithm for the CPUs in the
+ *                             PM domain.
  */
 #define GENPD_FLAG_PM_CLK       (1U << 0)
 #define GENPD_FLAG_IRQ_SAFE     (1U << 1)
 #define GENPD_FLAG_ALWAYS_ON    (1U << 2)
 #define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3)
+#define GENPD_FLAG_CPU_DOMAIN   (1U << 4)
 
 enum gpd_status {
        GPD_STATE_ACTIVE = 0,   /* PM domain is active */
@@ -69,6 +81,7 @@ struct genpd_power_state {
        s64 residency_ns;
        struct fwnode_handle *fwnode;
        ktime_t idle_time;
+       void *data;
 };
 
 struct genpd_lock_ops;
@@ -93,6 +106,7 @@ struct generic_pm_domain {
        unsigned int suspended_count;   /* System suspend device counter */
        unsigned int prepared_count;    /* Suspend counter of prepared devices */
        unsigned int performance_state; /* Aggregated max performance state */
+       cpumask_var_t cpus;             /* A cpumask of the attached CPUs */
        int (*power_off)(struct generic_pm_domain *domain);
        int (*power_on)(struct generic_pm_domain *domain);
        struct opp_table *opp_table;    /* OPP table of the genpd */
@@ -104,15 +118,17 @@ struct generic_pm_domain {
        s64 max_off_time_ns;    /* Maximum allowed "suspended" time. */
        bool max_off_time_changed;
        bool cached_power_down_ok;
+       bool cached_power_down_state_idx;
        int (*attach_dev)(struct generic_pm_domain *domain,
                          struct device *dev);
        void (*detach_dev)(struct generic_pm_domain *domain,
                           struct device *dev);
        unsigned int flags;             /* Bit field of configs for genpd */
        struct genpd_power_state *states;
+       void (*free_states)(struct genpd_power_state *states,
+                           unsigned int state_count);
        unsigned int state_count; /* number of states */
        unsigned int state_idx; /* state that genpd will go to when off */
-       void *free; /* Free the state that was allocated for default */
        ktime_t on_time;
        ktime_t accounting_time;
        const struct genpd_lock_ops *lock_ops;
@@ -159,6 +175,7 @@ struct generic_pm_domain_data {
        struct pm_domain_data base;
        struct gpd_timing_data td;
        struct notifier_block nb;
+       int cpu;
        unsigned int performance_state;
        void *data;
 };
@@ -187,6 +204,9 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
 
 extern struct dev_power_governor simple_qos_governor;
 extern struct dev_power_governor pm_domain_always_on_gov;
+#ifdef CONFIG_CPU_IDLE
+extern struct dev_power_governor pm_domain_cpu_gov;
+#endif
 #else
 
 static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
index 24c757a32a7b1c82777a0cccc008c0b3346858c5..b150fe97ce5ae3d077dc960447fb6ffd4540a23b 100644 (file)
@@ -102,6 +102,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 
 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
                                              unsigned long *freq);
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
+                                                    unsigned long u_volt);
 
 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
                                             unsigned long *freq);
@@ -207,6 +209,12 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
        return ERR_PTR(-ENOTSUPP);
 }
 
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
+                                       unsigned long u_volt)
+{
+       return ERR_PTR(-ENOTSUPP);
+}
+
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
                                        unsigned long *freq)
 {
index 0cd9f10423fb8e60645685ab5bdbad675d803a51..a3fda9f024c3c1988b6ff60954d7f7e74a9c1ecf 100644 (file)
@@ -49,6 +49,27 @@ static inline void mmdrop(struct mm_struct *mm)
                __mmdrop(mm);
 }
 
+/*
+ * This has to be called after a get_task_mm()/mmget_not_zero()
+ * followed by taking the mmap_sem for writing, before modifying the
+ * vmas or anything the coredump pretends not to change from under it.
+ *
+ * NOTE: find_extend_vma() called from GUP context is the only place
+ * that can modify the "mm" (notably the vm_start/end) under mmap_sem
+ * for reading and outside the context of the process, so it is also
+ * the only case that holds the mmap_sem for reading and must call
+ * this function. Generally, if the mmap_sem is held for reading,
+ * there is no need for this check after get_task_mm()/mmget_not_zero().
+ *
+ * This function can be obsoleted and the check removed once the
+ * coredump code holds the mmap_sem for writing before invoking the
+ * ->core_dump methods.
+ */
+static inline bool mmget_still_valid(struct mm_struct *mm)
+{
+       return likely(!mm->core_state);
+}
+
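
The callers added in this series (userfaultfd and clear_refs_write above) all follow the same shape: pin the mm, take mmap_sem for writing, then back off if a coredump raced in. Condensed into a sketch (hypothetical function name):

/* Hypothetical caller shape for mmget_still_valid(). */
static int modify_vmas(struct mm_struct *mm)
{
	int ret = 0;

	down_write(&mm->mmap_sem);
	if (!mmget_still_valid(mm))
		goto out_unlock;	/* a coredump is reading the vmas */

	/* ... safe to change vma->vm_flags and friends here ... */

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}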
 /**
  * mmget() - Pin the address space associated with a &struct mm_struct.
  * @mm: The address space to pin.
index f3fb1edb3526ddc0c582f0ad32017ab7eaf21dd3..20d815a331454f93e7a66d808a5a5f84601e9a58 100644 (file)
@@ -21,6 +21,7 @@ struct shmem_inode_info {
        struct list_head        swaplist;       /* chain of maybes on swap */
        struct shared_policy    policy;         /* NUMA memory alloc policy */
        struct simple_xattrs    xattrs;         /* list of xattrs */
+       atomic_t                stop_eviction;  /* hold when working on inode */
        struct inode            vfs_inode;
 };
 
index 3f529ad9a9d2ec33bf8fcdfa72bb9ddce46a292a..6b3ea9ea6a9e09e313f28a0df567c3a7a45a1b99 100644 (file)
@@ -425,6 +425,7 @@ void restore_processor_state(void);
 /* kernel/power/main.c */
 extern int register_pm_notifier(struct notifier_block *nb);
 extern int unregister_pm_notifier(struct notifier_block *nb);
+extern void ksys_sync_helper(void);
 
 #define pm_notifier(fn, pri) {                         \
        static struct notifier_block fn##_nb =                  \
@@ -462,6 +463,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
        return 0;
 }
 
+static inline void ksys_sync_helper(void) {}
+
 #define pm_notifier(fn, pri)   do { (void)(fn); } while (0)
 
 static inline bool pm_wakeup_pending(void) { return false; }
index 55388ab45fd4d474984beb5e4049471f8d29f646..8891b5ac3e4031887e4e98d74831bb0e2d71914d 100644 (file)
@@ -122,6 +122,7 @@ extern void tick_nohz_idle_enter(void);
 extern void tick_nohz_idle_exit(void);
 extern void tick_nohz_irq_exit(void);
 extern bool tick_nohz_idle_got_tick(void);
+extern ktime_t tick_nohz_get_next_hrtimer(void);
 extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
 extern unsigned long tick_nohz_get_idle_calls(void);
 extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
@@ -145,7 +146,11 @@ static inline void tick_nohz_idle_restart_tick(void) { }
 static inline void tick_nohz_idle_enter(void) { }
 static inline void tick_nohz_idle_exit(void) { }
 static inline bool tick_nohz_idle_got_tick(void) { return false; }
-
+static inline ktime_t tick_nohz_get_next_hrtimer(void)
+{
+       /* The next wakeup is one tick period away; assume it starts now */
+       return ktime_add(ktime_get(), TICK_NSEC);
+}
 static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
 {
        *delta_next = TICK_NSEC;
index 2bfb87eb98ce15cd693819d42205a036ae6dd42f..78c856cba4f538c078fada09ef3238c2bc220069 100644 (file)
@@ -61,10 +61,12 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
                               rxrpc_user_attach_call_t, unsigned long, gfp_t,
                               unsigned int);
 void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
-u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
+bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *,
+                            u32 *);
 void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
 u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
 bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
                                 ktime_t *);
+bool rxrpc_kernel_call_is_complete(struct rxrpc_call *);
 
 #endif /* _NET_RXRPC_H */
index bb307a11ee636b7194bbe7d31c83f3d798b3379a..13bfeb712d36943cf9c04111a777d39cf08034a9 100644 (file)
@@ -7183,6 +7183,11 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
 #define wiphy_info(wiphy, format, args...)                     \
        dev_info(&(wiphy)->dev, format, ##args)
 
+#define wiphy_err_ratelimited(wiphy, format, args...)          \
+       dev_err_ratelimited(&(wiphy)->dev, format, ##args)
+#define wiphy_warn_ratelimited(wiphy, format, args...)         \
+       dev_warn_ratelimited(&(wiphy)->dev, format, ##args)
+
 #define wiphy_debug(wiphy, format, args...)                    \
        wiphy_printk(KERN_DEBUG, wiphy, format, ##args)
 
index ac2ed8ec662bd97ebe0337085e78e5a61906d499..112dc18c658f15f79525cae64afa0ea38e2a1159 100644 (file)
@@ -6231,8 +6231,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @ac: AC number to return packets from.
  *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
  * Returns the next txq if successful, %NULL if no queue is eligible. If a txq
  * is returned, it should be returned with ieee80211_return_txq() after the
  * driver has finished scheduling it.
@@ -6240,51 +6238,58 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac);
 
 /**
- * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
- *
- * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @txq: pointer obtained from station or virtual interface
- *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
- */
-void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
-
-/**
- * ieee80211_txq_schedule_start - acquire locks for safe scheduling of an AC
+ * ieee80211_txq_schedule_start - start new scheduling round for TXQs
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @ac: AC number to acquire locks for
  *
- * Acquire locks needed to schedule TXQs from the given AC. Should be called
- * before ieee80211_next_txq() or ieee80211_return_txq().
+ * Should be called before ieee80211_next_txq() or ieee80211_return_txq().
+ * The driver must not call multiple TXQ scheduling rounds concurrently.
  */
-void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
-       __acquires(txq_lock);
+void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac);
+
+/* (deprecated) */
+static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
+{
+}
+
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+                             struct ieee80211_txq *txq, bool force);
 
 /**
- * ieee80211_txq_schedule_end - release locks for safe scheduling of an AC
+ * ieee80211_schedule_txq - schedule a TXQ for transmission
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @ac: AC number to acquire locks for
+ * @txq: pointer obtained from station or virtual interface
  *
- * Release locks previously acquired by ieee80211_txq_schedule_end().
+ * Schedules a TXQ for transmission if it is not already scheduled,
+ * even if mac80211 does not have any packets buffered.
+ *
+ * The driver may call this function if it has buffered packets for
+ * this TXQ internally.
  */
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
-       __releases(txq_lock);
+static inline void
+ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+       __ieee80211_schedule_txq(hw, txq, true);
+}
 
 /**
- * ieee80211_schedule_txq - schedule a TXQ for transmission
+ * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @txq: pointer obtained from station or virtual interface
+ * @force: schedule txq even if mac80211 does not have any buffered packets.
  *
- * Schedules a TXQ for transmission if it is not already scheduled. Takes a
- * lock, which means it must *not* be called between
- * ieee80211_txq_schedule_start() and ieee80211_txq_schedule_end()
+ * The driver may set force=true if it has buffered packets for this TXQ
+ * internally.
  */
-void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
-       __acquires(txq_lock) __releases(txq_lock);
+static inline void
+ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
+                    bool force)
+{
+       __ieee80211_schedule_txq(hw, txq, force);
+}
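
Taken together, the reworked API above makes a scheduling round lock-free: ieee80211_txq_schedule_start() opens the round, ieee80211_next_txq() hands out eligible queues, and ieee80211_return_txq() is now a thin wrapper around __ieee80211_schedule_txq() whose force flag keeps a queue scheduled when the driver still holds frames for it. A sketch of a driver dequeue loop under the new calls (drv_tx_pending() is a hypothetical driver helper):

	struct ieee80211_txq *txq;

	ieee80211_txq_schedule_start(hw, ac);
	while ((txq = ieee80211_next_txq(hw, ac))) {
		bool more = drv_tx_pending(hw, txq);	/* hypothetical */

		ieee80211_return_txq(hw, txq, more);
	}
	ieee80211_txq_schedule_end(hw, ac);	/* now a no-op, kept for symmetry */
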
 
 /**
  * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
index 5ee7b30b4917244a0f74d28e93c05e1c3fc59a04..d2bc733a2ef1edf2ee7159457b1f33a676e74a98 100644 (file)
@@ -316,6 +316,8 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
                                 gfp_t flags);
 void nf_ct_tmpl_free(struct nf_conn *tmpl);
 
+u32 nf_ct_get_id(const struct nf_conn *ct);
+
 static inline void
 nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
 {
index 778087591983dd2a274ef9aa75d9b09caf78e8af..a49edfdf47e83ece9945d8978dc3e260a5d9d7e0 100644 (file)
@@ -75,6 +75,12 @@ bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple,
 bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple,
                                      const struct nf_conntrack_tuple *orig);
 
+int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb,
+                           unsigned int dataoff,
+                           const struct nf_hook_state *state,
+                           u8 l4proto,
+                           union nf_inet_addr *outer_daddr);
+
 int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
                              struct sk_buff *skb,
                              unsigned int dataoff,
index 5a0714ff500fd09bd288360a83dad57952e5efaf..80f15b1c1a489a71479845ae0d077875b1a52f66 100644 (file)
@@ -266,7 +266,7 @@ void nr_stop_idletimer(struct sock *);
 int nr_t1timer_running(struct sock *);
 
 /* sysctl_net_netrom.c */
-void nr_register_sysctl(void);
+int nr_register_sysctl(void);
 void nr_unregister_sysctl(void);
 
 #endif
index 8de5ee258b93a50b2fdcde796bae3a5b53ce4d6a..341f8bafa0cf585fc72e5819054f1b2f15a8e338 100644 (file)
@@ -2084,12 +2084,6 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
  * @p:              poll_table
  *
  * See the comments in the wq_has_sleeper function.
- *
- * Do not derive sock from filp->private_data here. An SMC socket establishes
- * an internal TCP socket that is used in the fallback case. All socket
- * operations on the SMC socket are then forwarded to the TCP socket. In case of
- * poll, the filp->private_data pointer references the SMC socket because the
- * TCP socket has no file assigned.
  */
 static inline void sock_poll_wait(struct file *filp, struct socket *sock,
                                  poll_table *p)
index a5a938583295c0789df287c737b7a1c87556c9f1..5934246b2c6f4bafbe318fdddfacb328a2b9bf5c 100644 (file)
@@ -307,6 +307,7 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tls_device_sendpage(struct sock *sk, struct page *page,
                        int offset, size_t size, int flags);
 void tls_device_sk_destruct(struct sock *sk);
+void tls_device_free_resources_tx(struct sock *sk);
 void tls_device_init(void);
 void tls_device_cleanup(void);
 int tls_tx_records(struct sock *sk, int flags);
@@ -330,6 +331,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx,
                int flags);
 int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
                            int flags);
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
 
 static inline struct tls_msg *tls_msg(struct sk_buff *skb)
 {
@@ -379,7 +381,7 @@ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
 static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 {
 #ifdef CONFIG_SOCK_VALIDATE_XMIT
-       return sk_fullsock(sk) &
+       return sk_fullsock(sk) &&
               (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
               &tls_validate_xmit_skb);
 #else
index 7f14d4a66c28c1c13d1388c6dacfcff30711edab..64cee116928ebd92d97acec08c273e02c89958ab 100644 (file)
 #define KEY_TITLE              0x171
 #define KEY_SUBTITLE           0x172
 #define KEY_ANGLE              0x173
-#define KEY_ZOOM               0x174
+#define KEY_FULL_SCREEN                0x174   /* AC View Toggle */
+#define KEY_ZOOM               KEY_FULL_SCREEN
 #define KEY_MODE               0x175
 #define KEY_KEYBOARD           0x176
-#define KEY_SCREEN             0x177
+#define KEY_ASPECT_RATIO       0x177   /* HUTRR37: Aspect */
+#define KEY_SCREEN             KEY_ASPECT_RATIO
 #define KEY_PC                 0x178   /* Media Select Computer */
 #define KEY_TV                 0x179   /* Media Select TV */
 #define KEY_TV2                        0x17a   /* Media Select Cable */
index b3bcabe380da8cf1c1e069bc98945bae1505551f..2fcad1dd0b0e00a68c37c345697fa133a996c054 100644 (file)
 
 #define PSCI_1_0_FN_PSCI_FEATURES              PSCI_0_2_FN(10)
 #define PSCI_1_0_FN_SYSTEM_SUSPEND             PSCI_0_2_FN(14)
+#define PSCI_1_0_FN_SET_SUSPEND_MODE           PSCI_0_2_FN(15)
+#define PSCI_1_1_FN_SYSTEM_RESET2              PSCI_0_2_FN(18)
 
 #define PSCI_1_0_FN64_SYSTEM_SUSPEND           PSCI_0_2_FN64(14)
+#define PSCI_1_1_FN64_SYSTEM_RESET2            PSCI_0_2_FN64(18)
 
 /* PSCI v0.2 power state encoding for CPU_SUSPEND function */
 #define PSCI_0_2_POWER_STATE_ID_MASK           0xffff
 #define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK  \
                        (0x1 << PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT)
 
+#define PSCI_1_0_OS_INITIATED                  BIT(0)
+#define PSCI_1_0_SUSPEND_MODE_PC               0
+#define PSCI_1_0_SUSPEND_MODE_OSI              1
+
 /* PSCI return values (inclusive of all PSCI versions) */
 #define PSCI_RET_SUCCESS                       0
 #define PSCI_RET_NOT_SUPPORTED                 -1
index 87b3198f4b5d7aa02c330f59178d8cc74dba5f6c..f4d4010b7e3e54f2bfc1d64a708c8454b5899b68 100644 (file)
@@ -238,6 +238,7 @@ enum mlx5_ib_query_dev_resp_flags {
        MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
        MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD  = 1 << 1,
        MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2,
+       MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT = 1 << 3,
 };
 
 enum mlx5_ib_tunnel_offloads {
index 598e278b46f743d777e6f9375ac1c932896ef99c..7d4025d665eb95ee439ddb4e5e564aff8fa09c24 100644 (file)
@@ -582,6 +582,8 @@ asmlinkage __visible void __init start_kernel(void)
        page_alloc_init();
 
        pr_notice("Kernel command line: %s\n", boot_command_line);
+       /* parameters may set static keys */
+       jump_label_init();
        parse_early_param();
        after_dashes = parse_args("Booting kernel",
                                  static_command_line, __start___param,
@@ -591,8 +593,6 @@ asmlinkage __visible void __init start_kernel(void)
                parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
                           NULL, set_init_arg);
 
-       jump_label_init();
-
        /*
         * These use large bootmem allocations and must precede
         * kmem_cache_init()
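
Moving jump_label_init() ahead of parse_early_param() lets an early parameter handler flip a static key safely during boot. A sketch of the kind of handler this ordering enables (the key and parameter names are illustrative, not part of this commit):

/* Hypothetical early_param that toggles a static key at boot. */
DEFINE_STATIC_KEY_FALSE(my_boot_feature);

static int __init my_boot_feature_setup(char *str)
{
	static_branch_enable(&my_boot_feature);	/* valid only after jump_label_init() */
	return 0;
}
early_param("my_boot_feature", my_boot_feature_setup);
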
index 534e01e7bc36854877d3361fcf2683ab09025d91..dc7dead2d2cc2cfcd0448643650a878b90653a21 100644 (file)
@@ -9077,26 +9077,29 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
        if (task == TASK_TOMBSTONE)
                return;
 
-       if (!ifh->nr_file_filters)
-               return;
-
-       mm = get_task_mm(event->ctx->task);
-       if (!mm)
-               goto restart;
+       if (ifh->nr_file_filters) {
+               mm = get_task_mm(event->ctx->task);
+               if (!mm)
+                       goto restart;
 
-       down_read(&mm->mmap_sem);
+               down_read(&mm->mmap_sem);
+       }
 
        raw_spin_lock_irqsave(&ifh->lock, flags);
        list_for_each_entry(filter, &ifh->list, entry) {
-               event->addr_filter_ranges[count].start = 0;
-               event->addr_filter_ranges[count].size = 0;
+               if (filter->path.dentry) {
+                       /*
+                        * Adjust base offset if the filter is associated to a
+                        * binary that needs to be mapped:
+                        */
+                       event->addr_filter_ranges[count].start = 0;
+                       event->addr_filter_ranges[count].size = 0;
 
-               /*
-                * Adjust base offset if the filter is associated to a binary
-                * that needs to be mapped:
-                */
-               if (filter->path.dentry)
                        perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
+               } else {
+                       event->addr_filter_ranges[count].start = filter->offset;
+                       event->addr_filter_ranges[count].size  = filter->size;
+               }
 
                count++;
        }
@@ -9104,9 +9107,11 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
        event->addr_filters_gen++;
        raw_spin_unlock_irqrestore(&ifh->lock, flags);
 
-       up_read(&mm->mmap_sem);
+       if (ifh->nr_file_filters) {
+               up_read(&mm->mmap_sem);
 
-       mmput(mm);
+               mmput(mm);
+       }
 
 restart:
        perf_event_stop(event, 1);
index 2545ac08cc77b0bb1df9df04be353bbe6a7a0575..5eedb49a65ea2c8f6e7ea8db916b1320806df0c9 100644 (file)
@@ -455,24 +455,21 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
                rb->aux_head += size;
        }
 
-       if (size || handle->aux_flags) {
-               /*
-                * Only send RECORD_AUX if we have something useful to communicate
-                *
-                * Note: the OVERWRITE records by themselves are not considered
-                * useful, as they don't communicate any *new* information,
-                * aside from the short-lived offset, that becomes history at
-                * the next event sched-in and therefore isn't useful.
-                * The userspace that needs to copy out AUX data in overwrite
-                * mode should know to use user_page::aux_head for the actual
-                * offset. So, from now on we don't output AUX records that
-                * have *only* OVERWRITE flag set.
-                */
-
-               if (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE)
-                       perf_event_aux_event(handle->event, aux_head, size,
-                                            handle->aux_flags);
-       }
+       /*
+        * Only send RECORD_AUX if we have something useful to communicate
+        *
+        * Note: the OVERWRITE records by themselves are not considered
+        * useful, as they don't communicate any *new* information,
+        * aside from the short-lived offset, that becomes history at
+        * the next event sched-in and therefore isn't useful.
+        * The userspace that needs to copy out AUX data in overwrite
+        * mode should know to use user_page::aux_head for the actual
+        * offset. So, from now on we don't output AUX records that
+        * have *only* OVERWRITE flag set.
+        */
+       if (size || (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE))
+               perf_event_aux_event(handle->event, aux_head, size,
+                                    handle->aux_flags);
 
        rb->user_page->aux_head = rb->aux_head;
        if (rb_need_aux_wakeup(rb))
index c83e547271312e9f32ed5447fdb9eef1b8e2c2e7..b1ea30a5540e9e1af5f1c86e5590195b6f00bc9d 100644 (file)
@@ -709,7 +709,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 static int reuse_unused_kprobe(struct kprobe *ap)
 {
        struct optimized_kprobe *op;
-       int ret;
 
        /*
         * Unused kprobe MUST be on the way of delayed unoptimizing (means
@@ -720,9 +719,8 @@ static int reuse_unused_kprobe(struct kprobe *ap)
        /* Enable the probe again */
        ap->flags &= ~KPROBE_FLAG_DISABLED;
        /* Optimize it again (remove from op->list) */
-       ret = kprobe_optready(ap);
-       if (ret)
-               return ret;
+       if (!kprobe_optready(ap))
+               return -EINVAL;
 
        optimize_kprobe(ap);
        return 0;
index e16766ff184b5e57d636e987a70a1147d3b3ecb2..e221be724fe82f0dbf7dc0426041b3ab6e5f01cf 100644 (file)
@@ -4907,8 +4907,9 @@ void lockdep_unregister_key(struct lock_class_key *key)
                return;
 
        raw_local_irq_save(flags);
-       arch_spin_lock(&lockdep_lock);
-       current->lockdep_recursion = 1;
+       if (!graph_lock())
+               goto out_irq;
+
        pf = get_pending_free();
        hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
                if (k == key) {
@@ -4920,8 +4921,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
        WARN_ON_ONCE(!found);
        __lockdep_free_key_range(pf, key, 1);
        call_rcu_zapped(pf);
-       current->lockdep_recursion = 0;
-       arch_spin_unlock(&lockdep_lock);
+       graph_unlock();
+out_irq:
        raw_local_irq_restore(flags);
 
        /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
index abef759de7c8fb4a8ece278fd7b7730d5b5e41ab..cc105ecd9c07e8948773b3ae3e378b2518a7592c 100644 (file)
@@ -14,7 +14,6 @@
 
 #include <linux/export.h>
 #include <linux/suspend.h>
-#include <linux/syscalls.h>
 #include <linux/reboot.h>
 #include <linux/string.h>
 #include <linux/device.h>
@@ -709,9 +708,7 @@ int hibernate(void)
                goto Exit;
        }
 
-       pr_info("Syncing filesystems ... \n");
-       ksys_sync();
-       pr_info("done.\n");
+       ksys_sync_helper();
 
        error = freeze_processes();
        if (error)
index 98e76cad128b81c37a8a8f9592884fd9daf95c56..4f43e724f6eb34cb61ae649cad563e61115e8a8c 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/suspend.h>
+#include <linux/syscalls.h>
 
 #include "power.h"
 
@@ -51,6 +52,19 @@ void unlock_system_sleep(void)
 }
 EXPORT_SYMBOL_GPL(unlock_system_sleep);
 
+void ksys_sync_helper(void)
+{
+       ktime_t start;
+       long elapsed_msecs;
+
+       start = ktime_get();
+       ksys_sync();
+       elapsed_msecs = ktime_to_ms(ktime_sub(ktime_get(), start));
+       pr_info("Filesystems sync: %ld.%03ld seconds\n",
+               elapsed_msecs / MSEC_PER_SEC, elapsed_msecs % MSEC_PER_SEC);
+}
+EXPORT_SYMBOL_GPL(ksys_sync_helper);
+
 /* Routines for PM-transition notifications */
 
 static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
index 0bd595a0b6103c56439871b765ef0d4fb5ac672b..e39059dea38b4aefa8f1a1ae3252414cfe52f9dc 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/cpuidle.h>
-#include <linux/syscalls.h>
 #include <linux/gfp.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
@@ -568,13 +567,11 @@ static int enter_state(suspend_state_t state)
        if (state == PM_SUSPEND_TO_IDLE)
                s2idle_begin();
 
-#ifndef CONFIG_SUSPEND_SKIP_SYNC
-       trace_suspend_resume(TPS("sync_filesystems"), 0, true);
-       pr_info("Syncing filesystems ... ");
-       ksys_sync();
-       pr_cont("done.\n");
-       trace_suspend_resume(TPS("sync_filesystems"), 0, false);
-#endif
+       if (!IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC)) {
+               trace_suspend_resume(TPS("sync_filesystems"), 0, true);
+               ksys_sync_helper();
+               trace_suspend_resume(TPS("sync_filesystems"), 0, false);
+       }
 
        pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]);
        pm_suspend_clear_flags();
index 2d8b60a3c86b9f768cbe74e82f843c776a00619c..cb24e840a3e6bec098ffc67eae3262c0ef20c6ed 100644 (file)
@@ -10,7 +10,6 @@
  */
 
 #include <linux/suspend.h>
-#include <linux/syscalls.h>
 #include <linux/reboot.h>
 #include <linux/string.h>
 #include <linux/device.h>
@@ -228,9 +227,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                if (data->frozen)
                        break;
 
-               printk("Syncing filesystems ... ");
-               ksys_sync();
-               printk("done.\n");
+               ksys_sync_helper();
 
                error = freeze_processes();
                if (error)
index 5c41ea3674223616b0fb4887681d895748fca4f3..b3a878aa593d16d32ac806c06cd0d48bbedd7d4b 100644 (file)
@@ -13,6 +13,8 @@
 #include <linux/sched/cpufreq.h>
 #include <trace/events/power.h>
 
+#define IOWAIT_BOOST_MIN       (SCHED_CAPACITY_SCALE / 8)
+
 struct sugov_tunables {
        struct gov_attr_set     attr_set;
        unsigned int            rate_limit_us;
@@ -51,7 +53,6 @@ struct sugov_cpu {
        u64                     last_update;
 
        unsigned long           bw_dl;
-       unsigned long           min;
        unsigned long           max;
 
        /* The field below is for single-CPU policies only: */
@@ -291,8 +292,8 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
  *
  * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
- * we enable the boost starting from the minimum frequency, which improves
- * energy efficiency by ignoring sporadic wakeups from IO.
+ * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
+ * efficiency by ignoring sporadic wakeups from IO.
  */
 static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
                               bool set_iowait_boost)
@@ -303,7 +304,7 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
        if (delta_ns <= TICK_NSEC)
                return false;
 
-       sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
+       sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
        sg_cpu->iowait_boost_pending = set_iowait_boost;
 
        return true;
@@ -317,8 +318,9 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
  *
  * Each time a task wakes up after an IO operation, the CPU utilization can be
  * boosted to a certain utilization which doubles at each "frequent and
- * successive" wakeup from IO, ranging from the utilization of the minimum
- * OPP to the utilization of the maximum OPP.
+ * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
+ * of the maximum OPP.
+ *
  * To keep doubling, an IO boost has to be requested at least once per tick,
  * otherwise we restart from the utilization of the minimum OPP.
  */
@@ -349,7 +351,7 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
        }
 
        /* First wakeup after IO: start with minimum boost */
-       sg_cpu->iowait_boost = sg_cpu->min;
+       sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
 }
 
 /**
@@ -389,7 +391,7 @@ static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
                 * No boost pending; reduce the boost value.
                 */
                sg_cpu->iowait_boost >>= 1;
-               if (sg_cpu->iowait_boost < sg_cpu->min) {
+               if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
                        sg_cpu->iowait_boost = 0;
                        return util;
                }
@@ -826,9 +828,6 @@ static int sugov_start(struct cpufreq_policy *policy)
                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->cpu                     = cpu;
                sg_cpu->sg_policy               = sg_policy;
-               sg_cpu->min                     =
-                       (SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
-                       policy->cpuinfo.max_freq;
        }
 
        for_each_cpu(cpu, policy->cpus) {
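
With the per-CPU min field gone, the boost ramp always starts from the fixed IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8, i.e. 128 on the usual 1024 scale) and doubles on each qualifying IO wakeup. A worked sketch of the ramp the comments above describe:

	/* Sketch: boost after N back-to-back IO wakeups, each within a tick. */
	unsigned long boost = IOWAIT_BOOST_MIN;	/* wakeup 1: 128 */

	boost = min(boost << 1, (unsigned long)SCHED_CAPACITY_SCALE);	/* 2: 256 */
	boost = min(boost << 1, (unsigned long)SCHED_CAPACITY_SCALE);	/* 3: 512 */
	boost = min(boost << 1, (unsigned long)SCHED_CAPACITY_SCALE);	/* 4: 1024, max OPP */
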
index 6a73e41a20160bf760e09ca050a82657ccf95347..43901fa3f26932d334f34f4b474a3cb55821513a 100644 (file)
@@ -252,7 +252,6 @@ static void task_non_contending(struct task_struct *p)
        if (dl_entity_is_special(dl_se))
                return;
 
-       WARN_ON(hrtimer_active(&dl_se->inactive_timer));
        WARN_ON(dl_se->dl_non_contending);
 
        zerolag_time = dl_se->deadline -
@@ -269,7 +268,7 @@ static void task_non_contending(struct task_struct *p)
         * If the "0-lag time" already passed, decrease the active
         * utilization now, instead of starting a timer
         */
-       if (zerolag_time < 0) {
+       if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
                if (dl_task(p))
                        sub_running_bw(dl_se, dl_rq);
                if (!dl_task(p) || p->state == TASK_DEAD) {
index 40bd1e27b1b79f88e3ba23b413e5d0d7f1b28094..35f3ea3750844c2a8789e7b6312bf8432caa1e5a 100644 (file)
@@ -2007,6 +2007,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
        if (p->last_task_numa_placement) {
                delta = runtime - p->last_sum_exec_runtime;
                *period = now - p->last_task_numa_placement;
+
+               /* Avoid time going backwards, prevent potential divide error: */
+               if (unlikely((s64)*period < 0))
+                       *period = 0;
        } else {
                delta = p->se.avg.load_sum;
                *period = LOAD_AVG_MAX;
@@ -4885,6 +4889,8 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
+extern const u64 max_cfs_quota_period;
+
 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 {
        struct cfs_bandwidth *cfs_b =
@@ -4892,6 +4898,7 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
        unsigned long flags;
        int overrun;
        int idle = 0;
+       int count = 0;
 
        raw_spin_lock_irqsave(&cfs_b->lock, flags);
        for (;;) {
@@ -4899,6 +4906,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
                if (!overrun)
                        break;
 
+               if (++count > 3) {
+                       u64 new, old = ktime_to_ns(cfs_b->period);
+
+                       new = (old * 147) / 128; /* ~115% */
+                       new = min(new, max_cfs_quota_period);
+
+                       cfs_b->period = ns_to_ktime(new);
+
+                       /* since max is 1s, this is limited to 1e9^2, which fits in u64 */
+                       cfs_b->quota *= new;
+                       cfs_b->quota = div64_u64(cfs_b->quota, old);
+
+                       pr_warn_ratelimited(
+       "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
+                               smp_processor_id(),
+                               div_u64(new, NSEC_PER_USEC),
+                               div_u64(cfs_b->quota, NSEC_PER_USEC));
+
+                       /* reset count so we don't come right back in here */
+                       count = 0;
+               }
+
                idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
        }
        if (idle)
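
The new branch fires after more than three consecutive timer overruns, taking that as evidence the configured cfs period is too short for the machine, and scales period and quota by the same 147/128 factor (~115%) so the allowed bandwidth fraction is unchanged. Worked numbers, assuming a 100 us period with a 50 us quota:

	u64 old_period = 100 * NSEC_PER_USEC;
	u64 quota      =  50 * NSEC_PER_USEC;
	u64 new_period = (old_period * 147) / 128;		/* ~114.8 us */

	quota = div64_u64(quota * new_period, old_period);	/* ~57.4 us, same ratio */
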
index f98448cf2defb5b2f212d005dcfa2a899252ae25..227ba170298e5b457c9b405c5376c466fe26850b 100644 (file)
@@ -3581,7 +3581,7 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
        if (flags)
                return -EINVAL;
 
-       f = fdget_raw(pidfd);
+       f = fdget(pidfd);
        if (!f.file)
                return -EBADF;
 
index 094b82ca95e524149d201ed51ae938bbaef7eedd..930113b9799acb85c3c83263a56129b06a8d5ed6 100644 (file)
@@ -272,7 +272,7 @@ static u64 notrace suspended_sched_clock_read(void)
        return cd.read_data[seq & 1].epoch_cyc;
 }
 
-static int sched_clock_suspend(void)
+int sched_clock_suspend(void)
 {
        struct clock_read_data *rd = &cd.read_data[0];
 
@@ -283,7 +283,7 @@ static int sched_clock_suspend(void)
        return 0;
 }
 
-static void sched_clock_resume(void)
+void sched_clock_resume(void)
 {
        struct clock_read_data *rd = &cd.read_data[0];
 
index 529143b4c8d2a5212ded80e543d74b2cda8b8744..df401463a19131dc71b1e395577907c80f7f233c 100644 (file)
@@ -487,6 +487,7 @@ void tick_freeze(void)
                trace_suspend_resume(TPS("timekeeping_freeze"),
                                     smp_processor_id(), true);
                system_state = SYSTEM_SUSPEND;
+               sched_clock_suspend();
                timekeeping_suspend();
        } else {
                tick_suspend_local();
@@ -510,6 +511,7 @@ void tick_unfreeze(void)
 
        if (tick_freeze_depth == num_online_cpus()) {
                timekeeping_resume();
+               sched_clock_resume();
                system_state = SYSTEM_RUNNING;
                trace_suspend_resume(TPS("timekeeping_freeze"),
                                     smp_processor_id(), false);
index 6fa52cd6df0be2f53c8e79bf0e8cf16bcf0310f0..8d18e03124ff4ce2d629b6f81951626ae1e76d86 100644 (file)
@@ -1022,6 +1022,18 @@ bool tick_nohz_idle_got_tick(void)
        return false;
 }
 
+/**
+ * tick_nohz_get_next_hrtimer - return the next expiration time of the hrtimer
+ * or the tick, whichever expires first. Note that if the tick has been
+ * stopped, the next hrtimer expiry is returned.
+ *
+ * Called from power state control code with interrupts disabled
+ */
+ktime_t tick_nohz_get_next_hrtimer(void)
+{
+       return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
+}
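
A hedged sketch of how an idle-governor caller might turn the returned expiry into a sleep-length estimate (interrupts are assumed disabled, as the kernel-doc requires):

	ktime_t next = tick_nohz_get_next_hrtimer();
	s64 sleep_ns = ktime_to_ns(ktime_sub(next, ktime_get()));

	if (sleep_ns < 0)
		sleep_ns = 0;	/* the event is already due */
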
+
 /**
  * tick_nohz_get_sleep_length - return the expected length of the current sleep
  * @delta_next: duration until the next event if the tick cannot be stopped
index 7a9b4eb7a1d5bde85e7a7b1c7747602cdd605975..141ab3ab0354f39fdb5daf7274e8d061d90a556c 100644 (file)
@@ -14,6 +14,13 @@ extern u64 timekeeping_max_deferment(void);
 extern void timekeeping_warp_clock(void);
 extern int timekeeping_suspend(void);
 extern void timekeeping_resume(void);
+#ifdef CONFIG_GENERIC_SCHED_CLOCK
+extern int sched_clock_suspend(void);
+extern void sched_clock_resume(void);
+#else
+static inline int sched_clock_suspend(void) { return 0; }
+static inline void sched_clock_resume(void) { }
+#endif
 
 extern void do_timer(unsigned long ticks);
 extern void update_wall_time(void);
index 26c8ca9bd06b6725b84f42d6b635c0d256118ebb..b920358dd8f7f8cfcd226ba046e786901699c53a 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/list.h>
 #include <linux/hash.h>
 #include <linux/rcupdate.h>
+#include <linux/kprobes.h>
 
 #include <trace/events/sched.h>
 
@@ -6246,7 +6247,7 @@ void ftrace_reset_array_ops(struct trace_array *tr)
        tr->ops->func = ftrace_stub;
 }
 
-static inline void
+static nokprobe_inline void
 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *ignored, struct pt_regs *regs)
 {
@@ -6306,11 +6307,13 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 {
        __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
 }
+NOKPROBE_SYMBOL(ftrace_ops_list_func);
 #else
 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
 {
        __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
 }
+NOKPROBE_SYMBOL(ftrace_ops_no_ops);
 #endif
 
 /*
@@ -6337,6 +6340,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
        preempt_enable_notrace();
        trace_clear_recursion(bit);
 }
+NOKPROBE_SYMBOL(ftrace_ops_assist_func);
 
 /**
  * ftrace_ops_get_func - get the function a trampoline should call
index 41b6f96e5366231d72454e6c33015188066751ae..4ee8d8aa3d0fdcfe6dac6ea91c4ee96cc9330835 100644 (file)
@@ -762,7 +762,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 
        preempt_disable_notrace();
        time = rb_time_stamp(buffer);
-       preempt_enable_no_resched_notrace();
+       preempt_enable_notrace();
 
        return time;
 }
index 6c24755655c752a3bf9f4bb914ddb251d9ab0d2e..ca1ee656d6d852952670b0e63fd19f7499c679a6 100644 (file)
@@ -496,8 +496,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
         * not modified.
         */
        pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
-       if (!pid_list)
+       if (!pid_list) {
+               trace_parser_put(&parser);
                return -ENOMEM;
+       }
 
        pid_list->pid_max = READ_ONCE(pid_max);
 
@@ -507,6 +509,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
 
        pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
        if (!pid_list->pids) {
+               trace_parser_put(&parser);
                kfree(pid_list);
                return -ENOMEM;
        }
@@ -7025,19 +7028,23 @@ struct buffer_ref {
        struct ring_buffer      *buffer;
        void                    *page;
        int                     cpu;
-       int                     ref;
+       refcount_t              refcount;
 };
 
+static void buffer_ref_release(struct buffer_ref *ref)
+{
+       if (!refcount_dec_and_test(&ref->refcount))
+               return;
+       ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
+       kfree(ref);
+}
+
 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
                                    struct pipe_buffer *buf)
 {
        struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 
-       if (--ref->ref)
-               return;
-
-       ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
-       kfree(ref);
+       buffer_ref_release(ref);
        buf->private = 0;
 }
 
@@ -7046,10 +7053,10 @@ static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 {
        struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 
-       if (ref->ref > INT_MAX/2)
+       if (refcount_read(&ref->refcount) > INT_MAX/2)
                return false;
 
-       ref->ref++;
+       refcount_inc(&ref->refcount);
        return true;
 }
 
@@ -7057,7 +7064,7 @@ static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
        .confirm                = generic_pipe_buf_confirm,
        .release                = buffer_pipe_buf_release,
-       .steal                  = generic_pipe_buf_steal,
+       .steal                  = generic_pipe_buf_nosteal,
        .get                    = buffer_pipe_buf_get,
 };
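
Switching the plain int ref to refcount_t means overflow and underflow now warn and saturate instead of silently wrapping, and buffer_ref_release() centralizes the dec-and-free. The lifecycle in miniature, as a sketch (allocation and page setup elided):

	refcount_set(&ref->refcount, 1);	/* creator's reference */
	refcount_inc(&ref->refcount);		/* e.g. buffer_pipe_buf_get() */
	buffer_ref_release(ref);		/* 2 -> 1, nothing freed */
	buffer_ref_release(ref);		/* 1 -> 0, read page and ref freed */
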
 
@@ -7070,11 +7077,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
        struct buffer_ref *ref =
                (struct buffer_ref *)spd->partial[i].private;
 
-       if (--ref->ref)
-               return;
-
-       ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
-       kfree(ref);
+       buffer_ref_release(ref);
        spd->partial[i].private = 0;
 }
 
@@ -7129,7 +7132,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                        break;
                }
 
-               ref->ref = 1;
+               refcount_set(&ref->refcount, 1);
                ref->buffer = iter->trace_buffer->buffer;
                ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
                if (IS_ERR(ref->page)) {
index 71381168dedef4e88382a1849412f554a4cb4a56..247bf0b1582ca1cf352f006aa1fd5f689f8f5859 100644 (file)
@@ -135,7 +135,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;
 
-               pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+               pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
+                        this_cpu);
                print_modules();
                print_irqtrace_events(current);
                if (regs)
index 0d9e81779e373745c3e28497df424b415a50c3ac..d5a4a4036d2f83db9df1c5e2183380f900e62ca3 100644 (file)
@@ -753,9 +753,9 @@ endmenu # "Memory Debugging"
 config ARCH_HAS_KCOV
        bool
        help
-         KCOV does not have any arch-specific code, but currently it is enabled
-         only for x86_64. KCOV requires testing on other archs, and most likely
-         disabling of instrumentation for some early boot code.
+         An architecture should select this when it can successfully
+         build and run with CONFIG_KCOV. This typically requires
+         disabling instrumentation for some early boot code.
 
 config CC_HAS_SANCOV_TRACE_PC
        def_bool $(cc-option,-fsanitize-coverage=trace-pc)
@@ -1929,6 +1929,7 @@ config TEST_KMOD
        depends on m
        depends on BLOCK && (64BIT || LBDAF)      # for XFS, BTRFS
        depends on NETDEVICES && NET_CORE && INET # for TUN
+       depends on BLOCK
        select TEST_LKM
        select XFS_FS
        select TUN
index 83cdcaa82bf6cbc9a78640795bc2bbd869ba92bf..f832b095afba011293b6a3da18a2170b55501325 100644 (file)
@@ -383,14 +383,14 @@ static void shuffle_array(int *arr, int n)
 static int test_func(void *private)
 {
        struct test_driver *t = private;
-       cpumask_t newmask = CPU_MASK_NONE;
        int random_array[ARRAY_SIZE(test_case_array)];
        int index, i, j, ret;
        ktime_t kt;
        u64 delta;
 
-       cpumask_set_cpu(t->cpu, &newmask);
-       set_cpus_allowed_ptr(current, &newmask);
+       ret = set_cpus_allowed_ptr(current, cpumask_of(t->cpu));
+       if (ret < 0)
+               pr_err("Failed to set affinity to CPU %d\n", t->cpu);
 
        for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
                random_array[i] = i;
index 6c318f5ac234f40237aec9a0bca5c9e7791cd5dc..2e435b8142e51ac9237110b451b750a6d4423fcc 100644 (file)
@@ -1401,6 +1401,7 @@ static void scan_block(void *_start, void *_end,
 /*
  * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
  */
+#ifdef CONFIG_SMP
 static void scan_large_block(void *start, void *end)
 {
        void *next;
@@ -1412,6 +1413,7 @@ static void scan_large_block(void *start, void *end)
                cond_resched();
        }
 }
+#endif
 
 /*
  * Scan a memory block corresponding to a kmemleak_object. A condition is
index 0082d699be94b4c28e1820351916568e68a684bb..b236069ff0d823ce92a84222494d42bdfa97c20c 100644 (file)
@@ -874,6 +874,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
         */
        mem = find_memory_block(__pfn_to_section(pfn));
        nid = mem->nid;
+       put_device(&mem->dev);
 
        /* associate pfn range with the zone */
        zone = move_pfn_range(online_type, nid, pfn, nr_pages);
index 41eb48d9b5276733e48b95f1addfcb228becd993..bd7b9f293b391f22b85810e48bc7c0679b217f05 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -45,6 +45,7 @@
 #include <linux/moduleparam.h>
 #include <linux/pkeys.h>
 #include <linux/oom.h>
+#include <linux/sched/mm.h>
 
 #include <linux/uaccess.h>
 #include <asm/cacheflush.h>
@@ -2525,7 +2526,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
        vma = find_vma_prev(mm, addr, &prev);
        if (vma && (vma->vm_start <= addr))
                return vma;
-       if (!prev || expand_stack(prev, addr))
+       /* don't alter vm_end if the coredump is running */
+       if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
                return NULL;
        if (prev->vm_flags & VM_LOCKED)
                populate_vma_page_range(prev, addr, prev->vm_end, NULL);
@@ -2551,6 +2553,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
                return vma;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return NULL;
+       /* don't alter vm_start if the coredump is running */
+       if (!mmget_still_valid(mm))
+               return NULL;
        start = vma->vm_start;
        if (expand_stack(vma, addr))
                return NULL;
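
Both stack-expansion paths now refuse to grow a VMA once a coredump has snapshotted the mm, closing a race between expand_stack() and the dump. A sketch of the predicate these hunks rely on (in kernels of this era it tests mm->core_state; shown for illustration, not quoted from this commit):

static inline bool mmget_still_valid(struct mm_struct *mm)
{
	return likely(!mm->core_state);	/* false once a coredump is in progress */
}
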
index d96ca5bc555bbc432e135c876151e0699ee88162..c02cff1ed56eb231fef3a5ffacfd9aa64d8dda54 100644 (file)
@@ -266,7 +266,20 @@ compound_page_dtor * const compound_page_dtors[] = {
 
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
+#ifdef CONFIG_DISCONTIGMEM
+/*
+ * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
+ * are not on separate NUMA nodes. Functionally this works but with
+ * watermark_boost_factor, it can reclaim prematurely as the ranges can be
+ * quite small. By default, do not boost watermarks on discontigmem as in
+ * many cases very high-order allocations like THP are likely to be
+ * unsupported and the premature reclaim offsets the advantage of long-term
+ * fragmentation avoidance.
+ */
+int watermark_boost_factor __read_mostly;
+#else
 int watermark_boost_factor __read_mostly = 15000;
+#endif
 int watermark_scale_factor = 10;
 
 static unsigned long nr_kernel_pages __initdata;
@@ -3419,8 +3432,11 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
                alloc_flags |= ALLOC_KSWAPD;
 
 #ifdef CONFIG_ZONE_DMA32
+       if (!zone)
+               return alloc_flags;
+
        if (zone_idx(zone) != ZONE_NORMAL)
-               goto out;
+               return alloc_flags;
 
        /*
         * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
@@ -3429,9 +3445,9 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
         */
        BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
        if (nr_online_nodes > 1 && !populated_zone(--zone))
-               goto out;
+               return alloc_flags;
 
-out:
+       alloc_flags |= ALLOC_NOFRAGMENT;
 #endif /* CONFIG_ZONE_DMA32 */
        return alloc_flags;
 }
@@ -3773,11 +3789,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        memalloc_noreclaim_restore(noreclaim_flag);
        psi_memstall_leave(&pflags);
 
-       if (*compact_result <= COMPACT_INACTIVE) {
-               WARN_ON_ONCE(page);
-               return NULL;
-       }
-
        /*
         * At least in one zone compaction wasn't deferred or skipped, so let's
         * count a compaction stall
@@ -8005,7 +8016,10 @@ void *__init alloc_large_system_hash(const char *tablename,
 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                         int migratetype, int flags)
 {
-       unsigned long pfn, iter, found;
+       unsigned long found;
+       unsigned long iter = 0;
+       unsigned long pfn = page_to_pfn(page);
+       const char *reason = "unmovable page";
 
        /*
         * TODO we could make this much more efficient by not checking every
@@ -8015,17 +8029,20 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
         * can still lead to having bootmem allocations in zone_movable.
         */
 
-       /*
-        * CMA allocations (alloc_contig_range) really need to mark isolate
-        * CMA pageblocks even when they are not movable in fact so consider
-        * them movable here.
-        */
-       if (is_migrate_cma(migratetype) &&
-                       is_migrate_cma(get_pageblock_migratetype(page)))
-               return false;
+       if (is_migrate_cma_page(page)) {
+               /*
+                * CMA allocations (alloc_contig_range) really need to mark
+                * isolate CMA pageblocks even when they are not movable in fact
+                * so consider them movable here.
+                */
+               if (is_migrate_cma(migratetype))
+                       return false;
+
+               reason = "CMA page";
+               goto unmovable;
+       }
 
-       pfn = page_to_pfn(page);
-       for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
+       for (found = 0; iter < pageblock_nr_pages; iter++) {
                unsigned long check = pfn + iter;
 
                if (!pfn_valid_within(check))
@@ -8105,7 +8122,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 unmovable:
        WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
        if (flags & REPORT_FAILURE)
-               dump_page(pfn_to_page(pfn+iter), "unmovable page");
+               dump_page(pfn_to_page(pfn + iter), reason);
        return true;
 }
 
index 2e6fc8d552c96d58f615be2fd3addadefd01f5c0..68dd2e7e73b5f29b2d3dfd2bd9e4b984244d7dc9 100644 (file)
@@ -2567,8 +2567,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
                ai->groups[group].base_offset = areas[group] - base;
        }
 
-       pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
-               PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
+       pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
+               PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
                ai->dyn_size, ai->unit_size);
 
        rc = pcpu_setup_first_chunk(ai, base);
@@ -2692,8 +2692,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
        }
 
        /* we're ready, commit */
-       pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
-               unit_pages, psize_str, vm.addr, ai->static_size,
+       pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
+               unit_pages, psize_str, ai->static_size,
                ai->reserved_size, ai->dyn_size);
 
        rc = pcpu_setup_first_chunk(ai, vm.addr);
index b3db3779a30a1f1fbe2d5cda71ddc402601926ef..2275a0ff7c3051d9674ee59b153451c6078741c3 100644 (file)
@@ -1081,9 +1081,14 @@ static void shmem_evict_inode(struct inode *inode)
                        }
                        spin_unlock(&sbinfo->shrinklist_lock);
                }
-               if (!list_empty(&info->swaplist)) {
+               while (!list_empty(&info->swaplist)) {
+                       /* Wait while shmem_unuse() is scanning this inode... */
+                       wait_var_event(&info->stop_eviction,
+                                      !atomic_read(&info->stop_eviction));
                        mutex_lock(&shmem_swaplist_mutex);
-                       list_del_init(&info->swaplist);
+                       /* ...but beware of the race if we peeked too early */
+                       if (!atomic_read(&info->stop_eviction))
+                               list_del_init(&info->swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
        }
@@ -1099,10 +1104,11 @@ extern struct swap_info_struct *swap_info[];
 static int shmem_find_swap_entries(struct address_space *mapping,
                                   pgoff_t start, unsigned int nr_entries,
                                   struct page **entries, pgoff_t *indices,
-                                  bool frontswap)
+                                  unsigned int type, bool frontswap)
 {
        XA_STATE(xas, &mapping->i_pages, start);
        struct page *page;
+       swp_entry_t entry;
        unsigned int ret = 0;
 
        if (!nr_entries)
@@ -1116,13 +1122,12 @@ static int shmem_find_swap_entries(struct address_space *mapping,
                if (!xa_is_value(page))
                        continue;
 
-               if (frontswap) {
-                       swp_entry_t entry = radix_to_swp_entry(page);
-
-                       if (!frontswap_test(swap_info[swp_type(entry)],
-                                           swp_offset(entry)))
-                               continue;
-               }
+               entry = radix_to_swp_entry(page);
+               if (swp_type(entry) != type)
+                       continue;
+               if (frontswap &&
+                   !frontswap_test(swap_info[type], swp_offset(entry)))
+                       continue;
 
                indices[ret] = xas.xa_index;
                entries[ret] = page;
@@ -1194,7 +1199,7 @@ static int shmem_unuse_inode(struct inode *inode, unsigned int type,
 
                pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
                                                  pvec.pages, indices,
-                                                 frontswap);
+                                                 type, frontswap);
                if (pvec.nr == 0) {
                        ret = 0;
                        break;
@@ -1227,36 +1232,27 @@ int shmem_unuse(unsigned int type, bool frontswap,
                unsigned long *fs_pages_to_unuse)
 {
        struct shmem_inode_info *info, *next;
-       struct inode *inode;
-       struct inode *prev_inode = NULL;
        int error = 0;
 
        if (list_empty(&shmem_swaplist))
                return 0;
 
        mutex_lock(&shmem_swaplist_mutex);
-
-       /*
-        * The extra refcount on the inode is necessary to safely dereference
-        * p->next after re-acquiring the lock. New shmem inodes with swap
-        * get added to the end of the list and we will scan them all.
-        */
        list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
                if (!info->swapped) {
                        list_del_init(&info->swaplist);
                        continue;
                }
-
-               inode = igrab(&info->vfs_inode);
-               if (!inode)
-                       continue;
-
+               /*
+                * Drop the swaplist mutex while searching the inode for swap;
+                * but before doing so, make sure shmem_evict_inode() will not
+                * remove placeholder inode from swaplist, nor let it be freed
+                * (igrab() would protect from unlink, but not from unmount).
+                */
+               atomic_inc(&info->stop_eviction);
                mutex_unlock(&shmem_swaplist_mutex);
-               if (prev_inode)
-                       iput(prev_inode);
-               prev_inode = inode;
 
-               error = shmem_unuse_inode(inode, type, frontswap,
+               error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
                                          fs_pages_to_unuse);
                cond_resched();
 
@@ -1264,14 +1260,13 @@ int shmem_unuse(unsigned int type, bool frontswap,
                next = list_next_entry(info, swaplist);
                if (!info->swapped)
                        list_del_init(&info->swaplist);
+               if (atomic_dec_and_test(&info->stop_eviction))
+                       wake_up_var(&info->stop_eviction);
                if (error)
                        break;
        }
        mutex_unlock(&shmem_swaplist_mutex);
 
-       if (prev_inode)
-               iput(prev_inode);
-
        return error;
 }
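
The stop_eviction counter acts as a sleeping pin: shmem_unuse() raises it before dropping the swaplist mutex, and shmem_evict_inode() (earlier hunk) waits until it drains before removing the inode from the swaplist. The pattern in isolation, as a sketch:

	/* Scanner: pin the inode, work unlocked, unpin and wake waiters. */
	atomic_inc(&info->stop_eviction);
	mutex_unlock(&shmem_swaplist_mutex);
	/* ... scan info->vfs_inode for swap entries ... */
	if (atomic_dec_and_test(&info->stop_eviction))
		wake_up_var(&info->stop_eviction);

	/* Evictor: sleep until no scanner holds the inode. */
	wait_var_event(&info->stop_eviction,
		       !atomic_read(&info->stop_eviction));
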
 
@@ -2238,6 +2233,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
                info = SHMEM_I(inode);
                memset(info, 0, (char *)inode - (char *)info);
                spin_lock_init(&info->lock);
+               atomic_set(&info->stop_eviction, 0);
                info->seals = F_SEAL_SEAL;
                info->flags = flags & VM_NORESERVE;
                INIT_LIST_HEAD(&info->shrinklist);
index 47a380a486eefdfb2c93434c920dcecb53e272b6..9142ee99249327f22224b32c5805002ec4615dd3 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2374,7 +2374,6 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
                /* Slab management obj is off-slab. */
                freelist = kmem_cache_alloc_node(cachep->freelist_cache,
                                              local_flags, nodeid);
-               freelist = kasan_reset_tag(freelist);
                if (!freelist)
                        return NULL;
        } else {
index 2b8d9c3fbb47fd7a5c2a711dad73c5889dfe0bb2..cf63b5f01adf7da9d1def93f8763b50243adc698 100644 (file)
@@ -2023,7 +2023,6 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
  * If the boolean frontswap is true, only unuse pages_to_unuse pages;
  * pages_to_unuse==0 means all pages; ignored if frontswap is false
  */
-#define SWAP_UNUSE_MAX_TRIES 3
 int try_to_unuse(unsigned int type, bool frontswap,
                 unsigned long pages_to_unuse)
 {
@@ -2035,7 +2034,6 @@ int try_to_unuse(unsigned int type, bool frontswap,
        struct page *page;
        swp_entry_t entry;
        unsigned int i;
-       int retries = 0;
 
        if (!si->inuse_pages)
                return 0;
@@ -2053,11 +2051,9 @@ int try_to_unuse(unsigned int type, bool frontswap,
 
        spin_lock(&mmlist_lock);
        p = &init_mm.mmlist;
-       while ((p = p->next) != &init_mm.mmlist) {
-               if (signal_pending(current)) {
-                       retval = -EINTR;
-                       break;
-               }
+       while (si->inuse_pages &&
+              !signal_pending(current) &&
+              (p = p->next) != &init_mm.mmlist) {
 
                mm = list_entry(p, struct mm_struct, mmlist);
                if (!mmget_not_zero(mm))
@@ -2084,7 +2080,9 @@ int try_to_unuse(unsigned int type, bool frontswap,
        mmput(prev_mm);
 
        i = 0;
-       while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
+       while (si->inuse_pages &&
+              !signal_pending(current) &&
+              (i = find_next_to_unuse(si, i, frontswap)) != 0) {
 
                entry = swp_entry(type, i);
                page = find_get_page(swap_address_space(entry), i);
@@ -2117,14 +2115,18 @@ int try_to_unuse(unsigned int type, bool frontswap,
         * If yes, we would need to do retry the unuse logic again.
         * Under global memory pressure, swap entries can be reinserted back
         * into process space after the mmlist loop above passes over them.
-        * Its not worth continuosuly retrying to unuse the swap in this case.
-        * So we try SWAP_UNUSE_MAX_TRIES times.
+        *
+        * Limit the number of retries? No: when mmget_not_zero() above fails,
+        * that mm is likely to be freeing swap from exit_mmap(), which proceeds
+        * at its own independent pace; and even shmem_writepage() could have
+        * been preempted after get_swap_page(), temporarily hiding that swap.
+        * It's easy and robust (though cpu-intensive) just to keep retrying.
         */
-       if (++retries >= SWAP_UNUSE_MAX_TRIES)
-               retval = -EBUSY;
-       else if (si->inuse_pages)
-               goto retry;
-
+       if (si->inuse_pages) {
+               if (!signal_pending(current))
+                       goto retry;
+               retval = -EINTR;
+       }
 out:
        return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval;
 }
index a5ad0b35ab8e3e6bea056baf2e5d8729f1003326..a815f73ee4d5b2d1a9872cca19db055845499aa1 100644 (file)
@@ -2176,7 +2176,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
  *   10TB     320        32GB
  */
 static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
-                                struct mem_cgroup *memcg,
                                 struct scan_control *sc, bool actual_reclaim)
 {
        enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
@@ -2197,16 +2196,12 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
        inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
        active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
 
-       if (memcg)
-               refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
-       else
-               refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
-
        /*
         * When refaults are being observed, it means a new workingset
         * is being established. Disable active list protection to get
         * rid of the stale workingset quickly.
         */
+       refaults = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE);
        if (file && actual_reclaim && lruvec->refaults != refaults) {
                inactive_ratio = 0;
        } else {
@@ -2227,12 +2222,10 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
 }
 
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
-                                struct lruvec *lruvec, struct mem_cgroup *memcg,
-                                struct scan_control *sc)
+                                struct lruvec *lruvec, struct scan_control *sc)
 {
        if (is_active_lru(lru)) {
-               if (inactive_list_is_low(lruvec, is_file_lru(lru),
-                                        memcg, sc, true))
+               if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true))
                        shrink_active_list(nr_to_scan, lruvec, sc, lru);
                return 0;
        }
@@ -2332,7 +2325,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
                         * anonymous pages on the LRU in eligible zones.
                         * Otherwise, the small LRU gets thrashed.
                         */
-                       if (!inactive_list_is_low(lruvec, false, memcg, sc, false) &&
+                       if (!inactive_list_is_low(lruvec, false, sc, false) &&
                            lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx)
                                        >> sc->priority) {
                                scan_balance = SCAN_ANON;
@@ -2350,7 +2343,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
         * lruvec even if it has plenty of old anonymous pages unless the
         * system is under heavy pressure.
         */
-       if (!inactive_list_is_low(lruvec, true, memcg, sc, false) &&
+       if (!inactive_list_is_low(lruvec, true, sc, false) &&
            lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
                scan_balance = SCAN_FILE;
                goto out;
@@ -2503,7 +2496,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
                                nr[lru] -= nr_to_scan;
 
                                nr_reclaimed += shrink_list(lru, nr_to_scan,
-                                                           lruvec, memcg, sc);
+                                                           lruvec, sc);
                        }
                }
 
@@ -2570,7 +2563,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
         * Even if we did not try to evict anon pages at all, we want to
         * rebalance the anon lru active/inactive ratio.
         */
-       if (inactive_list_is_low(lruvec, false, memcg, sc, true))
+       if (inactive_list_is_low(lruvec, false, sc, true))
                shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
                                   sc, LRU_ACTIVE_ANON);
 }
@@ -2969,12 +2962,8 @@ static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
                unsigned long refaults;
                struct lruvec *lruvec;
 
-               if (memcg)
-                       refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
-               else
-                       refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
-
                lruvec = mem_cgroup_lruvec(pgdat, memcg);
+               refaults = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE);
                lruvec->refaults = refaults;
        } while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL)));
 }
@@ -3339,7 +3328,7 @@ static void age_active_anon(struct pglist_data *pgdat,
        do {
                struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 
-               if (inactive_list_is_low(lruvec, false, memcg, sc, true))
+               if (inactive_list_is_low(lruvec, false, sc, true))
                        shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
                                           sc, LRU_ACTIVE_ANON);
 
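
Both vmscan.c hunks above replace an open-coded "memcg present or not?" branch with a single lruvec_page_state() call. A standalone sketch of that accessor-consolidation pattern, using stand-in types rather than the kernel's real structures:

    #include <stdio.h>

    struct memcg { unsigned long stat[4]; };
    struct pgdat { unsigned long stat[4]; };
    struct lruvec {
            struct memcg *memcg;            /* NULL when memcg is off */
            struct pgdat *pgdat;
    };

    /* One helper owns the "memcg or node?" branch, so callers such as
     * inactive_list_is_low() and snapshot_refaults() stop duplicating it. */
    static unsigned long lruvec_page_state(struct lruvec *l, int item)
    {
            return l->memcg ? l->memcg->stat[item] : l->pgdat->stat[item];
    }

    int main(void)
    {
            struct pgdat node = { .stat = { [1] = 42 } };
            struct lruvec lv = { .memcg = NULL, .pgdat = &node };

            printf("refaults = %lu\n", lruvec_page_state(&lv, 1));
            return 0;
    }
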
index 36b56f858f0f0a8eb8db003d2b2a74a21a4c37f1..a7d493366a65b31b547ef25d34b94f2417895372 100644 (file)
@@ -1274,13 +1274,8 @@ const char * const vmstat_text[] = {
 #endif
 #endif /* CONFIG_MEMORY_BALLOON */
 #ifdef CONFIG_DEBUG_TLBFLUSH
-#ifdef CONFIG_SMP
        "nr_tlb_remote_flush",
        "nr_tlb_remote_flush_received",
-#else
-       "", /* nr_tlb_remote_flush */
-       "", /* nr_tlb_remote_flush_received */
-#endif /* CONFIG_SMP */
        "nr_tlb_local_flush_all",
        "nr_tlb_local_flush_one",
 #endif /* CONFIG_DEBUG_TLBFLUSH */
index d7f5cf5b7594d0ea4e766e06fbc07e6fce590e3b..ad4f829193f053c8a0c0846f1e9f619617dcd18e 100644 (file)
@@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
 
 static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
 {
-       if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
+       if (arg < 0 || arg >= MAX_LEC_ITF)
+               return -EINVAL;
+       arg = array_index_nospec(arg, MAX_LEC_ITF);
+       if (!dev_lec[arg])
                return -EINVAL;
        vcc->proto_data = dev_lec[arg];
        return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
@@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
                i = arg;
        if (arg >= MAX_LEC_ITF)
                return -EINVAL;
+       i = array_index_nospec(arg, MAX_LEC_ITF);
        if (!dev_lec[i]) {
                int size;
 
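
The two lec.c hunks above are Spectre-v1 hardening: even after the bounds check, a mispredicted branch can speculatively index dev_lec[] with the raw value, so array_index_nospec() clamps the index without a branch. A userspace re-derivation of the kernel's generic mask (arch code may use cmp+sbb instead; this relies on the usual arithmetic right shift of signed values on gcc/clang targets):

    #include <stdio.h>

    /* Mask is ~0UL when idx < size and 0 otherwise, computed with no
     * conditional branch the CPU could speculate past. */
    static unsigned long index_mask(unsigned long idx, unsigned long size)
    {
            return ~(long)(idx | (size - 1UL - idx)) >> (sizeof(long) * 8 - 1);
    }

    static unsigned long index_nospec(unsigned long idx, unsigned long size)
    {
            return idx & index_mask(idx, size);
    }

    int main(void)
    {
            printf("%lu %lu\n", index_nospec(3, 8),
                   index_nospec(9, 8));                 /* prints: 3 0 */
            return 0;
    }
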
index 9a580999ca57e3037336bbcdb321dbb4ef0cb196..d892b7c3cc42a05e10053832d7bd4d969f019e46 100644 (file)
@@ -523,12 +523,12 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr,
        struct sock *sk = sock->sk;
        int err = 0;
 
-       BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
-
        if (!addr || addr_len < sizeof(struct sockaddr_sco) ||
            addr->sa_family != AF_BLUETOOTH)
                return -EINVAL;
 
+       BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
+
        lock_sock(sk);
 
        if (sk->sk_state != BT_OPEN) {
index 5ea7e56119c13876a8726ffee2e9dc43ce73406f..ba303ee99b9b59762e724072d0f66907f46235b2 100644 (file)
@@ -197,13 +197,10 @@ static void __br_handle_local_finish(struct sk_buff *skb)
 /* note: already called with rcu_read_lock */
 static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       struct net_bridge_port *p = br_port_get_rcu(skb->dev);
-
        __br_handle_local_finish(skb);
 
-       BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
-       br_pass_frame_up(skb);
-       return 0;
+       /* Return 1 to signal that the okfn() was called, so it's ok to use the skb. */
+       return 1;
 }
 
 /*
@@ -280,10 +277,18 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
                                goto forward;
                }
 
-               /* Deliver packet to local host only */
-               NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
-                       NULL, skb, skb->dev, NULL, br_handle_local_finish);
-               return RX_HANDLER_CONSUMED;
+               /* The else clause is hit when nf_hook():
+                *   - returns < 0 (drop/error)
+                *   - returns 0 (stolen/nf_queue)
+                * Thus the okfn() returns 1 to signal the skb is ok to pass.
+                */
+               if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
+                           dev_net(skb->dev), NULL, skb, skb->dev, NULL,
+                           br_handle_local_finish) == 1) {
+                       return RX_HANDLER_PASS;
+               } else {
+                       return RX_HANDLER_CONSUMED;
+               }
        }
 
 forward:
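
The key to the br_input.c hunks above is the NF_HOOK() return convention: the okfn's return value is passed through only when the hook verdict was ACCEPT, while drop/steal paths return <= 0. Returning 1 from the okfn therefore lets the caller distinguish "okfn ran, skb still ours" from "skb consumed". A minimal runnable model of that contract (stub names are mine, not the kernel's inline):

    #include <stdio.h>

    /* 1 = ACCEPT (call okfn), 0 = stolen/queued, <0 = dropped */
    static int nf_hook_stub(int verdict) { return verdict; }

    static int okfn_stub(void) { return 1; }    /* "skb still usable" */

    static int NF_HOOK_stub(int verdict)
    {
            int ret = nf_hook_stub(verdict);

            if (ret == 1)
                    ret = okfn_stub();
            return ret;
    }

    int main(void)
    {
            /* Mirrors br_handle_frame(): only a 1 means the skb may be
             * passed up; 0 or <0 means it is gone. */
            printf("accept -> %s\n", NF_HOOK_stub(1) == 1 ?
                   "RX_HANDLER_PASS" : "RX_HANDLER_CONSUMED");
            printf("stolen -> %s\n", NF_HOOK_stub(0) == 1 ?
                   "RX_HANDLER_PASS" : "RX_HANDLER_CONSUMED");
            return 0;
    }
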
index 02da21d771c9cc7d93c442f0310ad2ab5d9b66f6..45e7f4173bbafe7e59e2ea514a7bccbee8456c79 100644 (file)
@@ -2031,7 +2031,8 @@ static void br_multicast_start_querier(struct net_bridge *br,
 
        __br_multicast_open(br, query);
 
-       list_for_each_entry(port, &br->port_list, list) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(port, &br->port_list, list) {
                if (port->state == BR_STATE_DISABLED ||
                    port->state == BR_STATE_BLOCKING)
                        continue;
@@ -2043,6 +2044,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
                        br_multicast_enable(&port->ip6_own_query);
 #endif
        }
+       rcu_read_unlock();
 }
 
 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
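
The hunk above makes the port_list walk safe for callers that don't hold the bridge lock by pairing list_for_each_entry_rcu() with an explicit read-side critical section. A kernel-style sketch of the reader/updater discipline behind it, with a hypothetical demo_port type:

    #include <linux/kernel.h>
    #include <linux/rculist.h>
    #include <linux/slab.h>

    struct demo_port {
            int state;
            struct list_head list;
    };

    static LIST_HEAD(demo_ports);

    /* Reader: the walk is safe against concurrent unlink as long as it
     * sits inside an RCU read-side critical section, which is what the
     * hunk above adds around the port_list walk. */
    static void demo_walk(void)
    {
            struct demo_port *p;

            rcu_read_lock();
            list_for_each_entry_rcu(p, &demo_ports, list)
                    if (p->state)
                            pr_info("port state %d\n", p->state);
            rcu_read_unlock();
    }

    /* Updater: unlink, wait one grace period, then free. */
    static void demo_del(struct demo_port *p)
    {
            list_del_rcu(&p->list);
            synchronize_rcu();
            kfree(p);
    }
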
index 9c07591b0232e6bbdecf7efece7c9143842671b1..7104cf13da840d21cca1a63d3c1551ff9bbd9076 100644 (file)
@@ -1441,7 +1441,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
            nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
                       br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
            nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
-                      br_opt_get(br, IFLA_BR_VLAN_STATS_PER_PORT)))
+                      br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
                return -EMSGSIZE;
 #endif
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
index eb15891f8b9ff18842b7d43e96c75733ef7aaa99..3cad01ac64e4a2a5ebcafa394d04cc4415c7ddab 100644 (file)
@@ -2032,7 +2032,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
                if (match_kern)
                        match_kern->match_size = ret;
 
-               if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
+               /* rule should have no remaining data after target */
+               if (type == EBT_COMPAT_TARGET && size_left)
                        return -EINVAL;
 
                match32 = (struct compat_ebt_entry_mwt *) buf;
index fdcff29df9158bb850bc57cd04fe4a5971176d07..f409406254ddf2e204676bb8bdfb95d0cb3a0e71 100644 (file)
@@ -1184,7 +1184,21 @@ int dev_change_name(struct net_device *dev, const char *newname)
        BUG_ON(!dev_net(dev));
 
        net = dev_net(dev);
-       if (dev->flags & IFF_UP)
+
+       /* Some auto-enslaved devices, e.g. failover slaves, are
+        * special, as userspace might rename the device after
+        * the interface has been brought up and running since
+        * the point the kernel initiated auto-enslavement. Allow
+        * a live name change even when these slave devices are
+        * up and running.
+        *
+        * Typically, users of these auto-enslaving devices
+        * don't actually care about a slave name change, as
+        * they are supposed to operate on the master interface
+        * directly.
+        */
+       if (dev->flags & IFF_UP &&
+           likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
                return -EBUSY;
 
        write_seqcount_begin(&devnet_rename_seq);
index 4a92a98ccce9a0570cdc75c66180db2f7305073f..b5cd3c727285d7a1738118c246abce8d31dac08f 100644 (file)
@@ -80,14 +80,14 @@ static int failover_slave_register(struct net_device *slave_dev)
                goto err_upper_link;
        }
 
-       slave_dev->priv_flags |= IFF_FAILOVER_SLAVE;
+       slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 
        if (fops && fops->slave_register &&
            !fops->slave_register(slave_dev, failover_dev))
                return NOTIFY_OK;
 
        netdev_upper_dev_unlink(slave_dev, failover_dev);
-       slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+       slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 err_upper_link:
        netdev_rx_handler_unregister(slave_dev);
 done:
@@ -121,7 +121,7 @@ int failover_slave_unregister(struct net_device *slave_dev)
 
        netdev_rx_handler_unregister(slave_dev);
        netdev_upper_dev_unlink(slave_dev, failover_dev);
-       slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+       slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 
        if (fops && fops->slave_unregister &&
            !fops->slave_unregister(slave_dev, failover_dev))
index fc92ebc4e200c5e7857734c749510b0542cf7290..27e61ffd903931c45f0a3f2f6e436937058dfb39 100644 (file)
@@ -4383,6 +4383,8 @@ BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
         * Only binding to IP is supported.
         */
        err = -EINVAL;
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return err;
        if (addr->sa_family == AF_INET) {
                if (addr_len < sizeof(struct sockaddr_in))
                        return err;
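
The addr_len guard above recurs throughout this merge (see also the udpv6_pre_connect, rds_connect and rds_bind hunks further down): never read addr->sa_family unless the caller-supplied length actually covers it. offsetofend() is the kernel's own macro; the harness around it here is just illustration:

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/socket.h>

    /* offsetofend() as the kernel defines it: first byte past MEMBER. */
    #define offsetofend(TYPE, MEMBER) \
            (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

    /* The guard the hunks add: reject short sockaddr buffers up front. */
    static int check_min_addr_len(size_t addr_len)
    {
            return addr_len < offsetofend(struct sockaddr, sa_family) ? -1 : 0;
    }

    int main(void)
    {
            printf("need %zu bytes; got 1 -> %d\n",
                   offsetofend(struct sockaddr, sa_family),
                   check_min_addr_len(1));
            return 0;
    }
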
index f8f94303a1f57203eaa28b5ea459ac28c89e1b12..8f8b7b6c2945a75406c15e5faac61759a02db717 100644 (file)
@@ -1747,20 +1747,16 @@ int netdev_register_kobject(struct net_device *ndev)
 
        error = device_add(dev);
        if (error)
-               goto error_put_device;
+               return error;
 
        error = register_queue_kobjects(ndev);
-       if (error)
-               goto error_device_del;
+       if (error) {
+               device_del(dev);
+               return error;
+       }
 
        pm_runtime_set_memalloc_noio(dev, true);
 
-       return 0;
-
-error_device_del:
-       device_del(dev);
-error_put_device:
-       put_device(dev);
        return error;
 }
 
index 703cf76aa7c2dee7c5b556f5f035c015780f55f0..7109c168b5e0fb20b8b6ad8951893b181803fad8 100644 (file)
@@ -185,9 +185,10 @@ void __init ptp_classifier_init(void)
                { 0x16,  0,  0, 0x00000000 },
                { 0x06,  0,  0, 0x00000000 },
        };
-       struct sock_fprog_kern ptp_prog = {
-               .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
-       };
+       struct sock_fprog_kern ptp_prog;
+
+       ptp_prog.len = ARRAY_SIZE(ptp_filter);
+       ptp_prog.filter = ptp_filter;
 
        BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog));
 }
index a51cab95ba64c7d76a2ba0940c67e9f6e53f54e1..220c56e936592495656962050d285bb1c0024b37 100644 (file)
@@ -4948,7 +4948,7 @@ static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
 {
        struct if_stats_msg *ifsm;
 
-       if (nlh->nlmsg_len < sizeof(*ifsm)) {
+       if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
                NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
                return -EINVAL;
        }
index ef2cd5712098965d3729f3ba748a8727ee300760..40796b8bf820450f5d0cce38986bd29137e2fd05 100644 (file)
@@ -5083,7 +5083,8 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
 
 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 {
-       int mac_len;
+       int mac_len, meta_len;
+       void *meta;
 
        if (skb_cow(skb, skb_headroom(skb)) < 0) {
                kfree_skb(skb);
@@ -5095,6 +5096,13 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
                memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
                        mac_len - VLAN_HLEN - ETH_TLEN);
        }
+
+       meta_len = skb_metadata_len(skb);
+       if (meta_len) {
+               meta = skb_metadata_end(skb) - meta_len;
+               memmove(meta + VLAN_HLEN, meta, meta_len);
+       }
+
        skb->mac_header += VLAN_HLEN;
        return skb;
 }
index 782343bb925b643348cc906a70b97caa0388178d..067878a1e4c51363e065e13ccdb2b9d03c6a9c5f 100644 (file)
@@ -348,7 +348,7 @@ static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
                tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
        }
 
-       if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+       if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
                struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
                *(struct old_timeval32 *)optval = tv32;
                return sizeof(tv32);
@@ -372,7 +372,7 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool
 {
        struct __kernel_sock_timeval tv;
 
-       if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+       if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
                struct old_timeval32 tv32;
 
                if (optlen < sizeof(tv32))
index 79e98e21cdd7f971694356065afb3f68fb34c1a0..12ce6c526d72bd15a16a9415ed992c25039d415a 100644 (file)
@@ -121,6 +121,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
        struct guehdr *guehdr;
        void *data;
        u16 doffset = 0;
+       u8 proto_ctype;
 
        if (!fou)
                return 1;
@@ -212,13 +213,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
        if (unlikely(guehdr->control))
                return gue_control_message(skb, guehdr);
 
+       proto_ctype = guehdr->proto_ctype;
        __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
        skb_reset_transport_header(skb);
 
        if (iptunnel_pull_offloads(skb))
                goto drop;
 
-       return -guehdr->proto_ctype;
+       return -proto_ctype;
 
 drop:
        kfree_skb(skb);
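
The fou hunk above copies proto_ctype out of the GUE header before __skb_pull(), because once the header area has been pulled (and possibly reworked by the offload-stripping path) the old pointer into it must not be dereferenced. A minimal kernel-style sketch of the copy-before-pull pattern, with a stand-in header type:

    #include <linux/skbuff.h>

    struct demo_hdr {
            u8 proto_ctype;
    };

    /* Copy the field out of the packet *before* anything that can move
     * or invalidate the skb header area; after the pull, 'hdr' is stale. */
    static int demo_recv(struct sk_buff *skb, struct demo_hdr *hdr,
                         unsigned int hdrlen)
    {
            u8 proto_ctype = hdr->proto_ctype;

            __skb_pull(skb, hdrlen);
            skb_reset_transport_header(skb);

            return -proto_ctype;
    }
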
index a5da63e5faa2d8118d3044a5a79b5e51bf61cafc..6fdf1c195d8e3a0e32af0359794f798457a21cb3 100644 (file)
@@ -1183,11 +1183,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
        return dst;
 }
 
+static void ipv4_send_dest_unreach(struct sk_buff *skb)
+{
+       struct ip_options opt;
+       int res;
+
+       /* Recompile ip options since IPCB may not be valid anymore.
+        * Also check we have a reasonable ipv4 header.
+        */
+       if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
+           ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
+               return;
+
+       memset(&opt, 0, sizeof(opt));
+       if (ip_hdr(skb)->ihl > 5) {
+               if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
+                       return;
+               opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
+
+               rcu_read_lock();
+               res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+               rcu_read_unlock();
+
+               if (res)
+                       return;
+       }
+       __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
+}
+
 static void ipv4_link_failure(struct sk_buff *skb)
 {
        struct rtable *rt;
 
-       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+       ipv4_send_dest_unreach(skb);
 
        rt = skb_rtable(skb);
        if (rt)
index ba0fc4b1846561559ac995a444992f98a3187894..eeb4041fa5f905fb0f7c91ea6d74851ae97259f8 100644 (file)
@@ -49,6 +49,7 @@ static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 static int comp_sack_nr_max = 255;
 static u32 u32_max_div_HZ = UINT_MAX / HZ;
+static int one_day_secs = 24 * 3600;
 
 /* obsolete */
 static int sysctl_tcp_low_latency __read_mostly;
@@ -1151,7 +1152,9 @@ static struct ctl_table ipv4_net_table[] = {
                .data           = &init_net.ipv4.sysctl_tcp_min_rtt_wlen,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &one_day_secs
        },
        {
                .procname       = "tcp_autocorking",
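
The sysctl hunk above swaps proc_dointvec for proc_dointvec_minmax so writes to tcp_min_rtt_wlen are clamped to [0, one day]. A sketch of such a bounded ctl_table entry, with hypothetical names:

    #include <linux/sysctl.h>

    static int demo_min;                            /* 0 */
    static int demo_max = 24 * 3600;                /* one day, as above */
    static int demo_value;

    /* proc_dointvec_minmax rejects writes outside [*extra1, *extra2]. */
    static struct ctl_table demo_table[] = {
            {
                    .procname       = "demo_seconds",
                    .data           = &demo_value,
                    .maxlen         = sizeof(int),
                    .mode           = 0644,
                    .proc_handler   = proc_dointvec_minmax,
                    .extra1         = &demo_min,
                    .extra2         = &demo_max,
            },
            { }
    };
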
index 359da68d7c0628360d5f1b727c878f678e28d39a..477cb4aa456c11c70185a982cbadafba857d3619 100644 (file)
@@ -49,9 +49,8 @@
 #define DCTCP_MAX_ALPHA        1024U
 
 struct dctcp {
-       u32 acked_bytes_ecn;
-       u32 acked_bytes_total;
-       u32 prior_snd_una;
+       u32 old_delivered;
+       u32 old_delivered_ce;
        u32 prior_rcv_nxt;
        u32 dctcp_alpha;
        u32 next_seq;
@@ -73,8 +72,8 @@ static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
 {
        ca->next_seq = tp->snd_nxt;
 
-       ca->acked_bytes_ecn = 0;
-       ca->acked_bytes_total = 0;
+       ca->old_delivered = tp->delivered;
+       ca->old_delivered_ce = tp->delivered_ce;
 }
 
 static void dctcp_init(struct sock *sk)
@@ -86,7 +85,6 @@ static void dctcp_init(struct sock *sk)
             sk->sk_state == TCP_CLOSE)) {
                struct dctcp *ca = inet_csk_ca(sk);
 
-               ca->prior_snd_una = tp->snd_una;
                ca->prior_rcv_nxt = tp->rcv_nxt;
 
                ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
@@ -118,37 +116,25 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        struct dctcp *ca = inet_csk_ca(sk);
-       u32 acked_bytes = tp->snd_una - ca->prior_snd_una;
-
-       /* If ack did not advance snd_una, count dupack as MSS size.
-        * If ack did update window, do not count it at all.
-        */
-       if (acked_bytes == 0 && !(flags & CA_ACK_WIN_UPDATE))
-               acked_bytes = inet_csk(sk)->icsk_ack.rcv_mss;
-       if (acked_bytes) {
-               ca->acked_bytes_total += acked_bytes;
-               ca->prior_snd_una = tp->snd_una;
-
-               if (flags & CA_ACK_ECE)
-                       ca->acked_bytes_ecn += acked_bytes;
-       }
 
        /* Expired RTT */
        if (!before(tp->snd_una, ca->next_seq)) {
-               u64 bytes_ecn = ca->acked_bytes_ecn;
+               u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
                u32 alpha = ca->dctcp_alpha;
 
                /* alpha = (1 - g) * alpha + g * F */
 
                alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
-               if (bytes_ecn) {
+               if (delivered_ce) {
+                       u32 delivered = tp->delivered - ca->old_delivered;
+
                        /* If dctcp_shift_g == 1, a 32bit value would overflow
-                        * after 8 Mbytes.
+                        * after 8 M packets.
                         */
-                       bytes_ecn <<= (10 - dctcp_shift_g);
-                       do_div(bytes_ecn, max(1U, ca->acked_bytes_total));
+                       delivered_ce <<= (10 - dctcp_shift_g);
+                       delivered_ce /= max(1U, delivered);
 
-                       alpha = min(alpha + (u32)bytes_ecn, DCTCP_MAX_ALPHA);
+                       alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
                }
                /* dctcp_alpha can be read from dctcp_get_info() without
                 * synchro, so we ask compiler to not use dctcp_alpha
@@ -200,6 +186,7 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
                             union tcp_cc_info *info)
 {
        const struct dctcp *ca = inet_csk_ca(sk);
+       const struct tcp_sock *tp = tcp_sk(sk);
 
        /* Fill it also in case of VEGASINFO due to req struct limits.
         * We can still correctly retrieve it later.
@@ -211,8 +198,10 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
                        info->dctcp.dctcp_enabled = 1;
                        info->dctcp.dctcp_ce_state = (u16) ca->ce_state;
                        info->dctcp.dctcp_alpha = ca->dctcp_alpha;
-                       info->dctcp.dctcp_ab_ecn = ca->acked_bytes_ecn;
-                       info->dctcp.dctcp_ab_tot = ca->acked_bytes_total;
+                       info->dctcp.dctcp_ab_ecn = tp->mss_cache *
+                                                  (tp->delivered_ce - ca->old_delivered_ce);
+                       info->dctcp.dctcp_ab_tot = tp->mss_cache *
+                                                  (tp->delivered - ca->old_delivered);
                }
 
                *attr = INET_DIAG_DCTCPINFO;
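
The DCTCP rework above switches from byte counters to the tp->delivered / tp->delivered_ce packet counters while keeping the same EWMA, alpha = (1 - g) * alpha + g * F, where F is the fraction of CE-marked packets in the observation window and g = 1/2^dctcp_shift_g (default 1/16). A small self-contained model of the fixed-point update; it is simplified and omits the min_not_zero() decay detail of the real code:

    #include <stdio.h>

    #define DCTCP_MAX_ALPHA 1024U                   /* fixed point: 1.0 */
    static unsigned int dctcp_shift_g = 4;          /* g = 1/16, the default */

    static unsigned int update_alpha(unsigned int alpha,
                                     unsigned int delivered,
                                     unsigned int delivered_ce)
    {
            alpha -= alpha >> dctcp_shift_g;        /* (1 - g) * alpha */
            if (delivered_ce) {
                    /* g * F, scaled: F = delivered_ce / delivered */
                    delivered_ce <<= (10 - dctcp_shift_g);
                    delivered_ce /= delivered ? delivered : 1;
                    alpha += delivered_ce;
                    if (alpha > DCTCP_MAX_ALPHA)
                            alpha = DCTCP_MAX_ALPHA;
            }
            return alpha;
    }

    int main(void)
    {
            /* 25 of 100 packets CE-marked, starting from alpha = 0:
             * g * F = (1/16) * 0.25 = 0.015625 -> 16 on the 1024 scale. */
            printf("alpha = %u\n", update_alpha(0, 100, 25));
            return 0;
    }
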
index 5def3c48870e17f42ac9424a6ee091ac4824dabc..731d3045b50a0fb9a89c887a154db9a3da8c7ddd 100644 (file)
@@ -402,11 +402,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       int room;
+
+       room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
 
        /* Check #1 */
-       if (tp->rcv_ssthresh < tp->window_clamp &&
-           (int)tp->rcv_ssthresh < tcp_space(sk) &&
-           !tcp_under_memory_pressure(sk)) {
+       if (room > 0 && !tcp_under_memory_pressure(sk)) {
                int incr;
 
                /* Check #2. Increase window, if skb with such overhead
@@ -419,8 +420,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 
                if (incr) {
                        incr = max_t(int, incr, 2 * skb->len);
-                       tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
-                                              tp->window_clamp);
+                       tp->rcv_ssthresh += min(room, incr);
                        inet_csk(sk)->icsk_ack.quick |= 1;
                }
        }
index d43d076c98f5d30b058a49b5e477e76e9d97aec8..1766325423b5dad4d8c95c85605dc571248ba6d1 100644 (file)
@@ -476,7 +476,7 @@ static int ip6addrlbl_valid_dump_req(const struct nlmsghdr *nlh,
        }
 
        if (nlmsg_attrlen(nlh, sizeof(*ifal))) {
-               NL_SET_ERR_MSG_MOD(extack, "Invalid data after header for address label dump requewst");
+               NL_SET_ERR_MSG_MOD(extack, "Invalid data after header for address label dump request");
                return -EINVAL;
        }
 
index 0302e0eb07af1d270a615bcadfcb9bc08ca61d6c..7178e32eb15d0a969eb39fcfec9973bb0150bf48 100644 (file)
@@ -2330,6 +2330,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
 
                rcu_read_lock();
                from = rcu_dereference(rt6->from);
+               if (!from) {
+                       rcu_read_unlock();
+                       return;
+               }
                nrt6 = ip6_rt_cache_alloc(from, daddr, saddr);
                if (nrt6) {
                        rt6_do_update_pmtu(nrt6, mtu);
index b444483cdb2b42ef7acdbd7d23a0c046f55077c2..622eeaf5732b39b97752eefb864133e46b27a15d 100644 (file)
@@ -1047,6 +1047,8 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
 static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
                             int addr_len)
 {
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return -EINVAL;
        /* The following checks are replicated from __ip6_datagram_connect()
         * and intended to prevent BPF program called below from accessing
         * bytes that are out of the bound specified by user in addr_len.
index b99e73a7e7e0f2b4959b279e3aecbadf29667d55..2017b7d780f5af73c1ac7461113842776d1b00fc 100644 (file)
@@ -320,14 +320,13 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
        struct llc_sap *sap;
        int rc = -EINVAL;
 
-       dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
-
        lock_sock(sk);
        if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
                goto out;
        rc = -EAFNOSUPPORT;
        if (unlikely(addr->sllc_family != AF_LLC))
                goto out;
+       dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
        rc = -ENODEV;
        rcu_read_lock();
        if (sk->sk_bound_dev_if) {
index 28d022a3eee305bc9d04531eb6b70d3b57412d93..ae4f0be3b393ba727b95060bb7148ec0cd961440 100644 (file)
@@ -1195,6 +1195,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
 
+       if (local->in_reconfig)
+               return;
+
        if (!check_sdata_in_driver(sdata))
                return;
 
index 4700718e010f5a886001e9a0a0326a628edf0739..37e372896230a08c6a9214f88ce54e7ad823d352 100644 (file)
@@ -167,8 +167,10 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
                 * The driver doesn't know anything about VLAN interfaces.
                 * Hence, don't send GTKs for VLAN interfaces to the driver.
                 */
-               if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+                       ret = 1;
                        goto out_unsupported;
+               }
        }
 
        ret = drv_set_key(key->local, SET_KEY, sdata,
@@ -213,11 +215,8 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
                /* all of these we can do in software - if driver can */
                if (ret == 1)
                        return 0;
-               if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) {
-                       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-                               return 0;
+               if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL))
                        return -EINVAL;
-               }
                return 0;
        default:
                return -EINVAL;
index 95eb5064fa9166220bf67af98dedf83726ffcdc8..b76a2aefa9ec05e5162ab565a108b5b98848116f 100644 (file)
@@ -23,7 +23,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
 static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
 {
        /* Use last four bytes of hw addr as hash index */
-       return jhash_1word(*(u32 *)(addr+2), seed);
+       return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
 }
 
 static const struct rhashtable_params mesh_rht_params = {
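
The mesh hash fix above matters because addr + 2 on a 6-byte MAC address is at best 2-byte aligned, so a raw 4-byte load there can fault on strict-alignment architectures. Outside the kernel's __get_unaligned_cpu32(), the portable way to express the same load is memcpy:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* memcpy compiles to a single load where that is legal and to byte
     * loads where it is not, so there is no alignment trap either way. */
    static uint32_t load_u32_unaligned(const void *p)
    {
            uint32_t v;

            memcpy(&v, p, sizeof(v));
            return v;
    }

    int main(void)
    {
            uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

            /* last four bytes of the hw address, as the mesh hash uses */
            printf("0x%08x\n", load_u32_unaligned(mac + 2));
            return 0;
    }
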
index 7f8d93401ce070f9e2e61ce6a84e5ab8768b5811..bf0b187f994e9c56e191d2045f405cb6e6bac336 100644 (file)
@@ -1568,7 +1568,15 @@ static void sta_ps_start(struct sta_info *sta)
                return;
 
        for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
-               if (txq_has_queue(sta->sta.txq[tid]))
+               struct ieee80211_txq *txq = sta->sta.txq[tid];
+               struct txq_info *txqi = to_txq_info(txq);
+
+               spin_lock(&local->active_txq_lock[txq->ac]);
+               if (!list_empty(&txqi->schedule_order))
+                       list_del_init(&txqi->schedule_order);
+               spin_unlock(&local->active_txq_lock[txq->ac]);
+
+               if (txq_has_queue(txq))
                        set_bit(tid, &sta->txq_buffered_tids);
                else
                        clear_bit(tid, &sta->txq_buffered_tids);
index 366b9e6f043e2df89eccb4d63a9fb3ab1d7db023..40141df09f255fac46043f67656e98e16adda5b9 100644 (file)
@@ -1,4 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions of this file
+ * Copyright (C) 2019 Intel Corporation
+ */
+
 #ifdef CONFIG_MAC80211_MESSAGE_TRACING
 
 #if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -11,7 +16,7 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mac80211_msg
 
-#define MAX_MSG_LEN    100
+#define MAX_MSG_LEN    120
 
 DECLARE_EVENT_CLASS(mac80211_msg_event,
        TP_PROTO(struct va_format *vaf),
index 8a49a74c0a374815ca2f374510216b334eb00013..2e816dd67be72d161bf1959554d293f2f6725673 100644 (file)
@@ -3221,6 +3221,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        u8 max_subframes = sta->sta.max_amsdu_subframes;
        int max_frags = local->hw.max_tx_fragments;
        int max_amsdu_len = sta->sta.max_amsdu_len;
+       int orig_truesize;
        __be16 len;
        void *data;
        bool ret = false;
@@ -3261,6 +3262,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        if (!head || skb_is_gso(head))
                goto out;
 
+       orig_truesize = head->truesize;
        orig_len = head->len;
 
        if (skb->len + head->len > max_amsdu_len)
@@ -3318,6 +3320,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        *frag_tail = skb;
 
 out_recalc:
+       fq->memory_usage += head->truesize - orig_truesize;
        if (head->len != orig_len) {
                flow->backlog += head->len - orig_len;
                tin->backlog_bytes += head->len - orig_len;
@@ -3646,16 +3649,17 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
 {
        struct ieee80211_local *local = hw_to_local(hw);
+       struct ieee80211_txq *ret = NULL;
        struct txq_info *txqi = NULL;
 
-       lockdep_assert_held(&local->active_txq_lock[ac]);
+       spin_lock_bh(&local->active_txq_lock[ac]);
 
  begin:
        txqi = list_first_entry_or_null(&local->active_txqs[ac],
                                        struct txq_info,
                                        schedule_order);
        if (!txqi)
-               return NULL;
+               goto out;
 
        if (txqi->txq.sta) {
                struct sta_info *sta = container_of(txqi->txq.sta,
@@ -3672,24 +3676,30 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
 
 
        if (txqi->schedule_round == local->schedule_round[ac])
-               return NULL;
+               goto out;
 
        list_del_init(&txqi->schedule_order);
        txqi->schedule_round = local->schedule_round[ac];
-       return &txqi->txq;
+       ret = &txqi->txq;
+
+out:
+       spin_unlock_bh(&local->active_txq_lock[ac]);
+       return ret;
 }
 EXPORT_SYMBOL(ieee80211_next_txq);
 
-void ieee80211_return_txq(struct ieee80211_hw *hw,
-                         struct ieee80211_txq *txq)
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+                             struct ieee80211_txq *txq,
+                             bool force)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        struct txq_info *txqi = to_txq_info(txq);
 
-       lockdep_assert_held(&local->active_txq_lock[txq->ac]);
+       spin_lock_bh(&local->active_txq_lock[txq->ac]);
 
        if (list_empty(&txqi->schedule_order) &&
-           (!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets)) {
+           (force || !skb_queue_empty(&txqi->frags) ||
+            txqi->tin.backlog_packets)) {
                /* If airtime accounting is active, always enqueue STAs at the
                 * head of the list to ensure that they only get moved to the
                 * back by the airtime DRR scheduler once they have a negative
@@ -3706,20 +3716,10 @@ void ieee80211_return_txq(struct ieee80211_hw *hw,
                        list_add_tail(&txqi->schedule_order,
                                      &local->active_txqs[txq->ac]);
        }
-}
-EXPORT_SYMBOL(ieee80211_return_txq);
 
-void ieee80211_schedule_txq(struct ieee80211_hw *hw,
-                           struct ieee80211_txq *txq)
-       __acquires(txq_lock) __releases(txq_lock)
-{
-       struct ieee80211_local *local = hw_to_local(hw);
-
-       spin_lock_bh(&local->active_txq_lock[txq->ac]);
-       ieee80211_return_txq(hw, txq);
        spin_unlock_bh(&local->active_txq_lock[txq->ac]);
 }
-EXPORT_SYMBOL(ieee80211_schedule_txq);
+EXPORT_SYMBOL(__ieee80211_schedule_txq);
 
 bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
                                struct ieee80211_txq *txq)
@@ -3729,7 +3729,7 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
        struct sta_info *sta;
        u8 ac = txq->ac;
 
-       lockdep_assert_held(&local->active_txq_lock[ac]);
+       spin_lock_bh(&local->active_txq_lock[ac]);
 
        if (!txqi->txq.sta)
                goto out;
@@ -3759,34 +3759,27 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
 
        sta->airtime[ac].deficit += sta->airtime_weight;
        list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
+       spin_unlock_bh(&local->active_txq_lock[ac]);
 
        return false;
 out:
        if (!list_empty(&txqi->schedule_order))
                list_del_init(&txqi->schedule_order);
+       spin_unlock_bh(&local->active_txq_lock[ac]);
 
        return true;
 }
 EXPORT_SYMBOL(ieee80211_txq_may_transmit);
 
 void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
-       __acquires(txq_lock)
 {
        struct ieee80211_local *local = hw_to_local(hw);
 
        spin_lock_bh(&local->active_txq_lock[ac]);
        local->schedule_round[ac]++;
-}
-EXPORT_SYMBOL(ieee80211_txq_schedule_start);
-
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
-       __releases(txq_lock)
-{
-       struct ieee80211_local *local = hw_to_local(hw);
-
        spin_unlock_bh(&local->active_txq_lock[ac]);
 }
-EXPORT_SYMBOL(ieee80211_txq_schedule_end);
+EXPORT_SYMBOL(ieee80211_txq_schedule_start);
 
 void __ieee80211_subif_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev,
index dc07fcc7938ec4da2b95e43530fffc0f5aefe82b..802db01e30754cfa66861acc555bf5b02d158df1 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 
 #include <net/ncsi.h>
@@ -667,7 +668,10 @@ static int ncsi_rsp_handler_oem_bcm_gma(struct ncsi_request *nr)
        ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        memcpy(saddr.sa_data, &rsp->data[BCM_MAC_ADDR_OFFSET], ETH_ALEN);
        /* Increase mac address by 1 for BMC's address */
-       saddr.sa_data[ETH_ALEN - 1]++;
+       eth_addr_inc((u8 *)saddr.sa_data);
+       if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
+               return -ENXIO;
+
        ret = ops->ndo_set_mac_address(ndev, &saddr);
        if (ret < 0)
                netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
index 43bbaa32b1d65cbbec89d439d2ca9bd6bfe77cf3..14457551bcb4edca3047320028be0a331d185e14 100644 (file)
@@ -1678,7 +1678,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
        if (!cp) {
                int v;
 
-               if (!sysctl_schedule_icmp(ipvs))
+               if (ipip || !sysctl_schedule_icmp(ipvs))
                        return NF_ACCEPT;
 
                if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
index 82bfbeef46afa53fde8d428533999b382713f053..2a714527cde17aee1152f33bc7f9b041cd5eb087 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/slab.h>
 #include <linux/random.h>
 #include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <linux/err.h>
 #include <linux/percpu.h>
 #include <linux/moduleparam.h>
@@ -449,6 +450,40 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
 }
 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
 
+/* Generate an almost-unique pseudo-id for a given conntrack.
+ *
+ * Intentionally doesn't re-use any of the seeds used for hash
+ * table location; we assume the id gets exposed to userspace.
+ *
+ * The following nf_conn items do not change throughout the lifetime
+ * of the nf_conn after it has been committed to the main hash table:
+ *
+ * 1. nf_conn address
+ * 2. nf_conn->ext address
+ * 3. nf_conn->master address (normally NULL)
+ * 4. tuple
+ * 5. the associated net namespace
+ */
+u32 nf_ct_get_id(const struct nf_conn *ct)
+{
+       static __read_mostly siphash_key_t ct_id_seed;
+       unsigned long a, b, c, d;
+
+       net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
+
+       a = (unsigned long)ct;
+       b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
+       c = (unsigned long)ct->ext;
+       d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
+                                  &ct_id_seed);
+#ifdef CONFIG_64BIT
+       return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
+#else
+       return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
+#endif
+}
+EXPORT_SYMBOL_GPL(nf_ct_get_id);
+
 static void
 clean_from_lists(struct nf_conn *ct)
 {
@@ -982,12 +1017,9 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 
        /* set conntrack timestamp, if enabled. */
        tstamp = nf_conn_tstamp_find(ct);
-       if (tstamp) {
-               if (skb->tstamp == 0)
-                       __net_timestamp(skb);
+       if (tstamp)
+               tstamp->start = ktime_get_real_ns();
 
-               tstamp->start = ktime_to_ns(skb->tstamp);
-       }
        /* Since the lookup is lockless, hash insertion must be done after
         * starting the timer and setting the CONFIRMED bit. The RCU barriers
         * guarantee that no other CPU can find the conntrack before the above
@@ -1350,6 +1382,7 @@ __nf_conntrack_alloc(struct net *net,
        /* save hash for reusing when confirming */
        *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
        ct->status = 0;
+       ct->timeout = 0;
        write_pnet(&ct->ct_net, net);
        memset(&ct->__nfct_init_offset[0], 0,
               offsetof(struct nf_conn, proto) -
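
nf_ct_get_id() above replaces a raw kernel pointer, which is both guessable and an address leak, with a siphash over fields that stay invariant for the object's lifetime, keyed by a boot-time random seed. A kernel-style sketch applying the same pattern to a hypothetical object type (demo_obj and demo_get_id are illustrations, not kernel API):

    #include <linux/cache.h>
    #include <linux/net.h>
    #include <linux/siphash.h>

    struct demo_obj {
            unsigned long cookie;           /* invariant after publish */
            void *owner;                    /* invariant after publish */
    };

    /* Stable but unpredictable userspace-visible id: a keyed hash over
     * fields that never change once the object is visible. */
    static u32 demo_get_id(const struct demo_obj *obj)
    {
            static siphash_key_t demo_id_seed __read_mostly;

            net_get_random_once(&demo_id_seed, sizeof(demo_id_seed));

            return (u32)siphash_3u64((u64)(unsigned long)obj,
                                     (u64)obj->cookie,
                                     (u64)(unsigned long)obj->owner,
                                     &demo_id_seed);
    }
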
index 66c596d287a5dc44cea26680023e8c12798a5261..d7f61b0547c65c5e85a2080481906d2918a1eddf 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/siphash.h>
 
 #include <linux/netfilter.h>
 #include <net/netlink.h>
@@ -485,7 +486,9 @@ static int ctnetlink_dump_ct_synproxy(struct sk_buff *skb, struct nf_conn *ct)
 
 static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
 {
-       if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
+       __be32 id = (__force __be32)nf_ct_get_id(ct);
+
+       if (nla_put_be32(skb, CTA_ID, id))
                goto nla_put_failure;
        return 0;
 
@@ -1286,8 +1289,9 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
        }
 
        if (cda[CTA_ID]) {
-               u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
-               if (id != (u32)(unsigned long)ct) {
+               __be32 id = nla_get_be32(cda[CTA_ID]);
+
+               if (id != (__force __be32)nf_ct_get_id(ct)) {
                        nf_ct_put(ct);
                        return -ENOENT;
                }
@@ -2692,6 +2696,25 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
 
 static const union nf_inet_addr any_addr;
 
+static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
+{
+       static __read_mostly siphash_key_t exp_id_seed;
+       unsigned long a, b, c, d;
+
+       net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
+
+       a = (unsigned long)exp;
+       b = (unsigned long)exp->helper;
+       c = (unsigned long)exp->master;
+       d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed);
+
+#ifdef CONFIG_64BIT
+       return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed);
+#else
+       return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed);
+#endif
+}
+
 static int
 ctnetlink_exp_dump_expect(struct sk_buff *skb,
                          const struct nf_conntrack_expect *exp)
@@ -2739,7 +2762,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
        }
 #endif
        if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
-           nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
+           nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) ||
            nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
            nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
                goto nla_put_failure;
@@ -3044,7 +3067,8 @@ static int ctnetlink_get_expect(struct net *net, struct sock *ctnl,
 
        if (cda[CTA_EXPECT_ID]) {
                __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
-               if (ntohl(id) != (u32)(unsigned long)exp) {
+
+               if (id != nf_expect_get_id(exp)) {
                        nf_ct_expect_put(exp);
                        return -ENOENT;
                }
index b9403a266a2e20c1651585a5c76beb8a65365609..37bb530d848fa2fa1d9f95e4f8174150260dbd38 100644 (file)
@@ -55,7 +55,7 @@ void nf_l4proto_log_invalid(const struct sk_buff *skb,
        struct va_format vaf;
        va_list args;
 
-       if (net->ct.sysctl_log_invalid != protonum ||
+       if (net->ct.sysctl_log_invalid != protonum &&
            net->ct.sysctl_log_invalid != IPPROTO_RAW)
                return;
 
index 7df477996b1642412faf22d4f08aa518d75f2649..9becac9535873cf7459579a70c3e3d60c055601b 100644 (file)
@@ -103,49 +103,94 @@ int nf_conntrack_icmp_packet(struct nf_conn *ct,
        return NF_ACCEPT;
 }
 
-/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
-static int
-icmp_error_message(struct nf_conn *tmpl, struct sk_buff *skb,
-                  const struct nf_hook_state *state)
+/* Check whether the inner header is related to any existing connection */
+int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb,
+                           unsigned int dataoff,
+                           const struct nf_hook_state *state,
+                           u8 l4proto, union nf_inet_addr *outer_daddr)
 {
        struct nf_conntrack_tuple innertuple, origtuple;
        const struct nf_conntrack_tuple_hash *h;
        const struct nf_conntrack_zone *zone;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_zone tmp;
+       union nf_inet_addr *ct_daddr;
+       enum ip_conntrack_dir dir;
+       struct nf_conn *ct;
 
        WARN_ON(skb_nfct(skb));
        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
 
        /* Are they talking about one of our connections? */
-       if (!nf_ct_get_tuplepr(skb,
-                              skb_network_offset(skb) + ip_hdrlen(skb)
-                                                      + sizeof(struct icmphdr),
-                              PF_INET, state->net, &origtuple)) {
-               pr_debug("icmp_error_message: failed to get tuple\n");
+       if (!nf_ct_get_tuplepr(skb, dataoff,
+                              state->pf, state->net, &origtuple))
                return -NF_ACCEPT;
-       }
 
        /* Ordinarily, we'd expect the inverted tupleproto, but it's
           been preserved inside the ICMP. */
-       if (!nf_ct_invert_tuple(&innertuple, &origtuple)) {
-               pr_debug("icmp_error_message: no match\n");
+       if (!nf_ct_invert_tuple(&innertuple, &origtuple))
                return -NF_ACCEPT;
-       }
-
-       ctinfo = IP_CT_RELATED;
 
        h = nf_conntrack_find_get(state->net, zone, &innertuple);
-       if (!h) {
-               pr_debug("icmp_error_message: no match\n");
+       if (!h)
+               return -NF_ACCEPT;
+
+       /* Consider: A -> T (=This machine) -> B
+        *   Conntrack entry will look like this:
+        *      Original:  A->B
+        *      Reply:     B->T (SNAT case) OR A
+        *
+        * When this function runs, we got packet that looks like this:
+        * iphdr|icmphdr|inner_iphdr|l4header (tcp, udp, ..).
+        *
+        * The nf_conntrack_find_get() above does its lookup based on the
+        * inner_iphdr, so we should expect the destination of the found
+        * connection to match the outer header's destination address.
+        *
+        * In above example, we can consider these two cases:
+        *  1. Error coming in reply direction from B or M (middle box) to
+        *     T (SNAT case) or A.
+        *     Inner saddr will be B, dst will be T or A.
+        *     The found conntrack will be reply tuple (B->T/A).
+        *  2. Error coming in original direction from A or M to B.
+        *     Inner saddr will be A, inner daddr will be B.
+        *     The found conntrack will be original tuple (A->B).
+        *
+        * In both cases, conntrack[dir].dst == inner.dst.
+        *
+        * A bogus packet could look like this:
+        *   Inner: B->T
+        *   Outer: B->X (other machine reachable by T).
+        *
+        * In this case, lookup yields connection A->B and will
+        * set packet from B->X as *RELATED*, even though no connection
+        * from X was ever seen.
+        */
+       ct = nf_ct_tuplehash_to_ctrack(h);
+       dir = NF_CT_DIRECTION(h);
+       ct_daddr = &ct->tuplehash[dir].tuple.dst.u3;
+       if (!nf_inet_addr_cmp(outer_daddr, ct_daddr)) {
+               if (state->pf == AF_INET) {
+                       nf_l4proto_log_invalid(skb, state->net, state->pf,
+                                              l4proto,
+                                              "outer daddr %pI4 != inner %pI4",
+                                              &outer_daddr->ip, &ct_daddr->ip);
+               } else if (state->pf == AF_INET6) {
+                       nf_l4proto_log_invalid(skb, state->net, state->pf,
+                                              l4proto,
+                                              "outer daddr %pI6 != inner %pI6",
+                                              &outer_daddr->ip6, &ct_daddr->ip6);
+               }
+               nf_ct_put(ct);
                return -NF_ACCEPT;
        }
 
-       if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
+       ctinfo = IP_CT_RELATED;
+       if (dir == IP_CT_DIR_REPLY)
                ctinfo += IP_CT_IS_REPLY;
 
        /* Update skb to refer to this connection */
-       nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo);
+       nf_ct_set(skb, ct, ctinfo);
        return NF_ACCEPT;
 }
 
@@ -162,11 +207,12 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
                              struct sk_buff *skb, unsigned int dataoff,
                              const struct nf_hook_state *state)
 {
+       union nf_inet_addr outer_daddr;
        const struct icmphdr *icmph;
        struct icmphdr _ih;
 
        /* Not enough header? */
-       icmph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ih), &_ih);
+       icmph = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
        if (icmph == NULL) {
                icmp_error_log(skb, state, "short packet");
                return -NF_ACCEPT;
@@ -199,7 +245,12 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
            icmph->type != ICMP_REDIRECT)
                return NF_ACCEPT;
 
-       return icmp_error_message(tmpl, skb, state);
+       memset(&outer_daddr, 0, sizeof(outer_daddr));
+       outer_daddr.ip = ip_hdr(skb)->daddr;
+
+       dataoff += sizeof(*icmph);
+       return nf_conntrack_inet_error(tmpl, skb, dataoff, state,
+                                      IPPROTO_ICMP, &outer_daddr);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
index bec4a321165858b828abd0e4449c85afcdf6aeb0..c63ee361285551c2c154ae8fa7dbbc0c56ea7ee2 100644 (file)
@@ -123,51 +123,6 @@ int nf_conntrack_icmpv6_packet(struct nf_conn *ct,
        return NF_ACCEPT;
 }
 
-static int
-icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
-                    struct sk_buff *skb,
-                    unsigned int icmp6off)
-{
-       struct nf_conntrack_tuple intuple, origtuple;
-       const struct nf_conntrack_tuple_hash *h;
-       enum ip_conntrack_info ctinfo;
-       struct nf_conntrack_zone tmp;
-
-       WARN_ON(skb_nfct(skb));
-
-       /* Are they talking about one of our connections? */
-       if (!nf_ct_get_tuplepr(skb,
-                              skb_network_offset(skb)
-                               + sizeof(struct ipv6hdr)
-                               + sizeof(struct icmp6hdr),
-                              PF_INET6, net, &origtuple)) {
-               pr_debug("icmpv6_error: Can't get tuple\n");
-               return -NF_ACCEPT;
-       }
-
-       /* Ordinarily, we'd expect the inverted tupleproto, but it's
-          been preserved inside the ICMP. */
-       if (!nf_ct_invert_tuple(&intuple, &origtuple)) {
-               pr_debug("icmpv6_error: Can't invert tuple\n");
-               return -NF_ACCEPT;
-       }
-
-       ctinfo = IP_CT_RELATED;
-
-       h = nf_conntrack_find_get(net, nf_ct_zone_tmpl(tmpl, skb, &tmp),
-                                 &intuple);
-       if (!h) {
-               pr_debug("icmpv6_error: no match\n");
-               return -NF_ACCEPT;
-       } else {
-               if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
-                       ctinfo += IP_CT_IS_REPLY;
-       }
-
-       /* Update skb to refer to this connection */
-       nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo);
-       return NF_ACCEPT;
-}
 
 static void icmpv6_error_log(const struct sk_buff *skb,
                             const struct nf_hook_state *state,
@@ -182,6 +137,7 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
                              unsigned int dataoff,
                              const struct nf_hook_state *state)
 {
+       union nf_inet_addr outer_daddr;
        const struct icmp6hdr *icmp6h;
        struct icmp6hdr _ih;
        int type;
@@ -210,7 +166,11 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
        if (icmp6h->icmp6_type >= 128)
                return NF_ACCEPT;
 
-       return icmpv6_error_message(state->net, tmpl, skb, dataoff);
+       memcpy(&outer_daddr.ip6, &ipv6_hdr(skb)->daddr,
+              sizeof(outer_daddr.ip6));
+       dataoff += sizeof(*icmp6h);
+       return nf_conntrack_inet_error(tmpl, skb, dataoff, state,
+                                      IPPROTO_ICMPV6, &outer_daddr);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
index af7dc65377584d26f4b5d98ef55dd06f93d8107d..000952719adfdf49bf35a53dd800c6cecf45c14f 100644 (file)
@@ -415,9 +415,14 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
        case IPPROTO_ICMPV6:
                /* id is same for either direction... */
                keyptr = &tuple->src.u.icmp.id;
-               min = range->min_proto.icmp.id;
-               range_size = ntohs(range->max_proto.icmp.id) -
-                            ntohs(range->min_proto.icmp.id) + 1;
+               if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
+                       min = 0;
+                       range_size = 65536;
+               } else {
+                       min = ntohs(range->min_proto.icmp.id);
+                       range_size = ntohs(range->max_proto.icmp.id) -
+                                    ntohs(range->min_proto.icmp.id) + 1;
+               }
                goto find_free_id;
 #if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
        case IPPROTO_GRE:
index ef7772e976cc802afc64ea25d28f1fbecde773be..1606eaa5ae0da368f4a692264456e18dfe27ec8d 100644 (file)
@@ -1545,7 +1545,7 @@ static int nft_chain_parse_hook(struct net *net,
                if (IS_ERR(type))
                        return PTR_ERR(type);
        }
-       if (!(type->hook_mask & (1 << hook->num)))
+       if (hook->num > NF_MAX_HOOKS || !(type->hook_mask & (1 << hook->num)))
                return -EOPNOTSUPP;
 
        if (type->type == NFT_CHAIN_T_NAT &&
index b1f9c5303f026a14c799d03b579b9e7b577b6dc8..0b3347570265c4edc1b176f450bf920a0b81e5d4 100644 (file)
@@ -540,7 +540,7 @@ __build_packet_message(struct nfnl_log_net *log,
                        goto nla_put_failure;
        }
 
-       if (skb->tstamp) {
+       if (hooknum <= NF_INET_FORWARD && skb->tstamp) {
                struct nfulnl_msg_packet_timestamp ts;
                struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
                ts.sec = cpu_to_be64(kts.tv_sec);
index 0dcc3592d053ff41f7d8e25119d1a2fd7a90c74a..e057b2961d313cd426f2f2d37ed7e1a40c101174 100644 (file)
@@ -582,7 +582,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
        if (nfqnl_put_bridge(entry, skb) < 0)
                goto nla_put_failure;
 
-       if (entskb->tstamp) {
+       if (entry->state.hook <= NF_INET_FORWARD && entskb->tstamp) {
                struct nfqnl_msg_packet_timestamp ts;
                struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
 
index c13bcd0ab491304da6ddcaa3f59aefa17ab5eacc..8dbb4d48f2ed5995dedaa8eb4f4b18a0ba91acb2 100644 (file)
@@ -163,19 +163,24 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par)
        s64 stamp;
 
        /*
-        * We cannot use get_seconds() instead of __net_timestamp() here.
+        * We need real time here, but we can neither use skb->tstamp
+        * nor __net_timestamp().
+        *
+        * skb->tstamp and skb->skb_mstamp_ns overlap, however, they
+        * use different clock types (real vs monotonic).
+        *
         * Suppose you have two rules:
-        *      1. match before 13:00
-        *      2. match after 13:00
+        *      1. match before 13:00
+        *      2. match after 13:00
+        *
         * If you match against processing time (get_seconds) it
         * may happen that the same packet matches both rules if
-        * it arrived at the right moment before 13:00.
+        * it arrived at the right moment before 13:00, so it would be
+        * better to check skb->tstamp and set it via __net_timestamp()
+        * if needed.  This, however, breaks outgoing packets' tx timestamps,
+        * and causes them to get delayed forever by the fq packet scheduler.
         */
-       if (skb->tstamp == 0)
-               __net_timestamp((struct sk_buff *)skb);
-
-       stamp = ktime_to_ns(skb->tstamp);
-       stamp = div_s64(stamp, NSEC_PER_SEC);
+       stamp = get_seconds();
 
        if (info->flags & XT_TIME_LOCAL_TZ)
                /* Adjust for local timezone */
index f28e937320a3b453371143a035e6967482d17cd4..216ab915dd54d4ad7f205aac9f0ab3e3291a2684 100644 (file)
@@ -988,7 +988,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
        int err = 0;
-       unsigned long groups = nladdr->nl_groups;
+       unsigned long groups;
        bool bound;
 
        if (addr_len < sizeof(struct sockaddr_nl))
@@ -996,6 +996,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 
        if (nladdr->nl_family != AF_NETLINK)
                return -EINVAL;
+       groups = nladdr->nl_groups;
 
        /* Only superuser is allowed to listen multicasts */
        if (groups) {
index 1d3144d1990352f4eb8942220e03e225e01af19f..71ffd1a6dc7c6063c00f4c82f985fe9fc0d80dc0 100644 (file)
@@ -1392,18 +1392,22 @@ static int __init nr_proto_init(void)
        int i;
        int rc = proto_register(&nr_proto, 0);
 
-       if (rc != 0)
-               goto out;
+       if (rc)
+               return rc;
 
        if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
-               printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n");
-               return -1;
+               pr_err("NET/ROM: %s - nr_ndevs parameter too large\n",
+                      __func__);
+               rc = -EINVAL;
+               goto unregister_proto;
        }
 
        dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL);
-       if (dev_nr == NULL) {
-               printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
-               return -1;
+       if (!dev_nr) {
+               pr_err("NET/ROM: %s - unable to allocate device array\n",
+                      __func__);
+               rc = -ENOMEM;
+               goto unregister_proto;
        }
 
        for (i = 0; i < nr_ndevs; i++) {
@@ -1413,13 +1417,13 @@ static int __init nr_proto_init(void)
                sprintf(name, "nr%d", i);
                dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
                if (!dev) {
-                       printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
+                       rc = -ENOMEM;
                        goto fail;
                }
 
                dev->base_addr = i;
-               if (register_netdev(dev)) {
-                       printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
+               rc = register_netdev(dev);
+               if (rc) {
                        free_netdev(dev);
                        goto fail;
                }
@@ -1427,36 +1431,64 @@ static int __init nr_proto_init(void)
                dev_nr[i] = dev;
        }
 
-       if (sock_register(&nr_family_ops)) {
-               printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
+       rc = sock_register(&nr_family_ops);
+       if (rc)
                goto fail;
-       }
 
-       register_netdevice_notifier(&nr_dev_notifier);
+       rc = register_netdevice_notifier(&nr_dev_notifier);
+       if (rc)
+               goto out_sock;
 
        ax25_register_pid(&nr_pid);
        ax25_linkfail_register(&nr_linkfail_notifier);
 
 #ifdef CONFIG_SYSCTL
-       nr_register_sysctl();
+       rc = nr_register_sysctl();
+       if (rc)
+               goto out_sysctl;
 #endif
 
        nr_loopback_init();
 
-       proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops);
-       proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops);
-       proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops);
-out:
-       return rc;
+       rc = -ENOMEM;
+       if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops))
+               goto proc_remove1;
+       if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net,
+                            &nr_neigh_seqops))
+               goto proc_remove2;
+       if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net,
+                            &nr_node_seqops))
+               goto proc_remove3;
+
+       return 0;
+
+proc_remove3:
+       remove_proc_entry("nr_neigh", init_net.proc_net);
+proc_remove2:
+       remove_proc_entry("nr", init_net.proc_net);
+proc_remove1:
+
+       nr_loopback_clear();
+       nr_rt_free();
+
+#ifdef CONFIG_SYSCTL
+       nr_unregister_sysctl();
+out_sysctl:
+#endif
+       ax25_linkfail_release(&nr_linkfail_notifier);
+       ax25_protocol_release(AX25_P_NETROM);
+       unregister_netdevice_notifier(&nr_dev_notifier);
+out_sock:
+       sock_unregister(PF_NETROM);
 fail:
        while (--i >= 0) {
                unregister_netdev(dev_nr[i]);
                free_netdev(dev_nr[i]);
        }
        kfree(dev_nr);
+unregister_proto:
        proto_unregister(&nr_proto);
-       rc = -1;
-       goto out;
+       return rc;
 }
 
 module_init(nr_proto_init);
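
The nr_proto_init rework above replaces "return -1 and leak" with the standard
kernel goto-unwind idiom: each acquisition that succeeds gains an unwind label,
and a later failure jumps to the label that releases everything obtained so
far, in reverse order, before returning a real errno. A compact sketch of the
shape (acquire_a/acquire_b and release_a are hypothetical stand-ins):

    static int __init my_proto_init(void)
    {
            int rc;

            rc = acquire_a();           /* e.g. proto_register() */
            if (rc)
                    return rc;          /* nothing to unwind yet */

            rc = acquire_b();           /* e.g. sock_register() */
            if (rc)
                    goto err_a;

            return 0;

    err_a:
            release_a();                /* undo in reverse order */
            return rc;
    }
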
index 215ad22a96476ebb9d30919e99d67bda8e1ce88f..93d13f01998133a2b6c6b3256bb19679f14cea65 100644 (file)
@@ -70,7 +70,7 @@ static void nr_loopback_timer(struct timer_list *unused)
        }
 }
 
-void __exit nr_loopback_clear(void)
+void nr_loopback_clear(void)
 {
        del_timer_sync(&loopback_timer);
        skb_queue_purge(&loopback_queue);
index 6485f593e2f09bc3f215e2ad2c638154de738487..b76aa668a94bce6c6d1280d5cbf307d6ce94e013 100644 (file)
@@ -953,7 +953,7 @@ const struct seq_operations nr_neigh_seqops = {
 /*
  *     Free all memory associated with the nodes and routes lists.
  */
-void __exit nr_rt_free(void)
+void nr_rt_free(void)
 {
        struct nr_neigh *s = NULL;
        struct nr_node  *t = NULL;
index ba1c368b3f186e140149a75e8d98dee24587a020..771011b84270e87854a8c47db1c0253640449fcc 100644 (file)
@@ -146,9 +146,12 @@ static struct ctl_table nr_table[] = {
        { }
 };
 
-void __init nr_register_sysctl(void)
+int __init nr_register_sysctl(void)
 {
        nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
+       if (!nr_table_header)
+               return -ENOMEM;
+       return 0;
 }
 
 void nr_unregister_sysctl(void)
index d6cc97fbbbb02458d958a8f493e37e6249db4db6..2b969f99ef1311f845baea874a985714cb051c7c 100644 (file)
@@ -543,6 +543,9 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
        struct rds_sock *rs = rds_sk_to_rs(sk);
        int ret = 0;
 
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return -EINVAL;
+
        lock_sock(sk);
 
        switch (uaddr->sa_family) {
index 17c9d9f0c8483b4b0a887e69e7caac246c369423..0f4398e7f2a7add7c20b6fdd333c40af4e719c92 100644 (file)
@@ -173,6 +173,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        /* We allow an RDS socket to be bound to either IPv4 or IPv6
         * address.
         */
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return -EINVAL;
        if (uaddr->sa_family == AF_INET) {
                struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
 
index 31cf37da4510c3b53377ea40d4880638a89775e5..93c0437e6a5fd284b3e6dd1283e31839a305be7b 100644 (file)
@@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
        else
                pool = rds_ibdev->mr_1m_pool;
 
+       if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+               queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+
+       /* Switch pools if one of the pools is reaching its upper limit */
+       if (atomic_read(&pool->dirty_count) >=  pool->max_items * 9 / 10) {
+               if (pool->pool_type == RDS_IB_MR_8K_POOL)
+                       pool = rds_ibdev->mr_1m_pool;
+               else
+                       pool = rds_ibdev->mr_8k_pool;
+       }
+
        ibmr = rds_ib_try_reuse_ibmr(pool);
        if (ibmr)
                return ibmr;
index 63c8d107adcfbec096b3dbcead8de98ec6327bc1..d664e9ade74dea264c06e0ac03997ebdc0254235 100644 (file)
@@ -454,9 +454,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
        struct rds_ib_mr *ibmr = NULL;
        int iter = 0;
 
-       if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
-               queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
-
        while (1) {
                ibmr = rds_ib_reuse_mr(pool);
                if (ibmr)
index 7af4f99c4a9321bb3eef8d77f7e2dedf981e19f3..094a6621f8e803ae41101899ef02f080d91ac0f3 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 
 static struct sk_buff_head loopback_queue;
+#define ROSE_LOOPBACK_LIMIT 1000
 static struct timer_list loopback_timer;
 
 static void rose_set_loopback_timer(void);
@@ -35,29 +36,27 @@ static int rose_loopback_running(void)
 
 int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
 {
-       struct sk_buff *skbn;
+       struct sk_buff *skbn = NULL;
 
-       skbn = skb_clone(skb, GFP_ATOMIC);
+       if (skb_queue_len(&loopback_queue) < ROSE_LOOPBACK_LIMIT)
+               skbn = skb_clone(skb, GFP_ATOMIC);
 
-       kfree_skb(skb);
-
-       if (skbn != NULL) {
+       if (skbn) {
+               consume_skb(skb);
                skb_queue_tail(&loopback_queue, skbn);
 
                if (!rose_loopback_running())
                        rose_set_loopback_timer();
+       } else {
+               kfree_skb(skb);
        }
 
        return 1;
 }
 
-
 static void rose_set_loopback_timer(void)
 {
-       del_timer(&loopback_timer);
-
-       loopback_timer.expires  = jiffies + 10;
-       add_timer(&loopback_timer);
+       mod_timer(&loopback_timer, jiffies + 10);
 }
 
 static void rose_loopback_timer(struct timer_list *unused)
@@ -68,8 +67,12 @@ static void rose_loopback_timer(struct timer_list *unused)
        struct sock *sk;
        unsigned short frametype;
        unsigned int lci_i, lci_o;
+       int count;
 
-       while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
+       for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) {
+               skb = skb_dequeue(&loopback_queue);
+               if (!skb)
+                       return;
                if (skb->len < ROSE_MIN_LEN) {
                        kfree_skb(skb);
                        continue;
@@ -106,6 +109,8 @@ static void rose_loopback_timer(struct timer_list *unused)
                        kfree_skb(skb);
                }
        }
+       if (!skb_queue_empty(&loopback_queue))
+               mod_timer(&loopback_timer, jiffies + 1);
 }
 
 void __exit rose_loopback_clear(void)
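
The rose_loopback changes above bound the deferred work twice over: producers
may not grow the queue past ROSE_LOOPBACK_LIMIT, and the timer handler
processes at most that many frames per tick, re-arming itself with mod_timer()
when a backlog remains instead of looping until empty. Note also the
consume_skb()/kfree_skb() split: the original skb is consumed (normal
disposal) when a clone was queued, and freed (counted as a drop) when it was
not. The budget-limited shape, reduced to a sketch (process_frame is
hypothetical):

    static void drain_timer(struct timer_list *unused)
    {
            struct sk_buff *skb;
            int budget;

            for (budget = 0; budget < ROSE_LOOPBACK_LIMIT; budget++) {
                    skb = skb_dequeue(&loopback_queue);
                    if (!skb)
                            return;              /* fully drained */
                    process_frame(skb);          /* hypothetical */
            }
            /* budget exhausted but work remains: run again soon */
            if (!skb_queue_empty(&loopback_queue))
                    mod_timer(&loopback_timer, jiffies + 1);
    }
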
index 96f2952bbdfd6e62ffcec87f0a565378abbfe4f5..ae8c5d7f3bf1e29460e5b96b05b7b1b1ecd4ce15 100644 (file)
@@ -135,7 +135,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
        struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
        struct rxrpc_local *local;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
-       u16 service_id = srx->srx_service;
+       u16 service_id;
        int ret;
 
        _enter("%p,%p,%d", rx, saddr, len);
@@ -143,6 +143,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
        ret = rxrpc_validate_address(rx, srx, len);
        if (ret < 0)
                goto error;
+       service_id = srx->srx_service;
 
        lock_sock(&rx->sk);
 
@@ -370,18 +371,22 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
  * rxrpc_kernel_check_life - Check to see whether a call is still alive
  * @sock: The socket the call is on
  * @call: The call to check
+ * @_life: Where to store the life value
  *
  * Allow a kernel service to find out whether a call is still alive - ie. we're
- * getting ACKs from the server.  Returns a number representing the life state
- * which can be compared to that returned by a previous call.
+ * getting ACKs from the server.  Passes back in *_life a number representing
+ * the life state, which can be compared to that returned by a previous call,
+ * and returns true if the call is still alive.
  *
  * If the life state stalls, rxrpc_kernel_probe_life() should be called and
  * then 2RTT waited.
  */
-u32 rxrpc_kernel_check_life(const struct socket *sock,
-                           const struct rxrpc_call *call)
+bool rxrpc_kernel_check_life(const struct socket *sock,
+                            const struct rxrpc_call *call,
+                            u32 *_life)
 {
-       return call->acks_latest;
+       *_life = call->acks_latest;
+       return call->state != RXRPC_CALL_COMPLETE;
 }
 EXPORT_SYMBOL(rxrpc_kernel_check_life);
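
With the new signature, a kernel-service caller gets the liveness token
through *_life and a separate alive/complete verdict from the return value. A
hedged sketch of a polling caller (last_life is a caller-side variable, not
rxrpc API):

    u32 life;

    if (!rxrpc_kernel_check_life(sock, call, &life)) {
            /* call is complete; collect the result and tear down */
    } else if (life == last_life) {
            /* no ACK progress since the previous poll: send a probe
             * and allow 2*RTT before treating the call as dead */
            rxrpc_kernel_probe_life(sock, call);
    } else {
            last_life = life;               /* progress observed */
    }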
 
index 4b1a534d290a79e3f035ee60766b4f2ebb2e35c2..062ca9dc29b8ab2fa7381c606791d4fd39657962 100644 (file)
@@ -654,6 +654,7 @@ struct rxrpc_call {
        u8                      ackr_reason;    /* reason to ACK */
        u16                     ackr_skew;      /* skew on packet being ACK'd */
        rxrpc_serial_t          ackr_serial;    /* serial of packet being ACK'd */
+       rxrpc_serial_t          ackr_first_seq; /* first sequence number received */
        rxrpc_seq_t             ackr_prev_seq;  /* previous sequence number received */
        rxrpc_seq_t             ackr_consumed;  /* Highest packet shown consumed */
        rxrpc_seq_t             ackr_seen;      /* Highest packet shown seen */
index b6fca8ebb1173f4de1047e96315c26072666c2e9..8d31fb4c51e17c1934face0f4320dfd219525a66 100644 (file)
@@ -153,7 +153,8 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
  * pass a connection-level abort onto all calls on that connection
  */
 static void rxrpc_abort_calls(struct rxrpc_connection *conn,
-                             enum rxrpc_call_completion compl)
+                             enum rxrpc_call_completion compl,
+                             rxrpc_serial_t serial)
 {
        struct rxrpc_call *call;
        int i;
@@ -173,6 +174,9 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
                                                  call->call_id, 0,
                                                  conn->abort_code,
                                                  conn->error);
+                       else
+                               trace_rxrpc_rx_abort(call, serial,
+                                                    conn->abort_code);
                        if (rxrpc_set_call_completion(call, compl,
                                                      conn->abort_code,
                                                      conn->error))
@@ -213,8 +217,6 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
        conn->state = RXRPC_CONN_LOCALLY_ABORTED;
        spin_unlock_bh(&conn->state_lock);
 
-       rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED);
-
        msg.msg_name    = &conn->params.peer->srx.transport;
        msg.msg_namelen = conn->params.peer->srx.transport_len;
        msg.msg_control = NULL;
@@ -242,6 +244,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
        len = iov[0].iov_len + iov[1].iov_len;
 
        serial = atomic_inc_return(&conn->serial);
+       rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial);
        whdr.serial = htonl(serial);
        _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);
 
@@ -321,7 +324,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
                conn->error = -ECONNABORTED;
                conn->abort_code = abort_code;
                conn->state = RXRPC_CONN_REMOTELY_ABORTED;
-               rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED);
+               rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
                return -ECONNABORTED;
 
        case RXRPC_PACKET_TYPE_CHALLENGE:
index 9128aa0e40aac8f51a84f10dc0bd0dd5933c1e23..c2c35cf4e3089038bcc73663f0a0d3ccf24b9743 100644 (file)
@@ -837,7 +837,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
                u8 acks[RXRPC_MAXACKS];
        } buf;
        rxrpc_serial_t acked_serial;
-       rxrpc_seq_t first_soft_ack, hard_ack;
+       rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
        int nr_acks, offset, ioffset;
 
        _enter("");
@@ -851,13 +851,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 
        acked_serial = ntohl(buf.ack.serial);
        first_soft_ack = ntohl(buf.ack.firstPacket);
+       prev_pkt = ntohl(buf.ack.previousPacket);
        hard_ack = first_soft_ack - 1;
        nr_acks = buf.ack.nAcks;
        summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
                              buf.ack.reason : RXRPC_ACK__INVALID);
 
        trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
-                          first_soft_ack, ntohl(buf.ack.previousPacket),
+                          first_soft_ack, prev_pkt,
                           summary.ack_reason, nr_acks);
 
        if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
@@ -878,8 +879,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
                                  rxrpc_propose_ack_respond_to_ack);
        }
 
-       /* Discard any out-of-order or duplicate ACKs. */
-       if (before_eq(sp->hdr.serial, call->acks_latest))
+       /* Discard any out-of-order or duplicate ACKs (outside lock). */
+       if (before(first_soft_ack, call->ackr_first_seq) ||
+           before(prev_pkt, call->ackr_prev_seq))
                return;
 
        buf.info.rxMTU = 0;
@@ -890,12 +892,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 
        spin_lock(&call->input_lock);
 
-       /* Discard any out-of-order or duplicate ACKs. */
-       if (before_eq(sp->hdr.serial, call->acks_latest))
+       /* Discard any out-of-order or duplicate ACKs (inside lock). */
+       if (before(first_soft_ack, call->ackr_first_seq) ||
+           before(prev_pkt, call->ackr_prev_seq))
                goto out;
        call->acks_latest_ts = skb->tstamp;
        call->acks_latest = sp->hdr.serial;
 
+       call->ackr_first_seq = first_soft_ack;
+       call->ackr_prev_seq = prev_pkt;
+
        /* Parse rwind and mtu sizes if provided. */
        if (buf.info.rxMTU)
                rxrpc_input_ackinfo(call, skb, &buf.info);
@@ -1155,19 +1161,19 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
  * handle data received on the local endpoint
  * - may be called in interrupt context
  *
- * The socket is locked by the caller and this prevents the socket from being
- * shut down and the local endpoint from going away, thus sk_user_data will not
- * be cleared until this function returns.
+ * [!] Note that as this is called from the encap_rcv hook, the socket is not
+ * held locked by the caller and nothing prevents sk_user_data on the UDP from
+ * being cleared in the middle of processing this function.
  *
  * Called with the RCU read lock held from the IP layer via UDP.
  */
 int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 {
+       struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
        struct rxrpc_connection *conn;
        struct rxrpc_channel *chan;
        struct rxrpc_call *call = NULL;
        struct rxrpc_skb_priv *sp;
-       struct rxrpc_local *local = udp_sk->sk_user_data;
        struct rxrpc_peer *peer = NULL;
        struct rxrpc_sock *rx = NULL;
        unsigned int channel;
@@ -1175,6 +1181,10 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 
        _enter("%p", udp_sk);
 
+       if (unlikely(!local)) {
+               kfree_skb(skb);
+               return 0;
+       }
        if (skb->tstamp == 0)
                skb->tstamp = ktime_get_real();
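
Two independent hardenings land in this file. rxrpc_input_packet() now
fetches sk_user_data once via rcu_dereference_sk_user_data() and drops the
packet if the local endpoint is already gone, since the encap_rcv path does
not hold the socket lock. And the duplicate-ACK filter is evaluated twice: an
unlocked early-out before input_lock, then an authoritative re-check under
the lock before the stored sequence numbers are advanced. A sketch of that
recheck-under-lock pattern (is_stale and advance are hypothetical):

    /* cheap, racy pre-check: only an optimization */
    if (is_stale(pkt, state))
            return;

    spin_lock(&state->lock);
    if (is_stale(pkt, state)) {         /* authoritative re-check */
            spin_unlock(&state->lock);
            return;
    }
    advance(state, pkt);                /* safe: still fresh under the lock */
    spin_unlock(&state->lock);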
 
index 15cf42d5b53a56d8d19cabdc8c2b55156d73d28a..01959db51445ca00e6044d8a849e698c4ab17a33 100644 (file)
@@ -304,7 +304,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
        ret = -ENOMEM;
 sock_error:
        mutex_unlock(&rxnet->local_mutex);
-       kfree(local);
+       if (local)
+               call_rcu(&local->rcu, rxrpc_local_rcu);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
 
index bc05af89fc381daa46d7cf8032c9900dfbcea65c..6e84d878053c7b8821483c0c1447a5c338d5fade 100644 (file)
@@ -157,6 +157,11 @@ void rxrpc_error_report(struct sock *sk)
 
        _enter("%p{%d}", sk, local->debug_id);
 
+       /* Clear the outstanding error value on the socket so that it doesn't
+        * cause kernel_sendmsg() to return it later.
+        */
+       sock_error(sk);
+
        skb = sock_dequeue_err_skb(sk);
        if (!skb) {
                _leave("UDP socket errqueue empty");
index 46c9312085b1ba81b4941607f751a07adb8f3c20..bec64deb7b0a2794345c896827846fa8bac57e19 100644 (file)
@@ -152,12 +152,13 @@ static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
 }
 
 /*
- * Queue a DATA packet for transmission, set the resend timeout and send the
- * packet immediately
+ * Queue a DATA packet for transmission, set the resend timeout and send
+ * the packet immediately.  Returns the error from rxrpc_send_data_packet()
+ * in case the caller wants to do something with it.
  */
-static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
-                              struct sk_buff *skb, bool last,
-                              rxrpc_notify_end_tx_t notify_end_tx)
+static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
+                             struct sk_buff *skb, bool last,
+                             rxrpc_notify_end_tx_t notify_end_tx)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        unsigned long now;
@@ -250,7 +251,8 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
 
 out:
        rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
-       _leave("");
+       _leave(" = %d", ret);
+       return ret;
 }
 
 /*
@@ -423,9 +425,10 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
                        if (ret < 0)
                                goto out;
 
-                       rxrpc_queue_packet(rx, call, skb,
-                                          !msg_data_left(msg) && !more,
-                                          notify_end_tx);
+                       ret = rxrpc_queue_packet(rx, call, skb,
+                                                !msg_data_left(msg) && !more,
+                                                notify_end_tx);
+                       /* Should check for failure here */
                        skb = NULL;
                }
        } while (msg_data_left(msg) > 0);
index 9874e60c9b0d00924042c1b377bc0c777edfc4cb..4583fa914e62aedaf2ef29c5cf668f0caee4eade 100644 (file)
@@ -4847,7 +4847,8 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr,
        }
 
        /* Validate addr_len before calling common connect/connectx routine. */
-       af = sctp_get_af_specific(addr->sa_family);
+       af = addr_len < offsetofend(struct sockaddr, sa_family) ? NULL :
+               sctp_get_af_specific(addr->sa_family);
        if (!af || addr_len < af->sockaddr_len) {
                err = -EINVAL;
        } else {
index 77ef53596d18c5fd091b6888efbc8b35063087a8..6f869ef49b3226806ab7f7973821870d77618004 100644 (file)
@@ -167,10 +167,9 @@ static int smc_release(struct socket *sock)
 
        if (sk->sk_state == SMC_CLOSED) {
                if (smc->clcsock) {
-                       mutex_lock(&smc->clcsock_release_lock);
-                       sock_release(smc->clcsock);
-                       smc->clcsock = NULL;
-                       mutex_unlock(&smc->clcsock_release_lock);
+                       release_sock(sk);
+                       smc_clcsock_release(smc);
+                       lock_sock(sk);
                }
                if (!smc->use_fallback)
                        smc_conn_free(&smc->conn);
@@ -446,10 +445,19 @@ static void smc_link_save_peer_info(struct smc_link *link,
        link->peer_mtu = clc->qp_mtu;
 }
 
+static void smc_switch_to_fallback(struct smc_sock *smc)
+{
+       smc->use_fallback = true;
+       if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
+               smc->clcsock->file = smc->sk.sk_socket->file;
+               smc->clcsock->file->private_data = smc->clcsock;
+       }
+}
+
 /* fall back during connect */
 static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
 {
-       smc->use_fallback = true;
+       smc_switch_to_fallback(smc);
        smc->fallback_rsn = reason_code;
        smc_copy_sock_settings_to_clc(smc);
        if (smc->sk.sk_state == SMC_INIT)
@@ -775,10 +783,14 @@ static void smc_connect_work(struct work_struct *work)
                smc->sk.sk_err = -rc;
 
 out:
-       if (smc->sk.sk_err)
-               smc->sk.sk_state_change(&smc->sk);
-       else
-               smc->sk.sk_write_space(&smc->sk);
+       if (!sock_flag(&smc->sk, SOCK_DEAD)) {
+               if (smc->sk.sk_err) {
+                       smc->sk.sk_state_change(&smc->sk);
+               } else { /* allow polling before and after fallback decision */
+                       smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
+                       smc->sk.sk_write_space(&smc->sk);
+               }
+       }
        kfree(smc->connect_info);
        smc->connect_info = NULL;
        release_sock(&smc->sk);
@@ -872,11 +884,11 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
        if  (rc < 0)
                lsk->sk_err = -rc;
        if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
+               new_sk->sk_prot->unhash(new_sk);
                if (new_clcsock)
                        sock_release(new_clcsock);
                new_sk->sk_state = SMC_CLOSED;
                sock_set_flag(new_sk, SOCK_DEAD);
-               new_sk->sk_prot->unhash(new_sk);
                sock_put(new_sk); /* final */
                *new_smc = NULL;
                goto out;
@@ -927,16 +939,21 @@ struct sock *smc_accept_dequeue(struct sock *parent,
 
                smc_accept_unlink(new_sk);
                if (new_sk->sk_state == SMC_CLOSED) {
+                       new_sk->sk_prot->unhash(new_sk);
                        if (isk->clcsock) {
                                sock_release(isk->clcsock);
                                isk->clcsock = NULL;
                        }
-                       new_sk->sk_prot->unhash(new_sk);
                        sock_put(new_sk); /* final */
                        continue;
                }
-               if (new_sock)
+               if (new_sock) {
                        sock_graft(new_sk, new_sock);
+                       if (isk->use_fallback) {
+                               smc_sk(new_sk)->clcsock->file = new_sock->file;
+                               isk->clcsock->file->private_data = isk->clcsock;
+                       }
+               }
                return new_sk;
        }
        return NULL;
@@ -956,6 +973,7 @@ void smc_close_non_accepted(struct sock *sk)
                sock_set_flag(sk, SOCK_DEAD);
                sk->sk_shutdown |= SHUTDOWN_MASK;
        }
+       sk->sk_prot->unhash(sk);
        if (smc->clcsock) {
                struct socket *tcp;
 
@@ -971,7 +989,6 @@ void smc_close_non_accepted(struct sock *sk)
                        smc_conn_free(&smc->conn);
        }
        release_sock(sk);
-       sk->sk_prot->unhash(sk);
        sock_put(sk); /* final sock_put */
 }
 
@@ -1037,13 +1054,13 @@ static void smc_listen_out(struct smc_sock *new_smc)
        struct smc_sock *lsmc = new_smc->listen_smc;
        struct sock *newsmcsk = &new_smc->sk;
 
-       lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
        if (lsmc->sk.sk_state == SMC_LISTEN) {
+               lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
                smc_accept_enqueue(&lsmc->sk, newsmcsk);
+               release_sock(&lsmc->sk);
        } else { /* no longer listening */
                smc_close_non_accepted(newsmcsk);
        }
-       release_sock(&lsmc->sk);
 
        /* Wake up accept */
        lsmc->sk.sk_data_ready(&lsmc->sk);
@@ -1087,7 +1104,7 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
                return;
        }
        smc_conn_free(&new_smc->conn);
-       new_smc->use_fallback = true;
+       smc_switch_to_fallback(new_smc);
        new_smc->fallback_rsn = reason_code;
        if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
                if (smc_clc_send_decline(new_smc, reason_code) < 0) {
@@ -1237,6 +1254,9 @@ static void smc_listen_work(struct work_struct *work)
        int rc = 0;
        u8 ibport;
 
+       if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
+               return smc_listen_out_err(new_smc);
+
        if (new_smc->use_fallback) {
                smc_listen_out_connected(new_smc);
                return;
@@ -1244,7 +1264,7 @@ static void smc_listen_work(struct work_struct *work)
 
        /* check if peer is smc capable */
        if (!tcp_sk(newclcsock->sk)->syn_smc) {
-               new_smc->use_fallback = true;
+               smc_switch_to_fallback(new_smc);
                new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
                smc_listen_out_connected(new_smc);
                return;
@@ -1501,7 +1521,7 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 
        if (msg->msg_flags & MSG_FASTOPEN) {
                if (sk->sk_state == SMC_INIT) {
-                       smc->use_fallback = true;
+                       smc_switch_to_fallback(smc);
                        smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
                } else {
                        rc = -EINVAL;
@@ -1703,7 +1723,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
        case TCP_FASTOPEN_NO_COOKIE:
                /* option not supported by SMC */
                if (sk->sk_state == SMC_INIT) {
-                       smc->use_fallback = true;
+                       smc_switch_to_fallback(smc);
                        smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
                } else {
                        if (!smc->use_fallback)
index 2ad37e998509310f210f4e3654cc054487731e87..fc06720b53c1442a8dd3222ed7be482a8993ab92 100644 (file)
 
 #define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME     (5 * HZ)
 
+/* release the clcsock that is assigned to the smc_sock */
+void smc_clcsock_release(struct smc_sock *smc)
+{
+       struct socket *tcp;
+
+       if (smc->listen_smc && current_work() != &smc->smc_listen_work)
+               cancel_work_sync(&smc->smc_listen_work);
+       mutex_lock(&smc->clcsock_release_lock);
+       if (smc->clcsock) {
+               tcp = smc->clcsock;
+               smc->clcsock = NULL;
+               sock_release(tcp);
+       }
+       mutex_unlock(&smc->clcsock_release_lock);
+}
+
 static void smc_close_cleanup_listen(struct sock *parent)
 {
        struct sock *sk;
@@ -321,6 +337,7 @@ static void smc_close_passive_work(struct work_struct *work)
                                                   close_work);
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
        struct smc_cdc_conn_state_flags *rxflags;
+       bool release_clcsock = false;
        struct sock *sk = &smc->sk;
        int old_state;
 
@@ -400,13 +417,13 @@ static void smc_close_passive_work(struct work_struct *work)
                if ((sk->sk_state == SMC_CLOSED) &&
                    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
                        smc_conn_free(conn);
-                       if (smc->clcsock) {
-                               sock_release(smc->clcsock);
-                               smc->clcsock = NULL;
-                       }
+                       if (smc->clcsock)
+                               release_clcsock = true;
                }
        }
        release_sock(sk);
+       if (release_clcsock)
+               smc_clcsock_release(smc);
        sock_put(sk); /* sock_hold done by schedulers of close_work */
 }
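
smc_close_passive_work() avoids calling the potentially sleeping
smc_clcsock_release() while holding the socket lock: the hunk records the
decision in a local flag under the lock and acts on it only after
release_sock(). The defer-past-the-lock shape, reduced to its essentials:

    bool release_clcsock = false;

    lock_sock(sk);
    /* ... state transitions ... */
    if (smc->clcsock)
            release_clcsock = true;     /* decide while protected */
    release_sock(sk);

    if (release_clcsock)
            smc_clcsock_release(smc);   /* sleepable work, lock dropped */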
 
index 19eb6a211c23cd12fad8f5077a26209bb05c3d33..e0e3b5df25d2474b8aadd2e7639d07e0c8c631ef 100644 (file)
@@ -23,5 +23,6 @@ void smc_close_wake_tx_prepared(struct smc_sock *smc);
 int smc_close_active(struct smc_sock *smc);
 int smc_close_shutdown_write(struct smc_sock *smc);
 void smc_close_init(struct smc_sock *smc);
+void smc_clcsock_release(struct smc_sock *smc);
 
 #endif /* SMC_CLOSE_H */
index 2fff79db1a59ce3d2908722941dd9355810c65a0..e89e918b88e09acaad980da8dc34e3d921fe69be 100644 (file)
@@ -289,6 +289,11 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
        INIT_LIST_HEAD(&smcd->vlan);
        smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
                                                 WQ_MEM_RECLAIM, name);
+       if (!smcd->event_wq) {
+               kfree(smcd->conn);
+               kfree(smcd);
+               return NULL;
+       }
        return smcd;
 }
 EXPORT_SYMBOL_GPL(smcd_alloc_dev);
index 8d2f6296279c96827e332153ff274f522a3cb689..0285c7f9e79b6edb6a288be8bb50092a55bc7cfb 100644 (file)
@@ -603,7 +603,8 @@ static int smc_pnet_flush(struct sk_buff *skb, struct genl_info *info)
 {
        struct net *net = genl_info_net(info);
 
-       return smc_pnet_remove_by_pnetid(net, NULL);
+       smc_pnet_remove_by_pnetid(net, NULL);
+       return 0;
 }
 
 /* SMC_PNETID generic netlink operation definition */
index 860dcfb95ee472fed5d74e6015af2acce178c0a7..fa6c977b4c41a4a0b8deeb99c3e5d0d03c55de2b 100644 (file)
@@ -140,13 +140,11 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
                        /* We are going to append to the frags_list of head.
                         * Need to unshare the frag_list.
                         */
-                       if (skb_has_frag_list(head)) {
-                               err = skb_unclone(head, GFP_ATOMIC);
-                               if (err) {
-                                       STRP_STATS_INCR(strp->stats.mem_fail);
-                                       desc->error = err;
-                                       return 0;
-                               }
+                       err = skb_unclone(head, GFP_ATOMIC);
+                       if (err) {
+                               STRP_STATS_INCR(strp->stats.mem_fail);
+                               desc->error = err;
+                               return 0;
                        }
 
                        if (unlikely(skb_shinfo(head)->frag_list)) {
index 12bb23b8e0c50c80abd51c69a5c5a2ea6433a723..261131dfa1f1ba3900d85088a6cfde659bbe231a 100644 (file)
@@ -54,6 +54,7 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
        h->last_refresh = now;
 }
 
+static inline int cache_is_valid(struct cache_head *h);
 static void cache_fresh_locked(struct cache_head *head, time_t expiry,
                                struct cache_detail *detail);
 static void cache_fresh_unlocked(struct cache_head *head,
@@ -105,6 +106,8 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
                        if (cache_is_expired(detail, tmp)) {
                                hlist_del_init_rcu(&tmp->cache_list);
                                detail->entries --;
+                               if (cache_is_valid(tmp) == -EAGAIN)
+                                       set_bit(CACHE_NEGATIVE, &tmp->flags);
                                cache_fresh_locked(tmp, 0, detail);
                                freeme = tmp;
                                break;
index 1d0395ef62c95b9285bdaca47f96f48a3d5093bf..8ff11dc98d7f93fefeff6ecc53ff6d7815da47f8 100644 (file)
@@ -2081,8 +2081,8 @@ call_transmit_status(struct rpc_task *task)
         * test first.
         */
        if (rpc_task_transmitted(task)) {
-               if (task->tk_status == 0)
-                       xprt_request_wait_receive(task);
+               task->tk_status = 0;
+               xprt_request_wait_receive(task);
                return;
        }
 
@@ -2167,6 +2167,9 @@ call_bc_transmit_status(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
 
+       if (rpc_task_transmitted(task))
+               task->tk_status = 0;
+
        dprint_status(task);
 
        switch (task->tk_status) {
index 341ecd796aa473d35e770d4dfbf413ee3bcdc1cf..131aa2f0fd27c46e14f024b317dd65c786b0bea4 100644 (file)
@@ -869,6 +869,8 @@ void tipc_link_reset(struct tipc_link *l)
        __skb_queue_head_init(&list);
 
        l->in_session = false;
+       /* Force re-synch of peer session number before establishing */
+       l->peer_session--;
        l->session++;
        l->mtu = l->advertised_mtu;
 
index bff241f0352501aba8605622df16f2c85044c09b..89993afe0fbd38713dd3d0499cc79e6c3e159b4d 100644 (file)
@@ -909,7 +909,8 @@ static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg,
        for (; i < TIPC_NAMETBL_SIZE; i++) {
                head = &tn->nametbl->services[i];
 
-               if (*last_type) {
+               if (*last_type ||
+                   (!i && *last_key && (*last_lower == *last_key))) {
                        service = tipc_service_find(net, *last_type);
                        if (!service)
                                return -EPIPE;
index 3481e4906bd6a4a3e1f27ec5d49106090c7ec7f1..9df82a573aa7768f583999e740022ce00295bbd4 100644 (file)
@@ -38,6 +38,8 @@
 
 #include <linux/sysctl.h>
 
+static int zero;
+static int one = 1;
 static struct ctl_table_header *tipc_ctl_hdr;
 
 static struct ctl_table tipc_table[] = {
@@ -46,14 +48,16 @@ static struct ctl_table tipc_table[] = {
                .data           = &sysctl_tipc_rmem,
                .maxlen         = sizeof(sysctl_tipc_rmem),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &one,
        },
        {
                .procname       = "named_timeout",
                .data           = &sysctl_tipc_named_timeout,
                .maxlen         = sizeof(sysctl_tipc_named_timeout),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
        },
        {
                .procname       = "sk_filter",
index 135a7ee9db034149252f8df3a56f7834ff573eab..cc0256939eb63e2afef7dddb33f57583d1387cc3 100644 (file)
@@ -52,8 +52,11 @@ static DEFINE_SPINLOCK(tls_device_lock);
 
 static void tls_device_free_ctx(struct tls_context *ctx)
 {
-       if (ctx->tx_conf == TLS_HW)
+       if (ctx->tx_conf == TLS_HW) {
                kfree(tls_offload_ctx_tx(ctx));
+               kfree(ctx->tx.rec_seq);
+               kfree(ctx->tx.iv);
+       }
 
        if (ctx->rx_conf == TLS_HW)
                kfree(tls_offload_ctx_rx(ctx));
@@ -216,6 +219,13 @@ void tls_device_sk_destruct(struct sock *sk)
 }
 EXPORT_SYMBOL(tls_device_sk_destruct);
 
+void tls_device_free_resources_tx(struct sock *sk)
+{
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+       tls_free_partial_record(sk, tls_ctx);
+}
+
 static void tls_append_frag(struct tls_record_info *record,
                            struct page_frag *pfrag,
                            int size)
@@ -894,7 +904,9 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
        goto release_netdev;
 
 free_sw_resources:
+       up_read(&device_offload_lock);
        tls_sw_free_resources_rx(sk);
+       down_read(&device_offload_lock);
 release_ctx:
        ctx->priv_ctx_rx = NULL;
 release_netdev:
@@ -929,8 +941,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
        }
 out:
        up_read(&device_offload_lock);
-       kfree(tls_ctx->rx.rec_seq);
-       kfree(tls_ctx->rx.iv);
        tls_sw_release_resources_rx(sk);
 }
 
index 54c3a758f2a7d9bf32f9ab13c7dbe1389e54ecc3..a3ebd4b02714c9ea46c2622e3b8742d637a2541c 100644 (file)
@@ -194,6 +194,9 @@ static void update_chksum(struct sk_buff *skb, int headln)
 
 static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
 {
+       struct sock *sk = skb->sk;
+       int delta;
+
        skb_copy_header(nskb, skb);
 
        skb_put(nskb, skb->len);
@@ -201,11 +204,15 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
        update_chksum(nskb, headln);
 
        nskb->destructor = skb->destructor;
-       nskb->sk = skb->sk;
+       nskb->sk = sk;
        skb->destructor = NULL;
        skb->sk = NULL;
-       refcount_add(nskb->truesize - skb->truesize,
-                    &nskb->sk->sk_wmem_alloc);
+
+       delta = nskb->truesize - skb->truesize;
+       if (likely(delta < 0))
+               WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
+       else if (delta)
+               refcount_add(delta, &sk->sk_wmem_alloc);
 }
 
 /* This function may be called after the user socket is already
index df921a2904b9b5b96acab53e52fa66090a900660..478603f43964d557146ae141ba45d4b0cae538fd 100644 (file)
@@ -208,6 +208,26 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
        return tls_push_sg(sk, ctx, sg, offset, flags);
 }
 
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
+{
+       struct scatterlist *sg;
+
+       sg = ctx->partially_sent_record;
+       if (!sg)
+               return false;
+
+       while (1) {
+               put_page(sg_page(sg));
+               sk_mem_uncharge(sk, sg->length);
+
+               if (sg_is_last(sg))
+                       break;
+               sg++;
+       }
+       ctx->partially_sent_record = NULL;
+       return true;
+}
+
 static void tls_write_space(struct sock *sk)
 {
        struct tls_context *ctx = tls_get_ctx(sk);
@@ -267,13 +287,14 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
                kfree(ctx->tx.rec_seq);
                kfree(ctx->tx.iv);
                tls_sw_free_resources_tx(sk);
+#ifdef CONFIG_TLS_DEVICE
+       } else if (ctx->tx_conf == TLS_HW) {
+               tls_device_free_resources_tx(sk);
+#endif
        }
 
-       if (ctx->rx_conf == TLS_SW) {
-               kfree(ctx->rx.rec_seq);
-               kfree(ctx->rx.iv);
+       if (ctx->rx_conf == TLS_SW)
                tls_sw_free_resources_rx(sk);
-       }
 
 #ifdef CONFIG_TLS_DEVICE
        if (ctx->rx_conf == TLS_HW)
index 20b1912279694a457fb4bc9d5287186ced1afacc..29d6af43dd249dd72c175ac7401a308ef6193c2c 100644 (file)
@@ -2052,20 +2052,7 @@ void tls_sw_free_resources_tx(struct sock *sk)
        /* Free up un-sent records in tx_list. First, free
         * the partially sent record if any at head of tx_list.
         */
-       if (tls_ctx->partially_sent_record) {
-               struct scatterlist *sg = tls_ctx->partially_sent_record;
-
-               while (1) {
-                       put_page(sg_page(sg));
-                       sk_mem_uncharge(sk, sg->length);
-
-                       if (sg_is_last(sg))
-                               break;
-                       sg++;
-               }
-
-               tls_ctx->partially_sent_record = NULL;
-
+       if (tls_free_partial_record(sk, tls_ctx)) {
                rec = list_first_entry(&ctx->tx_list,
                                       struct tls_rec, list);
                list_del(&rec->list);
@@ -2091,6 +2078,9 @@ void tls_sw_release_resources_rx(struct sock *sk)
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
 
+       kfree(tls_ctx->rx.rec_seq);
+       kfree(tls_ctx->rx.iv);
+
        if (ctx->aead_recv) {
                kfree_skb(ctx->recv_pkt);
                ctx->recv_pkt = NULL;
index 25a9e3b5c1542a71fff0f4f2ab8166f977786f8c..47e30a58566c2817696655212a8da4c5fc00f00e 100644 (file)
@@ -13650,7 +13650,8 @@ static const struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_UNS_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_DEAUTHENTICATE,
@@ -13701,7 +13702,8 @@ static const struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_UNS_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS,
@@ -13709,7 +13711,8 @@ static const struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_DISCONNECT,
@@ -13738,7 +13741,8 @@ static const struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_UNS_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_DEL_PMKSA,
@@ -14090,7 +14094,8 @@ static const struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_UNS_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_WIPHY |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_SET_QOS_MAP,
@@ -14145,7 +14150,8 @@ static const struct genl_ops nl80211_ops[] = {
                .doit = nl80211_set_pmk,
                .policy = nl80211_policy,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_DEL_PMK,
index 2f1bf91eb2265a26bcebeeb3589735e77a3a9daa..0ba778f371cb25fe6b610dc6e112beed2db55e9b 100644 (file)
@@ -1309,6 +1309,16 @@ reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1,
        return dfs_region1;
 }
 
+static void reg_wmm_rules_intersect(const struct ieee80211_wmm_ac *wmm_ac1,
+                                   const struct ieee80211_wmm_ac *wmm_ac2,
+                                   struct ieee80211_wmm_ac *intersect)
+{
+       intersect->cw_min = max_t(u16, wmm_ac1->cw_min, wmm_ac2->cw_min);
+       intersect->cw_max = max_t(u16, wmm_ac1->cw_max, wmm_ac2->cw_max);
+       intersect->cot = min_t(u16, wmm_ac1->cot, wmm_ac2->cot);
+       intersect->aifsn = max_t(u8, wmm_ac1->aifsn, wmm_ac2->aifsn);
+}
+
 /*
  * Helper for regdom_intersect(), this does the real
  * mathematical intersection fun
@@ -1323,6 +1333,8 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
        struct ieee80211_freq_range *freq_range;
        const struct ieee80211_power_rule *power_rule1, *power_rule2;
        struct ieee80211_power_rule *power_rule;
+       const struct ieee80211_wmm_rule *wmm_rule1, *wmm_rule2;
+       struct ieee80211_wmm_rule *wmm_rule;
        u32 freq_diff, max_bandwidth1, max_bandwidth2;
 
        freq_range1 = &rule1->freq_range;
@@ -1333,6 +1345,10 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
        power_rule2 = &rule2->power_rule;
        power_rule = &intersected_rule->power_rule;
 
+       wmm_rule1 = &rule1->wmm_rule;
+       wmm_rule2 = &rule2->wmm_rule;
+       wmm_rule = &intersected_rule->wmm_rule;
+
        freq_range->start_freq_khz = max(freq_range1->start_freq_khz,
                                         freq_range2->start_freq_khz);
        freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
@@ -1376,6 +1392,29 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
        intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms,
                                           rule2->dfs_cac_ms);
 
+       if (rule1->has_wmm && rule2->has_wmm) {
+               u8 ac;
+
+               for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+                       reg_wmm_rules_intersect(&wmm_rule1->client[ac],
+                                               &wmm_rule2->client[ac],
+                                               &wmm_rule->client[ac]);
+                       reg_wmm_rules_intersect(&wmm_rule1->ap[ac],
+                                               &wmm_rule2->ap[ac],
+                                               &wmm_rule->ap[ac]);
+               }
+
+               intersected_rule->has_wmm = true;
+       } else if (rule1->has_wmm) {
+               *wmm_rule = *wmm_rule1;
+               intersected_rule->has_wmm = true;
+       } else if (rule2->has_wmm) {
+               *wmm_rule = *wmm_rule2;
+               intersected_rule->has_wmm = true;
+       } else {
+               intersected_rule->has_wmm = false;
+       }
+
        if (!is_valid_reg_rule(intersected_rule))
                return -EINVAL;
 
index 287518c6caa40204525993d8b2477e269324f378..04d888628f29dcca952d38d48b785d5e2c56dfef 100644 (file)
@@ -190,10 +190,9 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
        /* copy subelement as we need to change its content to
         * mark an ie after it is processed.
         */
-       sub_copy = kmalloc(subie_len, gfp);
+       sub_copy = kmemdup(subelement, subie_len, gfp);
        if (!sub_copy)
                return 0;
-       memcpy(sub_copy, subelement, subie_len);
 
        pos = &new_ie[0];
 
index e4b8db5e81ec710db0ee8e779046e04263441e08..75899b62bdc9ed2116a1420a6035c904307f4838 100644 (file)
@@ -1220,9 +1220,11 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
        else if (rate->bw == RATE_INFO_BW_HE_RU &&
                 rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26)
                result = rates_26[rate->he_gi];
-       else if (WARN(1, "invalid HE MCS: bw:%d, ru:%d\n",
-                     rate->bw, rate->he_ru_alloc))
+       else {
+               WARN(1, "invalid HE MCS: bw:%d, ru:%d\n",
+                    rate->bw, rate->he_ru_alloc);
                return 0;
+       }
 
        /* now scale to the appropriate MCS */
        tmp = result;
index 27400b0cd732e2f37733e321dee5742c8cdf6721..000dc6437893baa133224c1d9922e97433c573f4 100644 (file)
@@ -13,7 +13,7 @@ gen-atomic-long.sh              asm-generic/atomic-long.h
 gen-atomic-fallback.sh          linux/atomic-fallback.h
 EOF
 while read script header; do
-       ${ATOMICDIR}/${script} ${ATOMICTBL} > ${LINUXDIR}/include/${header}
+       /bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} > ${LINUXDIR}/include/${header}
        HASH="$(sha1sum ${LINUXDIR}/include/${header})"
        HASH="${HASH%% *}"
        printf "// %s\n" "${HASH}" >> ${LINUXDIR}/include/${header}
index cd97929fac663f61250edeae3397e3ab75b5ff49..dc28914fa72e076405b238225dc54c172df5de40 100644 (file)
@@ -560,7 +560,7 @@ static int propagate_exception(struct dev_cgroup *devcg_root,
                    devcg->behavior == DEVCG_DEFAULT_ALLOW) {
                        rc = dev_exception_add(devcg, ex);
                        if (rc)
-                               break;
+                               return rc;
                } else {
                        /*
                         * in the other possible cases:
index 96a074019c33c28b5587d7d833110eced98669c0..0eb169acc85031f5f2a5bdc8e3e7b9b3b66a97b7 100644 (file)
@@ -713,8 +713,11 @@ snd_info_create_entry(const char *name, struct snd_info_entry *parent,
        INIT_LIST_HEAD(&entry->list);
        entry->parent = parent;
        entry->module = module;
-       if (parent)
+       if (parent) {
+               mutex_lock(&parent->access);
                list_add_tail(&entry->list, &parent->children);
+               mutex_unlock(&parent->access);
+       }
        return entry;
 }
 
@@ -792,7 +795,12 @@ void snd_info_free_entry(struct snd_info_entry * entry)
        list_for_each_entry_safe(p, n, &entry->children, list)
                snd_info_free_entry(p);
 
-       list_del(&entry->list);
+       p = entry->parent;
+       if (p) {
+               mutex_lock(&p->access);
+               list_del(&entry->list);
+               mutex_unlock(&p->access);
+       }
        kfree(entry->name);
        if (entry->private_free)
                entry->private_free(entry);
index 0c4dc40376a709ff2e8aabd2f9ac4d25660389be..079c12d64b0e3112361ab2a4497df41155aa7f62 100644 (file)
@@ -382,14 +382,7 @@ int snd_card_disconnect(struct snd_card *card)
        card->shutdown = 1;
        spin_unlock(&card->files_lock);
 
-       /* phase 1: disable fops (user space) operations for ALSA API */
-       mutex_lock(&snd_card_mutex);
-       snd_cards[card->number] = NULL;
-       clear_bit(card->number, snd_cards_lock);
-       mutex_unlock(&snd_card_mutex);
-       
-       /* phase 2: replace file->f_op with special dummy operations */
-       
+       /* replace file->f_op with special dummy operations */
        spin_lock(&card->files_lock);
        list_for_each_entry(mfile, &card->files_list, list) {
                /* it's critical part, use endless loop */
@@ -405,7 +398,7 @@ int snd_card_disconnect(struct snd_card *card)
        }
        spin_unlock(&card->files_lock); 
 
-       /* phase 3: notify all connected devices about disconnection */
+       /* notify all connected devices about disconnection */
        /* at this point, they cannot respond to any calls except release() */
 
 #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
@@ -421,6 +414,13 @@ int snd_card_disconnect(struct snd_card *card)
                device_del(&card->card_dev);
                card->registered = false;
        }
+
+       /* disable fops (user space) operations for ALSA API */
+       mutex_lock(&snd_card_mutex);
+       snd_cards[card->number] = NULL;
+       clear_bit(card->number, snd_cards_lock);
+       mutex_unlock(&snd_card_mutex);
+
 #ifdef CONFIG_PM
        wake_up(&card->power_sleep);
 #endif
index ec0b8595eb4da448a51d3376c9913e96f09e3075..701a69d856f5ff7acfb9e264e58abf5cf9f3215e 100644 (file)
@@ -969,6 +969,7 @@ int snd_hda_codec_device_new(struct hda_bus *bus, struct snd_card *card,
 
        /* power-up all before initialization */
        hda_set_power_state(codec, AC_PWRST_D0);
+       codec->core.dev.power.power_state = PMSG_ON;
 
        snd_hda_codec_proc_new(codec);
 
index 810479766090376fbb2f8ecdb6da6012fb4058a3..f5b510f119edd27d9328011c0dc0d9709c7f74e4 100644 (file)
@@ -7266,6 +7266,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0x90a60140},
                {0x14, 0x90170150},
                {0x21, 0x02211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x21, 0x02211020}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
                {0x14, 0x90170110},
                {0x21, 0x02211020}),
@@ -7376,6 +7378,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x14, 0x90170110},
+               {0x1b, 0x01011020},
+               {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC,
                {0x14, 0x90170110},
                {0x1b, 0x90a70130},
index 404d4b9ffe7644553a1b59fba043b151d935a2e9..df1153cea0b7ee2a27e19682837f81922fef353e 100644 (file)
@@ -32,6 +32,7 @@
 
 #ifndef __KERNEL__
 #include <stdlib.h>
+#include <time.h>
 #endif
 
 /*
index 87494c7c619d85dd4199a31b7de4a2739ad678b8..981c6ce2da2c76cee9d6a3b96b37bf6fa5cdaa33 100644 (file)
@@ -2233,7 +2233,7 @@ eval_type_str(unsigned long long val, const char *type, int pointer)
                return val & 0xffffffff;
 
        if (strcmp(type, "u64") == 0 ||
-           strcmp(type, "s64"))
+           strcmp(type, "s64") == 0)
                return val;
 
        if (strcmp(type, "s8") == 0)
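
The one-character fix above is worth spelling out: strcmp() returns 0 on
equality, so a bare strcmp() in a condition is true exactly when the strings
differ, and the old test sent every non-"s64" type down the 64-bit path. A
standalone illustration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *type = "s32";

            if (strcmp(type, "s64"))        /* BUG: true because they differ */
                    puts("old test: s32 wrongly matched the s64 branch");
            if (strcmp(type, "s64") == 0)   /* fixed test: false for s32 */
                    puts("new test: taken only for a real s64");
            return 0;
    }
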
index 49ee3c2033ecbd8df8408445f141c8312f7efc44..c3625ec374e0658e66a3ed7beadbcbe19b5929c0 100644 (file)
@@ -1308,6 +1308,7 @@ static void init_features(struct perf_session *session)
        for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
                perf_header__set_feat(&session->header, feat);
 
+       perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
        perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
        perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
        perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
index 1999d6533d12a35e672e4caf8c98e24010191bcf..fbbb0da43abbad579f354ac909a6ffccec14b5ce 100644 (file)
@@ -1377,6 +1377,7 @@ int cmd_top(int argc, const char **argv)
                         * */
                        .overwrite      = 0,
                        .sample_time    = true,
+                       .sample_time_set = true,
                },
                .max_stack           = sysctl__max_stack(),
                .annotation_opts     = annotation__default_options,
index 3b71902a5a21124c6006100580245ceda1c72a23..bf271fbc3a885f509d78ee60fbb0138fd12df53f 100644 (file)
@@ -331,7 +331,7 @@ if perf_db_export_calls:
                        'return_id,'
                        'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
                        'parent_call_path_id,'
-                       'parent_id'
+                       'calls.parent_id'
                ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
 
 do_query(query, 'CREATE VIEW samples_view AS '
index c6351b557bb0a9afb70d2ed4330c3496c3266a35..9494f9dc61ecac041e114ffa2553e287f416e30d 100644 (file)
@@ -57,9 +57,11 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
                else if (prog_id > node->info_linear->info.id)
                        n = n->rb_right;
                else
-                       break;
+                       goto out;
        }
+       node = NULL;
 
+out:
        up_read(&env->bpf_progs.lock);
        return node;
 }
@@ -109,10 +111,12 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
                else if (btf_id > node->id)
                        n = n->rb_right;
                else
-                       break;
+                       goto out;
        }
+       node = NULL;
 
        up_read(&env->bpf_progs.lock);
+out:
        return node;
 }
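
Both lookup fixes above cure the same bug: the rbtree walk ended with "break"
on a match, but on a miss the loop fell through with node still pointing at
the last entry compared, which the function then returned as if it were a
hit. The fix jumps out on a match and forces node to NULL otherwise. The
correct shape for any binary-search lookup:

    struct node { int key; struct node *left, *right; };

    /* Return the matching node or NULL -- never the last node visited. */
    static struct node *bst_find(struct node *n, int key)
    {
            while (n) {
                    if (key < n->key)
                            n = n->left;
                    else if (key > n->key)
                            n = n->right;
                    else
                            return n;   /* exact match */
            }
            return NULL;                /* a miss is reported explicitly */
    }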
 
index 6689378ee577c18ca1efac4b95f84c5c45d40404..51ead577533fa6f6ee8c8cc49b8fc084490f0ef1 100644 (file)
@@ -1868,12 +1868,12 @@ static void *perf_evlist__poll_thread(void *arg)
 {
        struct perf_evlist *evlist = arg;
        bool draining = false;
-       int i;
+       int i, done = 0;
+
+       while (!done) {
+               bool got_data = false;
 
-       while (draining || !(evlist->thread.done)) {
-               if (draining)
-                       draining = false;
-               else if (evlist->thread.done)
+               if (evlist->thread.done)
                        draining = true;
 
                if (!draining)
@@ -1894,9 +1894,13 @@ static void *perf_evlist__poll_thread(void *arg)
                                        pr_warning("cannot locate proper evsel for the side band event\n");
 
                                perf_mmap__consume(map);
+                               got_data = true;
                        }
                        perf_mmap__read_done(map);
                }
+
+               if (draining && !got_data)
+                       break;
        }
        return NULL;
 }
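
The rewritten loop is a drain handshake: it samples thread.done first, then does one full read pass, and only exits once a pass that started after done was set found nothing, so side-band events queued just before shutdown are still consumed. A self-contained sketch of the same shape, assuming a hypothetical queue_pop() and a writer-set done flag:

#include <stdatomic.h>
#include <stdbool.h>

extern bool queue_pop(void);    /* hypothetical: false when queue empty */
extern atomic_bool done;        /* hypothetical: set once by the producer */

static void drain_loop(void)
{
        for (;;) {
                /* Sample the exit flag BEFORE the read pass ... */
                bool draining = atomic_load(&done);
                bool got_data = false;

                while (queue_pop())
                        got_data = true;

                /* ... so an empty pass after 'done' proves the queue
                 * really is drained. */
                if (draining && !got_data)
                        break;
        }
}
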
index 66d066f18b5b2de290a3cc4c4a1c1caa29baaeb8..966360844fffbd10b4d97c318b22e0fc8446c1ee 100644 (file)
@@ -2368,7 +2368,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
                if (data->user_regs.abi) {
                        u64 mask = evsel->attr.sample_regs_user;
 
-                       sz = hweight_long(mask) * sizeof(u64);
+                       sz = hweight64(mask) * sizeof(u64);
                        OVERFLOW_CHECK(array, sz, max_size);
                        data->user_regs.mask = mask;
                        data->user_regs.regs = (u64 *)array;
@@ -2424,7 +2424,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
                if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
                        u64 mask = evsel->attr.sample_regs_intr;
 
-                       sz = hweight_long(mask) * sizeof(u64);
+                       sz = hweight64(mask) * sizeof(u64);
                        OVERFLOW_CHECK(array, sz, max_size);
                        data->intr_regs.mask = mask;
                        data->intr_regs.regs = (u64 *)array;
@@ -2552,7 +2552,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
        if (type & PERF_SAMPLE_REGS_USER) {
                if (sample->user_regs.abi) {
                        result += sizeof(u64);
-                       sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
+                       sz = hweight64(sample->user_regs.mask) * sizeof(u64);
                        result += sz;
                } else {
                        result += sizeof(u64);
@@ -2580,7 +2580,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
        if (type & PERF_SAMPLE_REGS_INTR) {
                if (sample->intr_regs.abi) {
                        result += sizeof(u64);
-                       sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
+                       sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
                        result += sz;
                } else {
                        result += sizeof(u64);
@@ -2710,7 +2710,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
        if (type & PERF_SAMPLE_REGS_USER) {
                if (sample->user_regs.abi) {
                        *array++ = sample->user_regs.abi;
-                       sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
+                       sz = hweight64(sample->user_regs.mask) * sizeof(u64);
                        memcpy(array, sample->user_regs.regs, sz);
                        array = (void *)array + sz;
                } else {
@@ -2746,7 +2746,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
        if (type & PERF_SAMPLE_REGS_INTR) {
                if (sample->intr_regs.abi) {
                        *array++ = sample->intr_regs.abi;
-                       sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
+                       sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
                        memcpy(array, sample->intr_regs.regs, sz);
                        array = (void *)array + sz;
                } else {
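
hweight_long() counts bits in an unsigned long, which is 32 bits on ILP32 builds, so the upper half of a 64-bit sample_regs mask was silently dropped when sizing the register block; hweight64() always sees the full mask. A small illustration, with userspace popcount builtins standing in for the kernel helpers:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel helpers. */
static unsigned int my_hweight_long(unsigned long w)
{
        return __builtin_popcountl(w);
}

static unsigned int my_hweight64(uint64_t w)
{
        return __builtin_popcountll(w);
}

int main(void)
{
        uint64_t mask = 0xffff00000000ULL;      /* only bits 32..47 set */

        /* On an ILP32 target the argument is truncated to 32 bits, so
         * my_hweight_long() reports 0 and the register block would be
         * sized at 0 bytes; my_hweight64() always reports 16. */
        printf("%u vs %u\n", my_hweight_long(mask), my_hweight64(mask));
        return 0;
}
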
index b9e693825873a8459c055a62cfcf1aefb289c99a..2d2af2ac2b1e976041b5e05eca4374c6b424b8fa 100644 (file)
@@ -2606,6 +2606,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
                perf_env__insert_bpf_prog_info(env, info_node);
        }
 
+       up_write(&env->bpf_progs.lock);
        return 0;
 out:
        free(info_linear);
@@ -2623,7 +2624,9 @@ static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data _
 static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
 {
        struct perf_env *env = &ff->ph->env;
+       struct btf_node *node = NULL;
        u32 count, i;
+       int err = -1;
 
        if (ff->ph->needs_swap) {
                pr_warning("interpreting btf from systems with endianity is not yet supported\n");
@@ -2636,31 +2639,32 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
        down_write(&env->bpf_progs.lock);
 
        for (i = 0; i < count; ++i) {
-               struct btf_node *node;
                u32 id, data_size;
 
                if (do_read_u32(ff, &id))
-                       return -1;
+                       goto out;
                if (do_read_u32(ff, &data_size))
-                       return -1;
+                       goto out;
 
                node = malloc(sizeof(struct btf_node) + data_size);
                if (!node)
-                       return -1;
+                       goto out;
 
                node->id = id;
                node->data_size = data_size;
 
-               if (__do_read(ff, node->data, data_size)) {
-                       free(node);
-                       return -1;
-               }
+               if (__do_read(ff, node->data, data_size))
+                       goto out;
 
                perf_env__insert_btf(env, node);
+               node = NULL;
        }
 
+       err = 0;
+out:
        up_write(&env->bpf_progs.lock);
-       return 0;
+       free(node);
+       return err;
 }
 
 struct feature_ops {
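
The reworked process_bpf_btf() loop fixes two leaks at once: on any error it now exits through a single label that releases the lock and frees whatever node was allocated but not yet published, and after a successful insert it resets node to NULL so the final free() is a no-op. The ownership-transfer shape, as a standalone sketch with hypothetical read_item()/publish() helpers:

#include <stdlib.h>

extern int read_item(void *buf, size_t size);   /* hypothetical: 0 on success */
extern void publish(void *item);                /* hypothetical: takes ownership */

static int load_items(unsigned int count, size_t size)
{
        void *node = NULL;
        int err = -1;
        unsigned int i;

        for (i = 0; i < count; i++) {
                node = malloc(size);
                if (!node)
                        goto out;
                if (read_item(node, size))
                        goto out;       /* node still ours: freed below */
                publish(node);
                node = NULL;            /* ownership handed off */
        }
        err = 0;
out:
        free(node);     /* free(NULL) is a no-op */
        return err;
}
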
index e32628cd20a7f36e0e06efb3ee5c2e6cae09dabb..ee71efb9db62e676c8fab2a60c34d079ba7e7e7e 100644 (file)
@@ -261,6 +261,22 @@ bool __map__is_extra_kernel_map(const struct map *map)
        return kmap && kmap->name[0];
 }
 
+bool __map__is_bpf_prog(const struct map *map)
+{
+       const char *name;
+
+       if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
+               return true;
+
+       /*
+        * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
+        * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
+        * guess the type based on name.
+        */
+       name = map->dso->short_name;
+       return name && (strstr(name, "bpf_prog_") == name);
+}
+
 bool map__has_symbols(const struct map *map)
 {
        return dso__has_symbols(map->dso);
@@ -910,10 +926,8 @@ static void __maps__insert_name(struct maps *maps, struct map *map)
                rc = strcmp(m->dso->short_name, map->dso->short_name);
                if (rc < 0)
                        p = &(*p)->rb_left;
-               else if (rc  > 0)
-                       p = &(*p)->rb_right;
                else
-                       return;
+                       p = &(*p)->rb_right;
        }
        rb_link_node(&map->rb_node_name, parent, p);
        rb_insert_color(&map->rb_node_name, &maps->names);
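
The comment in __map__is_bpf_prog() relies on the kernel naming synthesized BPF program DSOs with a fixed prefix; strstr(name, "bpf_prog_") == name is true only when the match starts at offset 0, i.e. it is a prefix test. An equivalent (and more conventional) spelling:

#include <stdbool.h>
#include <string.h>

/* Prefix test: equivalent to 'strstr(name, prefix) == name' but without
 * scanning the whole string on a miss. */
static bool has_prefix(const char *name, const char *prefix)
{
        return name && strncmp(name, prefix, strlen(prefix)) == 0;
}

/* e.g. has_prefix(map_name, "bpf_prog_") */
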
index 0e20749f2c55d533842171dd2b2a7262f02768d1..dc93787c74f01b65fa7fcc76388a57472db707e7 100644 (file)
@@ -159,10 +159,12 @@ int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name,
 
 bool __map__is_kernel(const struct map *map);
 bool __map__is_extra_kernel_map(const struct map *map);
+bool __map__is_bpf_prog(const struct map *map);
 
 static inline bool __map__is_kmodule(const struct map *map)
 {
-       return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map);
+       return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map) &&
+              !__map__is_bpf_prog(map);
 }
 
 bool map__has_symbols(const struct map *map);
index b579f962451d6464035c6649ac714998c05a225f..85ffdcfa596b5011b93abf3c65e90cd33cceb61f 100644 (file)
@@ -146,6 +146,7 @@ static int dimm_fail_cmd_code[ARRAY_SIZE(handle)];
 struct nfit_test_sec {
        u8 state;
        u8 ext_state;
+       u8 old_state;
        u8 passphrase[32];
        u8 master_passphrase[32];
        u64 overwrite_end_time;
@@ -225,6 +226,8 @@ static struct workqueue_struct *nfit_wq;
 
 static struct gen_pool *nfit_pool;
 
+static const char zero_key[NVDIMM_PASSPHRASE_LEN];
+
 static struct nfit_test *to_nfit_test(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -1059,8 +1062,7 @@ static int nd_intel_test_cmd_secure_erase(struct nfit_test *t,
        struct device *dev = &t->pdev.dev;
        struct nfit_test_sec *sec = &dimm_sec_info[dimm];
 
-       if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED) ||
-                       (sec->state & ND_INTEL_SEC_STATE_FROZEN)) {
+       if (sec->state & ND_INTEL_SEC_STATE_FROZEN) {
                nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
                dev_dbg(dev, "secure erase: wrong security state\n");
        } else if (memcmp(nd_cmd->passphrase, sec->passphrase,
@@ -1068,6 +1070,12 @@ static int nd_intel_test_cmd_secure_erase(struct nfit_test *t,
                nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
                dev_dbg(dev, "secure erase: wrong passphrase\n");
        } else {
+               if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED)
+                               && (memcmp(nd_cmd->passphrase, zero_key,
+                                       ND_INTEL_PASSPHRASE_SIZE) != 0)) {
+                       dev_dbg(dev, "invalid zero key\n");
+                       return 0;
+               }
                memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
                memset(sec->master_passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
                sec->state = 0;
@@ -1093,7 +1101,7 @@ static int nd_intel_test_cmd_overwrite(struct nfit_test *t,
                return 0;
        }
 
-       memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
+       sec->old_state = sec->state;
        sec->state = ND_INTEL_SEC_STATE_OVERWRITE;
        dev_dbg(dev, "overwrite progressing.\n");
        sec->overwrite_end_time = get_jiffies_64() + 5 * HZ;
@@ -1115,7 +1123,8 @@ static int nd_intel_test_cmd_query_overwrite(struct nfit_test *t,
 
        if (time_is_before_jiffies64(sec->overwrite_end_time)) {
                sec->overwrite_end_time = 0;
-               sec->state = 0;
+               sec->state = sec->old_state;
+               sec->old_state = 0;
                sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED;
                dev_dbg(dev, "overwrite is complete\n");
        } else
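
zero_key above is a file-scope const array, which C zero-initializes, so memcmp() against it is a compact "is this passphrase all zeroes?" test, used here to gate secure erase while security is disabled. The idiom in isolation (KEY_LEN is a stand-in for NVDIMM_PASSPHRASE_LEN):

#include <stdbool.h>
#include <string.h>

#define KEY_LEN 32

/* File-scope const arrays are zero-initialized, so this is a ready-made
 * all-zeroes reference buffer. */
static const unsigned char zero_key[KEY_LEN];

static bool key_is_zero(const unsigned char *key)
{
        return memcmp(key, zero_key, KEY_LEN) == 0;
}
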
index c4cf6e6d800ebe3d2d595805397ce1b3f70e7de3..a6c196c8534cea2a49d600aa23b39070bfd481f0 100755 (executable)
@@ -11,6 +11,7 @@ lib_dir=$(dirname $0)/../../../net/forwarding
 
 ALL_TESTS="
        rif_set_addr_test
+       rif_vrf_set_addr_test
        rif_inherit_bridge_addr_test
        rif_non_inherit_bridge_addr_test
        vlan_interface_deletion_test
@@ -98,6 +99,25 @@ rif_set_addr_test()
        ip link set dev $swp1 addr $swp1_mac
 }
 
+rif_vrf_set_addr_test()
+{
+       # Test that it is possible to set an IP address on a VRF upper despite
+       # its random MAC address.
+       RET=0
+
+       ip link add name vrf-test type vrf table 10
+       ip link set dev $swp1 master vrf-test
+
+       ip -4 address add 192.0.2.1/24 dev vrf-test
+       check_err $? "failed to set IPv4 address on VRF"
+       ip -6 address add 2001:db8:1::1/64 dev vrf-test
+       check_err $? "failed to set IPv6 address on VRF"
+
+       log_test "RIF - setting IP address on VRF"
+
+       ip link del dev vrf-test
+}
+
 rif_inherit_bridge_addr_test()
 {
        RET=0
index 7514fcea91a73e80a91313ab280fb90e12375138..f8588cca2bef4bfe4d3cdf2afdb6586f21e67894 100644 (file)
@@ -1,3 +1,5 @@
+include ../../../../scripts/Kbuild.include
+
 all:
 
 top_srcdir = ../../../..
@@ -17,6 +19,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/state_test
 TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
+TEST_GEN_PROGS_x86_64 += x86_64/smm_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
 TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
 
@@ -30,7 +33,11 @@ INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
 LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
 CFLAGS += -O2 -g -std=gnu99 -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
-LDFLAGS += -pthread -no-pie
+
+no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
+        $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
+
+LDFLAGS += -pthread $(no-pie-option)
 
 # After inclusion, $(OUTPUT) is defined and
 # $(TEST_GEN_PROGS) starts with $(OUTPUT)/
index e2884c2b81fff80c1ec6c261828dbb0493b3e98b..6063d5b2f3561c450778f86f3d1474390d79b5ec 100644 (file)
@@ -778,6 +778,33 @@ void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
 #define MSR_IA32_APICBASE_ENABLE       (1<<11)
 #define MSR_IA32_APICBASE_BASE         (0xfffff<<12)
 
+#define APIC_BASE_MSR  0x800
+#define X2APIC_ENABLE  (1UL << 10)
+#define        APIC_ICR        0x300
+#define                APIC_DEST_SELF          0x40000
+#define                APIC_DEST_ALLINC        0x80000
+#define                APIC_DEST_ALLBUT        0xC0000
+#define                APIC_ICR_RR_MASK        0x30000
+#define                APIC_ICR_RR_INVALID     0x00000
+#define                APIC_ICR_RR_INPROG      0x10000
+#define                APIC_ICR_RR_VALID       0x20000
+#define                APIC_INT_LEVELTRIG      0x08000
+#define                APIC_INT_ASSERT         0x04000
+#define                APIC_ICR_BUSY           0x01000
+#define                APIC_DEST_LOGICAL       0x00800
+#define                APIC_DEST_PHYSICAL      0x00000
+#define                APIC_DM_FIXED           0x00000
+#define                APIC_DM_FIXED_MASK      0x00700
+#define                APIC_DM_LOWEST          0x00100
+#define                APIC_DM_SMI             0x00200
+#define                APIC_DM_REMRD           0x00300
+#define                APIC_DM_NMI             0x00400
+#define                APIC_DM_INIT            0x00500
+#define                APIC_DM_STARTUP         0x00600
+#define                APIC_DM_EXTINT          0x00700
+#define                APIC_VECTOR_MASK        0x000FF
+#define        APIC_ICR2       0x310
+
 #define MSR_IA32_TSCDEADLINE           0x000006e0
 
 #define MSR_IA32_UCODE_WRITE           0x00000079
index efa0aad8b3c69ab370a1f5440194cee3486c11db..4ca96b228e46ba248476803583cb94d14410ff16 100644 (file)
@@ -91,6 +91,11 @@ static void vm_open(struct kvm_vm *vm, int perm, unsigned long type)
        if (vm->kvm_fd < 0)
                exit(KSFT_SKIP);
 
+       if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
+               fprintf(stderr, "immediate_exit not available, skipping test\n");
+               exit(KSFT_SKIP);
+       }
+
        vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, type);
        TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
                "rc: %i errno: %i", vm->fd, errno);
index f28127f4a3af63cb9ac15d2124f425e7492fccda..dc7fae9fa424cf2b45fb7acf10c4b58c272763a0 100644 (file)
@@ -1030,6 +1030,14 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
                            nested_size, sizeof(state->nested_));
        }
 
+       /*
+        * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
+        * guest state is consistent only after userspace re-enters the
+        * kernel with KVM_RUN.  Complete IO prior to migrating state
+        * to a new VM.
+        */
+       vcpu_run_complete_io(vm, vcpuid);
+
        nmsrs = kvm_get_num_msrs(vm);
        list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
        list->nmsrs = nmsrs;
@@ -1093,12 +1101,6 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int r;
 
-       if (state->nested.size) {
-               r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
-               TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
-                       r);
-       }
-
        r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave);
         TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
                 r);
@@ -1130,4 +1132,10 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
        r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs);
         TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i",
                 r);
+
+       if (state->nested.size) {
+               r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
+               TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
+                       r);
+       }
 }
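
Together the two hunks pin down an ordering contract: complete in-flight IO before snapshotting (vcpu_save_state() now does this itself), and apply nested state only after the general-purpose state has been restored. As a fragment (error handling elided, kvm selftest harness assumed), a save/restore cycle now looks like:

        state = vcpu_save_state(vm, VCPU_ID);   /* completes IO, then snapshots */
        kvm_vm_release(vm);

        kvm_vm_restart(vm, O_RDWR);
        vm_vcpu_add(vm, VCPU_ID, 0, 0);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
        vcpu_load_state(vm, VCPU_ID, state);    /* nested state applied last */
        free(state);
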
index c49c2a28b0eb290ccd6c51498a0b9fd716b58b07..36669684eca58a6c09140453f70a403cf0119348 100644 (file)
@@ -123,8 +123,6 @@ int main(int argc, char *argv[])
                            stage, run->exit_reason,
                            exit_reason_str(run->exit_reason));
 
-               memset(&regs1, 0, sizeof(regs1));
-               vcpu_regs_get(vm, VCPU_ID, &regs1);
                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_ABORT:
                        TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
@@ -144,6 +142,9 @@ int main(int argc, char *argv[])
                            stage, (ulong)uc.args[1]);
 
                state = vcpu_save_state(vm, VCPU_ID);
+               memset(&regs1, 0, sizeof(regs1));
+               vcpu_regs_get(vm, VCPU_ID, &regs1);
+
                kvm_vm_release(vm);
 
                /* Restore state in a new VM.  */
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
new file mode 100644 (file)
index 0000000..fb80869
--- /dev/null
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018, Red Hat, Inc.
+ *
+ * Tests for SMM.
+ */
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+
+#include "vmx.h"
+
+#define VCPU_ID              1
+
+#define PAGE_SIZE  4096
+
+#define SMRAM_SIZE 65536
+#define SMRAM_MEMSLOT ((1 << 16) | 1)
+#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
+#define SMRAM_GPA 0x1000000
+#define SMRAM_STAGE 0xfe
+
+#define STR(x) #x
+#define XSTR(s) STR(s)
+
+#define SYNC_PORT 0xe
+#define DONE 0xff
+
+/*
+ * This is compiled as normal 64-bit code, however, SMI handler is executed
+ * in real-address mode. To stay simple we're limiting ourselves to a mode
+ * independent subset of asm here.
+ * SMI handler always report back fixed stage SMRAM_STAGE.
+ */
+uint8_t smi_handler[] = {
+       0xb0, SMRAM_STAGE,    /* mov $SMRAM_STAGE, %al */
+       0xe4, SYNC_PORT,      /* in $SYNC_PORT, %al */
+       0x0f, 0xaa,           /* rsm */
+};
+
+void sync_with_host(uint64_t phase)
+{
+       asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
+                    : : "a" (phase));
+}
+
+void self_smi(void)
+{
+       wrmsr(APIC_BASE_MSR + (APIC_ICR >> 4),
+             APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
+}
+
+void guest_code(struct vmx_pages *vmx_pages)
+{
+       uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
+
+       sync_with_host(1);
+
+       wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);
+
+       sync_with_host(2);
+
+       self_smi();
+
+       sync_with_host(4);
+
+       if (vmx_pages) {
+               GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+
+               sync_with_host(5);
+
+               self_smi();
+
+               sync_with_host(7);
+       }
+
+       sync_with_host(DONE);
+}
+
+int main(int argc, char *argv[])
+{
+       struct vmx_pages *vmx_pages = NULL;
+       vm_vaddr_t vmx_pages_gva = 0;
+
+       struct kvm_regs regs;
+       struct kvm_vm *vm;
+       struct kvm_run *run;
+       struct kvm_x86_state *state;
+       int stage, stage_reported;
+
+       /* Create VM */
+       vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+       run = vcpu_state(vm, VCPU_ID);
+
+       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
+                                   SMRAM_MEMSLOT, SMRAM_PAGES, 0);
+       TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
+                   == SMRAM_GPA, "could not allocate guest physical addresses?");
+
+       memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
+       memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
+              sizeof(smi_handler));
+
+       vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
+
+       if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
+               vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+               vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+       } else {
+               printf("will skip SMM test with VMX enabled\n");
+               vcpu_args_set(vm, VCPU_ID, 1, 0);
+       }
+
+       for (stage = 1;; stage++) {
+               _vcpu_run(vm, VCPU_ID);
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                           "Stage %d: unexpected exit reason: %u (%s),\n",
+                           stage, run->exit_reason,
+                           exit_reason_str(run->exit_reason));
+
+               memset(&regs, 0, sizeof(regs));
+               vcpu_regs_get(vm, VCPU_ID, &regs);
+
+               stage_reported = regs.rax & 0xff;
+
+               if (stage_reported == DONE)
+                       goto done;
+
+               TEST_ASSERT(stage_reported == stage ||
+                           stage_reported == SMRAM_STAGE,
+                           "Unexpected stage: #%x, got %x",
+                           stage, stage_reported);
+
+               state = vcpu_save_state(vm, VCPU_ID);
+               kvm_vm_release(vm);
+               kvm_vm_restart(vm, O_RDWR);
+               vm_vcpu_add(vm, VCPU_ID, 0, 0);
+               vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+               vcpu_load_state(vm, VCPU_ID, state);
+               run = vcpu_state(vm, VCPU_ID);
+               free(state);
+       }
+
+done:
+       kvm_vm_free(vm);
+}
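
One detail of the new test worth calling out: self_smi() programs the ICR through the x2APIC MSR interface, where each xAPIC MMIO register at offset OFF is reachable as MSR 0x800 + (OFF >> 4). Illustrative arithmetic only:

#define APIC_BASE_MSR   0x800
#define APIC_ICR        0x300

/* x2APIC register-to-MSR mapping: MMIO offset >> 4, based at 0x800. */
#define X2APIC_MSR(off) (APIC_BASE_MSR + ((off) >> 4))

/* X2APIC_MSR(APIC_ICR) == 0x830, the MSR written by self_smi(). */
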
index 30f75856cf3984277bee22caad9e5df95f98aa26..e0a3c0204b7cd11c5da7024bea68f0da71e41bab 100644 (file)
@@ -134,11 +134,6 @@ int main(int argc, char *argv[])
 
        struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
-       if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
-               fprintf(stderr, "immediate_exit not available, skipping test\n");
-               exit(KSFT_SKIP);
-       }
-
        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
@@ -179,18 +174,10 @@ int main(int argc, char *argv[])
                            uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
                            stage, (ulong)uc.args[1]);
 
-               /*
-                * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
-                * guest state is consistent only after userspace re-enters the
-                * kernel with KVM_RUN.  Complete IO prior to migrating state
-                * to a new VM.
-                */
-               vcpu_run_complete_io(vm, VCPU_ID);
-
+               state = vcpu_save_state(vm, VCPU_ID);
                memset(&regs1, 0, sizeof(regs1));
                vcpu_regs_get(vm, VCPU_ID, &regs1);
 
-               state = vcpu_save_state(vm, VCPU_ID);
                kvm_vm_release(vm);
 
                /* Restore state in a new VM.  */
index 1080ff55a788f720f240271741fbc38680061b7a..0d2a5f4f1e63829f3ca8dfcd8695b91409823f7f 100755 (executable)
@@ -605,6 +605,39 @@ run_cmd()
        return $rc
 }
 
+check_expected()
+{
+       local out="$1"
+       local expected="$2"
+       local rc=0
+
+       [ "${out}" = "${expected}" ] && return 0
+
+       if [ -z "${out}" ]; then
+               if [ "$VERBOSE" = "1" ]; then
+                       printf "\nNo route entry found\n"
+                       printf "Expected:\n"
+                       printf "    ${expected}\n"
+               fi
+               return 1
+       fi
+
+       # tricky way to convert output to 1-line without ip's
+       # messy '\'; this drops all extra white space
+       out=$(echo ${out})
+       if [ "${out}" != "${expected}" ]; then
+               rc=1
+               if [ "${VERBOSE}" = "1" ]; then
+                       printf "    Unexpected route entry. Have:\n"
+                       printf "        ${out}\n"
+                       printf "    Expected:\n"
+                       printf "        ${expected}\n\n"
+               fi
+       fi
+
+       return $rc
+}
+
 # add route for a prefix, flushing any existing routes first
 # expected to be the first step of a test
 add_route6()
@@ -652,31 +685,7 @@ check_route6()
        pfx=$1
 
        out=$($IP -6 ro ls match ${pfx} | sed -e 's/ pref medium//')
-       [ "${out}" = "${expected}" ] && return 0
-
-       if [ -z "${out}" ]; then
-               if [ "$VERBOSE" = "1" ]; then
-                       printf "\nNo route entry found\n"
-                       printf "Expected:\n"
-                       printf "    ${expected}\n"
-               fi
-               return 1
-       fi
-
-       # tricky way to convert output to 1-line without ip's
-       # messy '\'; this drops all extra white space
-       out=$(echo ${out})
-       if [ "${out}" != "${expected}" ]; then
-               rc=1
-               if [ "${VERBOSE}" = "1" ]; then
-                       printf "    Unexpected route entry. Have:\n"
-                       printf "        ${out}\n"
-                       printf "    Expected:\n"
-                       printf "        ${expected}\n\n"
-               fi
-       fi
-
-       return $rc
+       check_expected "${out}" "${expected}"
 }
 
 route_cleanup()
@@ -725,7 +734,7 @@ route_setup()
        ip -netns ns2 addr add 172.16.103.2/24 dev veth4
        ip -netns ns2 addr add 172.16.104.1/24 dev dummy1
 
-       set +ex
+       set +e
 }
 
 # assumption is that basic add of a single path route works
@@ -960,7 +969,8 @@ ipv6_addr_metric_test()
        run_cmd "$IP li set dev dummy2 down"
        rc=$?
        if [ $rc -eq 0 ]; then
-               check_route6 ""
+               out=$($IP -6 ro ls match 2001:db8:104::/64)
+               check_expected "${out}" ""
                rc=$?
        fi
        log_test $rc 0 "Prefix route removed on link down"
@@ -1091,38 +1101,13 @@ check_route()
        local pfx
        local expected="$1"
        local out
-       local rc=0
 
        set -- $expected
        pfx=$1
        [ "${pfx}" = "unreachable" ] && pfx=$2
 
        out=$($IP ro ls match ${pfx})
-       [ "${out}" = "${expected}" ] && return 0
-
-       if [ -z "${out}" ]; then
-               if [ "$VERBOSE" = "1" ]; then
-                       printf "\nNo route entry found\n"
-                       printf "Expected:\n"
-                       printf "    ${expected}\n"
-               fi
-               return 1
-       fi
-
-       # tricky way to convert output to 1-line without ip's
-       # messy '\'; this drops all extra white space
-       out=$(echo ${out})
-       if [ "${out}" != "${expected}" ]; then
-               rc=1
-               if [ "${VERBOSE}" = "1" ]; then
-                       printf "    Unexpected route entry. Have:\n"
-                       printf "        ${out}\n"
-                       printf "    Expected:\n"
-                       printf "        ${expected}\n\n"
-               fi
-       fi
-
-       return $rc
+       check_expected "${out}" "${expected}"
 }
 
 # assumption is that basic add of a single path route works
@@ -1387,7 +1372,8 @@ ipv4_addr_metric_test()
        run_cmd "$IP li set dev dummy2 down"
        rc=$?
        if [ $rc -eq 0 ]; then
-               check_route ""
+               out=$($IP ro ls match 172.16.104.0/24)
+               check_expected "${out}" ""
                rc=$?
        fi
        log_test $rc 0 "Prefix route removed on link down"
index 2dc95fda7ef76e7b723fb91d9a68f44bcb6a2897..ea5938ec009a5eb9e28cb1778e081a568e66fd65 100755 (executable)
@@ -6,12 +6,14 @@ if [ $(id -u) != 0 ]; then
        exit 0
 fi
 
+ret=0
 echo "--------------------"
 echo "running psock_fanout test"
 echo "--------------------"
 ./in_netns.sh ./psock_fanout
 if [ $? -ne 0 ]; then
        echo "[FAIL]"
+       ret=1
 else
        echo "[PASS]"
 fi
@@ -22,6 +24,7 @@ echo "--------------------"
 ./in_netns.sh ./psock_tpacket
 if [ $? -ne 0 ]; then
        echo "[FAIL]"
+       ret=1
 else
        echo "[PASS]"
 fi
@@ -32,6 +35,8 @@ echo "--------------------"
 ./in_netns.sh ./txring_overwrite
 if [ $? -ne 0 ]; then
        echo "[FAIL]"
+       ret=1
 else
        echo "[PASS]"
 fi
+exit $ret
index b093f39c298c3f4d7ee43eed7c58772860f55da4..14e41faf2c5740633f9dd30e500543647b736734 100755 (executable)
@@ -7,7 +7,7 @@ echo "--------------------"
 ./socket
 if [ $? -ne 0 ]; then
        echo "[FAIL]"
+       exit 1
 else
        echo "[PASS]"
 fi
-
index c9ff2b47bd1ca3a2f70ee0683cb2b79b170c74f5..a37cb1192c6a6bc6080c829b63768e6ba52f8dd1 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for netfilter selftests
 
-TEST_PROGS := nft_trans_stress.sh nft_nat.sh
+TEST_PROGS := nft_trans_stress.sh nft_nat.sh conntrack_icmp_related.sh
 
 include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
new file mode 100755 (executable)
index 0000000..b48e183
--- /dev/null
@@ -0,0 +1,283 @@
+#!/bin/bash
+#
+# check that ICMP df-needed/pkttoobig icmp errors are set as related
+# state
+#
+# Setup is:
+#
+# nsclient1 -> nsrouter1 -> nsrouter2 -> nsclient2
+# MTU 1500, except for nsrouter2 <-> nsclient2 link (1280).
+# ping nsclient2 from nsclient1, checking that conntrack did set RELATED
+# 'fragmentation needed' icmp packet.
+#
+# In addition, nsrouter1 will perform IP masquerading, i.e. also
+# check the icmp errors are propagated to the correct host as per
+# nat of "established" icmp-echo "connection".
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without nft tool"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+cleanup() {
+       for i in 1 2;do ip netns del nsclient$i;done
+       for i in 1 2;do ip netns del nsrouter$i;done
+}
+
+ipv4() {
+    echo -n 192.168.$1.2
+}
+
+ipv6 () {
+    echo -n dead:$1::2
+}
+
+check_counter()
+{
+       ns=$1
+       name=$2
+       expect=$3
+       local lret=0
+
+       ip netns exec $ns nft list counter inet filter "$name" | grep -q "$expect"
+       if [ $? -ne 0 ]; then
+               echo "ERROR: counter $name in $ns has unexpected value (expected $expect)" 1>&2
+               ip netns exec $ns nft list counter inet filter "$name" 1>&2
+               lret=1
+       fi
+
+       return $lret
+}
+
+check_unknown()
+{
+       expect="packets 0 bytes 0"
+       for n in nsclient1 nsclient2 nsrouter1 nsrouter2; do
+               check_counter $n "unknown" "$expect"
+               if [ $? -ne 0 ] ;then
+                       return 1
+               fi
+       done
+
+       return 0
+}
+
+for n in nsclient1 nsclient2 nsrouter1 nsrouter2; do
+  ip netns add $n
+  ip -net $n link set lo up
+done
+
+DEV=veth0
+ip link add $DEV netns nsclient1 type veth peer name eth1 netns nsrouter1
+DEV=veth0
+ip link add $DEV netns nsclient2 type veth peer name eth1 netns nsrouter2
+
+DEV=veth0
+ip link add $DEV netns nsrouter1 type veth peer name eth2 netns nsrouter2
+
+DEV=veth0
+for i in 1 2; do
+    ip -net nsclient$i link set $DEV up
+    ip -net nsclient$i addr add $(ipv4 $i)/24 dev $DEV
+    ip -net nsclient$i addr add $(ipv6 $i)/64 dev $DEV
+done
+
+ip -net nsrouter1 link set eth1 up
+ip -net nsrouter1 link set veth0 up
+
+ip -net nsrouter2 link set eth1 up
+ip -net nsrouter2 link set eth2 up
+
+ip -net nsclient1 route add default via 192.168.1.1
+ip -net nsclient1 -6 route add default via dead:1::1
+
+ip -net nsclient2 route add default via 192.168.2.1
+ip -net nsclient2 route add default via dead:2::1
+
+i=3
+ip -net nsrouter1 addr add 192.168.1.1/24 dev eth1
+ip -net nsrouter1 addr add 192.168.3.1/24 dev veth0
+ip -net nsrouter1 addr add dead:1::1/64 dev eth1
+ip -net nsrouter1 addr add dead:3::1/64 dev veth0
+ip -net nsrouter1 route add default via 192.168.3.10
+ip -net nsrouter1 -6 route add default via dead:3::10
+
+ip -net nsrouter2 addr add 192.168.2.1/24 dev eth1
+ip -net nsrouter2 addr add 192.168.3.10/24 dev eth2
+ip -net nsrouter2 addr add dead:2::1/64 dev eth1
+ip -net nsrouter2 addr add dead:3::10/64 dev eth2
+ip -net nsrouter2 route add default via 192.168.3.1
+ip -net nsrouter2 route add default via dead:3::1
+
+sleep 2
+for i in 4 6; do
+       ip netns exec nsrouter1 sysctl -q net.ipv$i.conf.all.forwarding=1
+       ip netns exec nsrouter2 sysctl -q net.ipv$i.conf.all.forwarding=1
+done
+
+for netns in nsrouter1 nsrouter2; do
+ip netns exec $netns nft -f - <<EOF
+table inet filter {
+       counter unknown { }
+       counter related { }
+       chain forward {
+               type filter hook forward priority 0; policy accept;
+               meta l4proto icmpv6 icmpv6 type "packet-too-big" ct state "related" counter name "related" accept
+               meta l4proto icmp icmp type "destination-unreachable" ct state "related" counter name "related" accept
+               meta l4proto { icmp, icmpv6 } ct state new,established accept
+               counter name "unknown" drop
+       }
+}
+EOF
+done
+
+ip netns exec nsclient1 nft -f - <<EOF
+table inet filter {
+       counter unknown { }
+       counter related { }
+       chain input {
+               type filter hook input priority 0; policy accept;
+               meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+               meta l4proto { icmp, icmpv6 } ct state "related" counter name "related" accept
+               counter name "unknown" drop
+       }
+}
+EOF
+
+ip netns exec nsclient2 nft -f - <<EOF
+table inet filter {
+       counter unknown { }
+       counter new { }
+       counter established { }
+
+       chain input {
+               type filter hook input priority 0; policy accept;
+               meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+               meta l4proto { icmp, icmpv6 } ct state "new" counter name "new" accept
+               meta l4proto { icmp, icmpv6 } ct state "established" counter name "established" accept
+               counter name "unknown" drop
+       }
+       chain output {
+               type filter hook output priority 0; policy accept;
+               meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+               meta l4proto { icmp, icmpv6 } ct state "new" counter name "new"
+               meta l4proto { icmp, icmpv6 } ct state "established" counter name "established"
+               counter name "unknown" drop
+       }
+}
+EOF
+
+
+# make sure the NAT core rewrites the address of the icmp error if nat is
+# used, according to the conntrack nat information (the icmp error will be
+# directed at the nsrouter1 address, but it needs to be routed to the
+# nsclient1 address).
+ip netns exec nsrouter1 nft -f - <<EOF
+table ip nat {
+       chain postrouting {
+               type nat hook postrouting priority 0; policy accept;
+               ip protocol icmp oifname "veth0" counter masquerade
+       }
+}
+table ip6 nat {
+       chain postrouting {
+               type nat hook postrouting priority 0; policy accept;
+               ip6 nexthdr icmpv6 oifname "veth0" counter masquerade
+       }
+}
+EOF
+
+ip netns exec nsrouter2 ip link set eth1  mtu 1280
+ip netns exec nsclient2 ip link set veth0 mtu 1280
+sleep 1
+
+ip netns exec nsclient1 ping -c 1 -s 1000 -q -M do 192.168.2.2 >/dev/null
+if [ $? -ne 0 ]; then
+       echo "ERROR: netns ip routing/connectivity broken" 1>&2
+       cleanup
+       exit 1
+fi
+ip netns exec nsclient1 ping6 -q -c 1 -s 1000 dead:2::2 >/dev/null
+if [ $? -ne 0 ]; then
+       echo "ERROR: netns ipv6 routing/connectivity broken" 1>&2
+       cleanup
+       exit 1
+fi
+
+check_unknown
+if [ $? -ne 0 ]; then
+       ret=1
+fi
+
+expect="packets 0 bytes 0"
+for netns in nsrouter1 nsrouter2 nsclient1;do
+       check_counter "$netns" "related" "$expect"
+       if [ $? -ne 0 ]; then
+               ret=1
+       fi
+done
+
+expect="packets 2 bytes 2076"
+check_counter nsclient2 "new" "$expect"
+if [ $? -ne 0 ]; then
+       ret=1
+fi
+
+ip netns exec nsclient1 ping -q -c 1 -s 1300 -M do 192.168.2.2 > /dev/null
+if [ $? -eq 0 ]; then
+       echo "ERROR: ping should have failed with PMTU too big error" 1>&2
+       ret=1
+fi
+
+# nsrouter2 should have generated the icmp error, so
+# the related counter should be 0 (it's in forward).
+expect="packets 0 bytes 0"
+check_counter "nsrouter2" "related" "$expect"
+if [ $? -ne 0 ]; then
+       ret=1
+fi
+
+# but nsrouter1 should have seen it, same for nsclient1.
+expect="packets 1 bytes 576"
+for netns in nsrouter1 nsclient1;do
+       check_counter "$netns" "related" "$expect"
+       if [ $? -ne 0 ]; then
+               ret=1
+       fi
+done
+
+ip netns exec nsclient1 ping6 -c 1 -s 1300 dead:2::2 > /dev/null
+if [ $? -eq 0 ]; then
+       echo "ERROR: ping6 should have failed with PMTU too big error" 1>&2
+       ret=1
+fi
+
+expect="packets 2 bytes 1856"
+for netns in nsrouter1 nsclient1;do
+       check_counter "$netns" "related" "$expect"
+       if [ $? -ne 0 ]; then
+               ret=1
+       fi
+done
+
+if [ $ret -eq 0 ];then
+       echo "PASS: icmp mtu error had RELATED state"
+else
+       echo "ERROR: icmp error RELATED state test has failed"
+fi
+
+cleanup
+exit $ret
index 8ec76681605cca08f8cad14720ada2986d74f76c..3194007cf8d1bf3f456d9e4594417ffe2f9d56fd 100755 (executable)
@@ -321,6 +321,7 @@ EOF
 
 test_masquerade6()
 {
+       local natflags=$1
        local lret=0
 
        ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
@@ -354,13 +355,13 @@ ip netns exec ns0 nft -f - <<EOF
 table ip6 nat {
        chain postrouting {
                type nat hook postrouting priority 0; policy accept;
-               meta oif veth0 masquerade
+               meta oif veth0 masquerade $natflags
        }
 }
 EOF
        ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
+               echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerade $natflags"
                lret=1
        fi
 
@@ -397,19 +398,26 @@ EOF
                fi
        done
 
+       ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+       if [ $? -ne 0 ] ; then
+               echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerade $natflags (attempt 2)"
+               lret=1
+       fi
+
        ip netns exec ns0 nft flush chain ip6 nat postrouting
        if [ $? -ne 0 ]; then
                echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
                lret=1
        fi
 
-       test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
+       test $lret -eq 0 && echo "PASS: IPv6 masquerade $natflags for ns2"
 
        return $lret
 }
 
 test_masquerade()
 {
+       local natflags=$1
        local lret=0
 
        ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
@@ -417,7 +425,7 @@ test_masquerade()
 
        ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: canot ping ns1 from ns2"
+               echo "ERROR: cannot ping ns1 from ns2 $natflags"
                lret=1
        fi
 
@@ -443,13 +451,13 @@ ip netns exec ns0 nft -f - <<EOF
 table ip nat {
        chain postrouting {
                type nat hook postrouting priority 0; policy accept;
-               meta oif veth0 masquerade
+               meta oif veth0 masquerade $natflags
        }
 }
 EOF
        ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
+               echo "ERROR: cannot ping ns1 from ns2 with active ip masquere $natflags"
                lret=1
        fi
 
@@ -485,13 +493,19 @@ EOF
                fi
        done
 
+       ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+       if [ $? -ne 0 ] ; then
+               echo "ERROR: cannot ping ns1 from ns2 with active ip masquerade $natflags (attempt 2)"
+               lret=1
+       fi
+
        ip netns exec ns0 nft flush chain ip nat postrouting
        if [ $? -ne 0 ]; then
                echo "ERROR: Could not flush nat postrouting" 1>&2
                lret=1
        fi
 
-       test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
+       test $lret -eq 0 && echo "PASS: IP masquerade $natflags for ns2"
 
        return $lret
 }
@@ -750,8 +764,12 @@ test_local_dnat
 test_local_dnat6
 
 reset_counters
-test_masquerade
-test_masquerade6
+test_masquerade ""
+test_masquerade6 ""
+
+reset_counters
+test_masquerade "fully-random"
+test_masquerade6 "fully-random"
 
 reset_counters
 test_redirect
index 7202bbac976ea2b718952421b995ab593ce15fca..853aa164a401e054914cccf2a9663111e1c83939 100644 (file)
@@ -187,8 +187,8 @@ static int make_exe(const uint8_t *payload, size_t len)
        ph.p_offset = 0;
        ph.p_vaddr = VADDR;
        ph.p_paddr = 0;
-       ph.p_filesz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + sizeof(payload);
-       ph.p_memsz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + sizeof(payload);
+       ph.p_filesz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + len;
+       ph.p_memsz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + len;
        ph.p_align = 4096;
 
        fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_EXCL|O_TMPFILE, 0700);
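
The bug fixed above is the classic array-to-pointer decay: inside make_exe() the payload parameter is a const uint8_t *, so sizeof(payload) is the size of a pointer (8 on LP64), not the number of payload bytes, which is why the explicit len must be used. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

static void show(const uint8_t *payload, size_t len)
{
        /* sizeof(payload) is sizeof(uint8_t *), regardless of what the
         * caller passed in. */
        printf("sizeof(payload)=%zu len=%zu\n", sizeof(payload), len);
}

int main(void)
{
        uint8_t buf[4096] = { 0 };

        show(buf, sizeof(buf)); /* prints 8 (or 4) vs 4096 */
        return 0;
}
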
index 762cb01f2ca719da36873484ef3a3489a6f71b3d..47b7473dedef74ccefead246ec19f20159c78958 100644 (file)
@@ -46,12 +46,9 @@ static void fail(const char *fmt, unsigned long a, unsigned long b)
 
 int main(void)
 {
-       const unsigned int PAGE_SIZE = sysconf(_SC_PAGESIZE);
-#ifdef __arm__
-       unsigned long va = 2 * PAGE_SIZE;
-#else
-       unsigned long va = 0;
-#endif
+       const int PAGE_SIZE = sysconf(_SC_PAGESIZE);
+       const unsigned long va_max = 1UL << 32;
+       unsigned long va;
        void *p;
        int fd;
        unsigned long a, b;
@@ -60,10 +57,13 @@ int main(void)
        if (fd == -1)
                return 1;
 
-       p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
-       if (p == MAP_FAILED) {
-               if (errno == EPERM)
-                       return 4;
+       for (va = 0; va < va_max; va += PAGE_SIZE) {
+               p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
+               if (p == (void *)va)
+                       break;
+       }
+       if (va == va_max) {
+               fprintf(stderr, "error: mmap doesn't like you\n");
                return 1;
        }
 
index 3547b0d8c91ea2c84e0869b769e9947829fe4286..79e59e4fa3dc6be751079e669e214b7fc614e07f 100644 (file)
@@ -144,18 +144,19 @@ static int setup_routing_entry(struct kvm *kvm,
 {
        struct kvm_kernel_irq_routing_entry *ei;
        int r;
+       u32 gsi = array_index_nospec(ue->gsi, KVM_MAX_IRQ_ROUTES);
 
        /*
         * Do not allow GSI to be mapped to the same irqchip more than once.
         * Allow only one to one mapping between GSI and non-irqchip routing.
         */
-       hlist_for_each_entry(ei, &rt->map[ue->gsi], link)
+       hlist_for_each_entry(ei, &rt->map[gsi], link)
                if (ei->type != KVM_IRQ_ROUTING_IRQCHIP ||
                    ue->type != KVM_IRQ_ROUTING_IRQCHIP ||
                    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
                        return -EINVAL;
 
-       e->gsi = ue->gsi;
+       e->gsi = gsi;
        e->type = ue->type;
        r = kvm_set_routing_entry(kvm, e, ue);
        if (r)
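
Both files get the same Spectre-v1 hardening: a user-controlled index is clamped with array_index_nospec() before it is used, so even a mispredicted bounds check cannot speculatively read out of the table. Conceptually (a userspace sketch modeled on the kernel's generic branchless fallback, not the exact implementation on every arch):

/* Returns 'idx' when idx < size and 0 otherwise, computed without a
 * branch so speculation has nothing to mispredict. */
static inline unsigned long nospec_index(unsigned long idx, unsigned long size)
{
        /* mask is all-ones when idx < size, all-zeroes otherwise */
        unsigned long mask =
                ~(long)(idx | (size - 1UL - idx)) >> (sizeof(long) * 8 - 1);

        return idx & mask;
}
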
index 55fe8e20d8fd9b7367619a250dde9076a74bdc6e..dc8edc97ba850384680b56f88063f61bebfc96c8 100644 (file)
@@ -2977,12 +2977,14 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
        struct kvm_device_ops *ops = NULL;
        struct kvm_device *dev;
        bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
+       int type;
        int ret;
 
        if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
                return -ENODEV;
 
-       ops = kvm_device_ops_table[cd->type];
+       type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
+       ops = kvm_device_ops_table[type];
        if (ops == NULL)
                return -ENODEV;
 
@@ -2997,7 +2999,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
        dev->kvm = kvm;
 
        mutex_lock(&kvm->lock);
-       ret = ops->create(dev, cd->type);
+       ret = ops->create(dev, type);
        if (ret < 0) {
                mutex_unlock(&kvm->lock);
                kfree(dev);