Merge tag 'omap-for-v4.16/fixes-signed' of ssh://gitolite.kernel.org/pub/scm/linux...
author     Arnd Bergmann <arnd@arndb.de>
           Thu, 22 Feb 2018 16:45:52 +0000 (17:45 +0100)
committer  Arnd Bergmann <arnd@arndb.de>
           Thu, 22 Feb 2018 16:45:52 +0000 (17:45 +0100)
Fixes for omaps for the v4.16-rc cycle

These are mostly SoC-related fixes for clocks, interconnect, and PM, with a few
board-specific dts fixes:

- Fix quirk handling for ti-sysc to check all quirk flags instead of just
  the first one

- Fix LogicPD boards for i2c1 muxing to avoid intermittent PMIC errors

- Fix debounce-interval use for omap5-uevm

- Fix debugfs_create_*() usage for omap1

- Fix sar_base initialization for HS omaps

- Fix omap3 prm wake interrupt for resume

- Fix kmemleak for omap_get_timer_dt()

- Enable optional clocks before main clock to prevent interconnect target
  module from being stuck in transition

* tag 'omap-for-v4.16/fixes-signed' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap:
  bus: ti-sysc: Fix checking of no-reset-on-init quirk
  ARM: dts: LogicPD SOM-LV: Fix I2C1 pinmux
  ARM: dts: LogicPD Torpedo: Fix I2C1 pinmux
  ARM: dts: OMAP5: uevm: Fix "debounce-interval" property misspelling
  ARM: OMAP1: clock: Fix debugfs_create_*() usage
  ARM: OMAP2+: Fix sar_base initialization for HS omaps
  ARM: OMAP3: Fix prm wake interrupt for resume
  ARM: OMAP2+: timer: fix a kmemleak caused in omap_get_timer_dt
  ARM: OMAP2+: hwmod_core: enable optional clocks before main clock

295 files changed:
Documentation/ABI/testing/sysfs-devices-platform-dock [new file with mode: 0644]
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/ABI/testing/sysfs-platform-dptf [new file with mode: 0644]
Documentation/atomic_bitops.txt
Documentation/devicetree/bindings/power/mti,mips-cpc.txt [new file with mode: 0644]
Documentation/features/sched/membarrier-sync-core/arch-support.txt [new file with mode: 0644]
Documentation/locking/mutex-design.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/zx296702.dtsi
arch/arm/mach-clps711x/board-dt.c
arch/arm/mach-davinci/board-dm355-evm.c
arch/arm/mach-davinci/board-dm355-leopard.c
arch/arm/mach-davinci/board-dm365-evm.c
arch/arm/mach-mvebu/Kconfig
arch/arm64/boot/dts/amlogic/meson-axg.dtsi
arch/arm64/boot/dts/amlogic/meson-gx.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
arch/arm64/boot/dts/mediatek/mt8173.dtsi
arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
arch/arm64/boot/dts/qcom/msm8996.dtsi
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/hugetlb.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/pgalloc.h
arch/arm64/include/asm/pgtable.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/efi.c
arch/arm64/kernel/hibernate.c
arch/arm64/kvm/hyp/switch.c
arch/arm64/mm/dump.c
arch/arm64/mm/fault.c
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/kasan_init.c
arch/arm64/mm/mmu.c
arch/arm64/mm/pageattr.c
arch/arm64/mm/proc.S
arch/ia64/kernel/Makefile
arch/mips/kernel/mips-cpc.c
arch/mips/kernel/setup.c
arch/mips/kernel/smp-bmips.c
arch/powerpc/include/asm/book3s/32/pgtable.h
arch/powerpc/include/asm/book3s/64/hash-4k.h
arch/powerpc/include/asm/book3s/64/hash-64k.h
arch/powerpc/include/asm/book3s/64/hash.h
arch/powerpc/include/asm/book3s/64/pgalloc.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/include/asm/kexec.h
arch/powerpc/include/asm/nohash/32/pgtable.h
arch/powerpc/include/asm/nohash/64/pgtable.h
arch/powerpc/include/asm/topology.h
arch/powerpc/kernel/exceptions-64e.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/sysfs.c
arch/powerpc/mm/drmem.c
arch/powerpc/mm/hash64_4k.c
arch/powerpc/mm/hash64_64k.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugetlbpage-hash64.c
arch/powerpc/mm/init-common.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/pgtable-radix.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/mm/tlb_hash64.c
arch/powerpc/platforms/powernv/opal-imc.c
arch/powerpc/platforms/powernv/vas-window.c
arch/powerpc/platforms/pseries/hotplug-cpu.c
arch/powerpc/platforms/pseries/ras.c
arch/powerpc/sysdev/xive/spapr.c
arch/sparc/Kconfig
arch/x86/.gitignore
arch/x86/Kconfig
arch/x86/Kconfig.cpu
arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
arch/x86/entry/calling.h
arch/x86/entry/entry_64.S
arch/x86/entry/entry_64_compat.S
arch/x86/events/intel/core.c
arch/x86/events/intel/lbr.c
arch/x86/events/intel/p6.c
arch/x86/include/asm/acpi.h
arch/x86/include/asm/barrier.h
arch/x86/include/asm/bug.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/page_64.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/pgtable_32.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/smp.h
arch/x86/include/asm/tlbflush.h
arch/x86/kernel/amd_nb.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/centaur.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cyrix.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_rdt.c
arch/x86/kernel/cpu/mcheck/mce-internal.h
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/mtrr/main.c
arch/x86/kernel/cpu/proc.c
arch/x86/kernel/head_32.S
arch/x86/kernel/mpparse.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/traps.c
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c
arch/x86/lib/cpu.c
arch/x86/lib/error-inject.c
arch/x86/mm/init_64.c
arch/x86/mm/ioremap.c
arch/x86/mm/kmmio.c
arch/x86/mm/pgtable_32.c
arch/x86/mm/tlb.c
arch/x86/platform/uv/tlb_uv.c
arch/x86/xen/mmu_pv.c
arch/x86/xen/smp.c
block/blk-mq.c
crypto/sha3_generic.c
drivers/acpi/bus.c
drivers/acpi/ec.c
drivers/acpi/property.c
drivers/acpi/spcr.c
drivers/base/core.c
drivers/base/power/wakeirq.c
drivers/base/property.c
drivers/char/hw_random/via-rng.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/longhaul.c
drivers/cpufreq/p4-clockmod.c
drivers/cpufreq/powernow-k7.c
drivers/cpufreq/speedstep-centrino.c
drivers/cpufreq/speedstep-lib.c
drivers/crypto/caam/ctrl.c
drivers/crypto/padlock-aes.c
drivers/crypto/sunxi-ss/sun4i-ss-prng.c
drivers/crypto/talitos.c
drivers/edac/amd64_edac.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/trace.h
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_oa_cflgt3.c
drivers/gpu/drm/i915/i915_oa_cnl.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/i915_pmu.h
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_cdclk.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
drivers/hwmon/coretemp.c
drivers/hwmon/hwmon-vid.c
drivers/hwmon/k10temp.c
drivers/hwmon/k8temp.c
drivers/irqchip/irq-bcm7038-l1.c
drivers/irqchip/irq-bcm7120-l2.c
drivers/irqchip/irq-brcmstb-l2.c
drivers/irqchip/irq-gic-v2m.c
drivers/irqchip/irq-gic-v3-its-pci-msi.c
drivers/irqchip/irq-gic-v3-its-platform-msi.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-mips-gic.c
drivers/macintosh/macio_asic.c
drivers/md/dm.c
drivers/misc/ocxl/file.c
drivers/mmc/host/bcm2835.c
drivers/mmc/host/meson-gx-mmc.c
drivers/mtd/nand/Kconfig
drivers/mtd/nand/vf610_nfc.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/io-cmd.c
drivers/of/property.c
drivers/opp/cpu.c
drivers/platform/x86/dell-laptop.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/wmi.c
drivers/s390/virtio/virtio_ccw.c
drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
drivers/usb/Kconfig
drivers/usb/host/Kconfig
drivers/video/fbdev/geode/video_gx.c
drivers/xen/pvcalls-front.c
drivers/xen/xenbus/xenbus.h
drivers/xen/xenbus/xenbus_comms.c
drivers/xen/xenbus/xenbus_xs.c
fs/btrfs/backref.c
fs/btrfs/delayed-ref.c
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
fs/btrfs/qgroup.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/gfs2/bmap.c
fs/proc/kcore.c
include/asm-generic/bitops/lock.h
include/linux/acpi.h
include/linux/blkdev.h
include/linux/compiler-gcc.h
include/linux/compiler.h
include/linux/cpuidle.h
include/linux/cpumask.h
include/linux/dma-mapping.h
include/linux/fwnode.h
include/linux/kcore.h
include/linux/mm_inline.h
include/linux/nospec.h
include/linux/property.h
include/linux/semaphore.h
include/sound/ac97/regs.h
include/trace/events/xen.h
kernel/irq/irqdomain.c
kernel/kprobes.c
kernel/locking/qspinlock.c
kernel/sched/core.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/deadline.c
kernel/sched/rt.c
lib/dma-direct.c
mm/memory-failure.c
mm/memory.c
net/9p/trans_virtio.c
sound/ac97/Kconfig
sound/core/seq/seq_clientmgr.c
sound/pci/hda/patch_realtek.c
sound/usb/mixer.c
sound/usb/pcm.c
sound/usb/quirks.c
tools/arch/powerpc/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/unistd.h [deleted file]
tools/arch/x86/include/asm/cpufeatures.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/if_link.h
tools/include/uapi/linux/kvm.h
tools/objtool/check.c
tools/objtool/check.h
tools/perf/Documentation/perf-data.txt
tools/perf/arch/s390/Makefile
tools/perf/arch/s390/entry/syscalls/mksyscalltbl
tools/perf/arch/s390/entry/syscalls/syscall.tbl [new file with mode: 0644]
tools/perf/builtin-c2c.c
tools/perf/builtin-report.c
tools/perf/builtin-top.c
tools/perf/check-headers.sh
tools/perf/pmu-events/arch/arm64/cortex-a53/branch.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/cortex-a53/bus.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/cortex-a53/cache.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/cortex-a53/memory.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/cortex-a53/other.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/cortex-a53/pipeline.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/mapfile.csv
tools/perf/tests/backward-ring-buffer.c
tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
tools/perf/ui/browsers/hists.c
tools/perf/ui/browsers/hists.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/hist.h
tools/perf/util/mmap.c
tools/perf/util/mmap.h
tools/perf/util/util.c
tools/testing/selftests/powerpc/alignment/alignment_handler.c
tools/testing/selftests/x86/Makefile
tools/testing/selftests/x86/mpx-mini-test.c
tools/testing/selftests/x86/protection_keys.c
tools/testing/selftests/x86/single_step_syscall.c
tools/testing/selftests/x86/test_mremap_vdso.c
tools/testing/selftests/x86/test_vdso.c
tools/testing/selftests/x86/test_vsyscall.c

diff --git a/Documentation/ABI/testing/sysfs-devices-platform-dock b/Documentation/ABI/testing/sysfs-devices-platform-dock
new file mode 100644 (file)
index 0000000..1d8c18f
--- /dev/null
@@ -0,0 +1,39 @@
+What:          /sys/devices/platform/dock.N/docked
+Date:          Dec, 2006
+KernelVersion: 2.6.19
+Contact:       linux-acpi@vger.kernel.org
+Description:
+               (RO) Value 1 or 0 indicates whether the software believes the
+               laptop is docked in a docking station.
+
+What:          /sys/devices/platform/dock.N/undock
+Date:          Dec, 2006
+KernelVersion: 2.6.19
+Contact:       linux-acpi@vger.kernel.org
+Description:
+               (WO) Writing to this file causes the software to initiate an
+               undock request to the firmware.
+
+What:          /sys/devices/platform/dock.N/uid
+Date:          Feb, 2007
+KernelVersion: v2.6.21
+Contact:       linux-acpi@vger.kernel.org
+Description:
+               (RO) Displays the docking station the laptop is docked to.
+
+What:          /sys/devices/platform/dock.N/flags
+Date:          May, 2007
+KernelVersion: v2.6.21
+Contact:       linux-acpi@vger.kernel.org
+Description:
+               (RO) Show dock station flags, useful for checking if undock
+               request has been made by the user (from the immediate_undock
+               option).
+
+What:          /sys/devices/platform/dock.N/type
+Date:          Aug, 2008
+KernelVersion: v2.6.27
+Contact:       linux-acpi@vger.kernel.org
+Description:
+               (RO) Display the dock station type- dock_station, ata_bay or
+               battery_bay.
index bfd29bc8d37af1bbc4913deee9d941b1692bedda..4ed63b6cfb155cbc8b5aaab64e1030662f19411f 100644 (file)
@@ -108,6 +108,8 @@ Description:        CPU topology files that describe a logical CPU's relationship
 
 What:          /sys/devices/system/cpu/cpuidle/current_driver
                /sys/devices/system/cpu/cpuidle/current_governer_ro
+               /sys/devices/system/cpu/cpuidle/available_governors
+               /sys/devices/system/cpu/cpuidle/current_governor
 Date:          September 2007
 Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:   Discover cpuidle policy and mechanism
@@ -119,13 +121,84 @@ Description:      Discover cpuidle policy and mechanism
                Idle policy (governor) is differentiated from idle mechanism
                (driver)
 
-               current_driver: displays current idle mechanism
+               current_driver: (RO) displays current idle mechanism
 
-               current_governor_ro: displays current idle policy
+               current_governor_ro: (RO) displays current idle policy
+
+               With the cpuidle_sysfs_switch boot option enabled (meant for
+               developer testing), the following three attributes are visible
+               instead:
+
+               current_driver: same as described above
+
+               available_governors: (RO) displays a space separated list of
+               available governors
+
+               current_governor: (RW) displays current idle policy. Users can
+               switch the governor at runtime by writing to this file.
 
                See files in Documentation/cpuidle/ for more information.
 
 
+What:          /sys/devices/system/cpu/cpuX/cpuidle/stateN/name
+               /sys/devices/system/cpu/cpuX/cpuidle/stateN/latency
+               /sys/devices/system/cpu/cpuX/cpuidle/stateN/power
+               /sys/devices/system/cpu/cpuX/cpuidle/stateN/time
+               /sys/devices/system/cpu/cpuX/cpuidle/stateN/usage
+Date:          September 2007
+KernelVersion: v2.6.24
+Contact:       Linux power management list <linux-pm@vger.kernel.org>
+Description:
+               The directory /sys/devices/system/cpu/cpuX/cpuidle contains per
+               logical CPU specific cpuidle information for each online cpu X.
+               The processor idle states which are available for use have the
+               following attributes:
+
+               name: (RO) Name of the idle state (string).
+
+               latency: (RO) The latency to exit out of this idle state (in
+               microseconds).
+
+               power: (RO) The power consumed while in this idle state (in
+               milliwatts).
+
+               time: (RO) The total time spent in this idle state (in microseconds).
+
+               usage: (RO) Number of times this state was entered (a count).
+
+
+What:          /sys/devices/system/cpu/cpuX/cpuidle/stateN/desc
+Date:          February 2008
+KernelVersion: v2.6.25
+Contact:       Linux power management list <linux-pm@vger.kernel.org>
+Description:
+               (RO) A small description about the idle state (string).
+
+
+What:          /sys/devices/system/cpu/cpuX/cpuidle/stateN/disable
+Date:          March 2012
+KernelVersion: v3.10
+Contact:       Linux power management list <linux-pm@vger.kernel.org>
+Description:
+               (RW) Option to disable this idle state (bool). The behavior and
+               the effect of the disable variable depends on the implementation
+               of a particular governor. In the ladder governor, for example,
+               it is not coherent, i.e. if one is disabling a light state, then
+               all deeper states are disabled as well, but the disable variable
+               does not reflect it. Likewise, if one enables a deep state but a
+               lighter state still is disabled, then this has no effect.
+
+
+What:          /sys/devices/system/cpu/cpuX/cpuidle/stateN/residency
+Date:          March 2014
+KernelVersion: v3.15
+Contact:       Linux power management list <linux-pm@vger.kernel.org>
+Description:
+               (RO) Display the target residency i.e. the minimum amount of
+               time (in microseconds) this cpu should spend in this idle state
+               to make the transition worth the effort.
+
+
 What:          /sys/devices/system/cpu/cpu#/cpufreq/*
 Date:          pre-git history
 Contact:       linux-pm@vger.kernel.org
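
The cpuidle attributes documented above form a small read-mostly sysfs ABI; a
minimal user-space sketch that walks the stateN directories for cpu0 (assuming
a kernel that exposes the layout described above; the program and its output
format are illustrative only) could look like:

    #include <stdio.h>

    /*
     * Sketch: print the name of each cpu0 idle state documented above.
     * Assumes the stateN directories are dense from 0 upward and stops
     * at the first one that is missing.
     */
    int main(void)
    {
            char path[128], buf[64];
            int n;

            for (n = 0; ; n++) {
                    FILE *f;

                    snprintf(path, sizeof(path),
                             "/sys/devices/system/cpu/cpu0/cpuidle/state%d/name", n);
                    f = fopen(path, "r");
                    if (!f)
                            break;
                    if (fgets(buf, sizeof(buf), f))
                            printf("state%d name: %s", n, buf);
                    fclose(f);
            }
            return 0;
    }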
diff --git a/Documentation/ABI/testing/sysfs-platform-dptf b/Documentation/ABI/testing/sysfs-platform-dptf
new file mode 100644 (file)
index 0000000..325dc06
--- /dev/null
@@ -0,0 +1,40 @@
+What:          /sys/bus/platform/devices/INT3407:00/dptf_power/charger_type
+Date:          Jul, 2016
+KernelVersion: v4.10
+Contact:       linux-acpi@vger.kernel.org
+Description:
+               (RO) The charger type - Traditional, Hybrid or NVDC.
+
+What:          /sys/bus/platform/devices/INT3407:00/dptf_power/adapter_rating_mw
+Date:          Jul, 2016
+KernelVersion: v4.10
+Contact:       linux-acpi@vger.kernel.org
+Description:
+               (RO) Adapter rating in milliwatts (the maximum Adapter power).
+               Must be 0 if no AC Adaptor is plugged in.
+
+What:          /sys/bus/platform/devices/INT3407:00/dptf_power/max_platform_power_mw
+Date:          Jul, 2016
+KernelVersion: v4.10
+Contact:       linux-acpi@vger.kernel.org
+Description:
+               (RO) Maximum platform power that can be supported by the battery
+               in milliwatts.
+
+What:          /sys/bus/platform/devices/INT3407:00/dptf_power/platform_power_source
+Date:          Jul, 2016
+KernelVersion: v4.10
+Contact:       linux-acpi@vger.kernel.org
+Description:
+               (RO) Display the platform power source
+               0x00 = DC
+               0x01 = AC
+               0x02 = USB
+               0x03 = Wireless Charger
+
+What:          /sys/bus/platform/devices/INT3407:00/dptf_power/battery_steady_power
+Date:          Jul, 2016
+KernelVersion: v4.10
+Contact:       linux-acpi@vger.kernel.org
+Description:
+               (RO) The maximum sustained power for battery in milliwatts.
index 5550bfdcce5f1cfaab57728f4c272e9e84902860..be70b32c95d918066ffa72dfa4a69e8b4e51a225 100644 (file)
@@ -58,7 +58,12 @@ Like with atomic_t, the rule of thumb is:
 
  - RMW operations that have a return value are fully ordered.
 
-Except for test_and_set_bit_lock() which has ACQUIRE semantics and
+ - RMW operations that are conditional are unordered on FAILURE,
+   otherwise the above rules apply. In the case of test_and_{}_bit() operations,
+   if the bit in memory is unchanged by the operation then it is deemed to have
+   failed.
+
+Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics and
 clear_bit_unlock() which has RELEASE semantics.
 
 Since a platform only has a single means of achieving atomic operations
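
To make the rules above concrete: a successful test_and_set_bit_lock() orders
like an ACQUIRE and clear_bit_unlock() like a RELEASE, which is exactly what a
bit spinlock needs. A minimal in-kernel sketch (the foo structure and the
choice of bit 0 are hypothetical):

    #include <linux/bitops.h>
    #include <asm/processor.h>      /* cpu_relax() */

    struct foo {
            unsigned long flags;    /* bit 0 acts as the lock bit */
    };

    static void foo_lock(struct foo *f)
    {
            /*
             * Only a *successful* test_and_set_bit_lock() has ACQUIRE
             * semantics; a failed attempt (bit already set) is unordered,
             * which is fine here because we simply spin and retry.
             */
            while (test_and_set_bit_lock(0, &f->flags))
                    cpu_relax();
    }

    static void foo_unlock(struct foo *f)
    {
            /*
             * clear_bit_unlock() has RELEASE semantics: all accesses in
             * the critical section are ordered before the bit is seen
             * clear by the next acquirer.
             */
            clear_bit_unlock(0, &f->flags);
    }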
diff --git a/Documentation/devicetree/bindings/power/mti,mips-cpc.txt b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt
new file mode 100644 (file)
index 0000000..c6b8251
--- /dev/null
@@ -0,0 +1,8 @@
+Binding for MIPS Cluster Power Controller (CPC).
+
+This binding allows a system to specify where the CPC registers are
+located.
+
+Required properties:
+compatible : Should be "mti,mips-cpc".
+regs: Should describe the address & size of the CPC register region.
diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
new file mode 100644 (file)
index 0000000..2c815a7
--- /dev/null
@@ -0,0 +1,62 @@
+#
+# Feature name:          membarrier-sync-core
+#         Kconfig:       ARCH_HAS_MEMBARRIER_SYNC_CORE
+#         description:   arch supports core serializing membarrier
+#
+# Architecture requirements
+#
+# * arm64
+#
+# Rely on eret context synchronization when returning from IPI handler, and
+# when returning to user-space.
+#
+# * x86
+#
+# x86-32 uses IRET as return from interrupt, which takes care of the IPI.
+# However, it uses both IRET and SYSEXIT to go back to user-space. The IRET
+# instruction is core serializing, but not SYSEXIT.
+#
+# x86-64 uses IRET as return from interrupt, which takes care of the IPI.
+# However, it can return to user-space through either SYSRETL (compat code),
+# SYSRETQ, or IRET.
+#
+# Given that neither SYSRET{L,Q}, nor SYSEXIT, are core serializing, we rely
+# instead on write_cr3() performed by switch_mm() to provide core serialization
+# after changing the current mm, and deal with the special case of kthread ->
+# uthread (temporarily keeping current mm into active_mm) by issuing a
+# sync_core_before_usermode() in that specific case.
+#
+    -----------------------
+    |         arch |status|
+    -----------------------
+    |       alpha: | TODO |
+    |         arc: | TODO |
+    |         arm: | TODO |
+    |       arm64: |  ok  |
+    |    blackfin: | TODO |
+    |         c6x: | TODO |
+    |        cris: | TODO |
+    |         frv: | TODO |
+    |       h8300: | TODO |
+    |     hexagon: | TODO |
+    |        ia64: | TODO |
+    |        m32r: | TODO |
+    |        m68k: | TODO |
+    |       metag: | TODO |
+    |  microblaze: | TODO |
+    |        mips: | TODO |
+    |     mn10300: | TODO |
+    |       nios2: | TODO |
+    |    openrisc: | TODO |
+    |      parisc: | TODO |
+    |     powerpc: | TODO |
+    |        s390: | TODO |
+    |       score: | TODO |
+    |          sh: | TODO |
+    |       sparc: | TODO |
+    |        tile: | TODO |
+    |          um: | TODO |
+    |   unicore32: | TODO |
+    |         x86: |  ok  |
+    |      xtensa: | TODO |
+    -----------------------
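+
+The feature tracked here corresponds to the MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE
+command; a minimal user-space sketch of issuing it (assuming a v4.16 kernel on
+one of the architectures marked "ok" above) might be:
+
+    #include <linux/membarrier.h>
+    #include <sys/syscall.h>
+    #include <unistd.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+            /* The process must register before using the private command. */
+            if (syscall(__NR_membarrier,
+                        MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0)) {
+                    perror("membarrier register");
+                    return 1;
+            }
+            /* Request a core-serializing instruction on all our threads. */
+            if (syscall(__NR_membarrier,
+                        MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0)) {
+                    perror("membarrier sync-core");
+                    return 1;
+            }
+            return 0;
+    }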
index 60c482df1a38db2b300952832095ef41e2b8e655..818aca19612f4a763c96ee2eb1fbf8b3cb452e9e 100644 (file)
@@ -21,37 +21,23 @@ Implementation
 --------------
 
 Mutexes are represented by 'struct mutex', defined in include/linux/mutex.h
-and implemented in kernel/locking/mutex.c. These locks use a three
-state atomic counter (->count) to represent the different possible
-transitions that can occur during the lifetime of a lock:
-
-         1: unlocked
-         0: locked, no waiters
-   negative: locked, with potential waiters
-
-In its most basic form it also includes a wait-queue and a spinlock
-that serializes access to it. CONFIG_SMP systems can also include
-a pointer to the lock task owner (->owner) as well as a spinner MCS
-lock (->osq), both described below in (ii).
+and implemented in kernel/locking/mutex.c. These locks use an atomic variable
+(->owner) to keep track of the lock state during its lifetime.  Field owner
+actually contains 'struct task_struct *' to the current lock owner and it is
+therefore NULL if not currently owned. Since task_struct pointers are aligned
+at at least L1_CACHE_BYTES, low bits (3) are used to store extra state (e.g.,
+if waiter list is non-empty).  In its most basic form it also includes a
+wait-queue and a spinlock that serializes access to it. Furthermore,
+CONFIG_MUTEX_SPIN_ON_OWNER=y systems use a spinner MCS lock (->osq), described
+below in (ii).
 
 When acquiring a mutex, there are three possible paths that can be
 taken, depending on the state of the lock:
 
-(i) fastpath: tries to atomically acquire the lock by decrementing the
-    counter. If it was already taken by another task it goes to the next
-    possible path. This logic is architecture specific. On x86-64, the
-    locking fastpath is 2 instructions:
-
-    0000000000000e10 <mutex_lock>:
-    e21:   f0 ff 0b                lock decl (%rbx)
-    e24:   79 08                   jns    e2e <mutex_lock+0x1e>
-
-   the unlocking fastpath is equally tight:
-
-    0000000000000bc0 <mutex_unlock>:
-    bc8:   f0 ff 07                lock incl (%rdi)
-    bcb:   7f 0a                   jg     bd7 <mutex_unlock+0x17>
-
+(i) fastpath: tries to atomically acquire the lock by cmpxchg()ing the owner with
+    the current task. This only works in the uncontended case (cmpxchg() checks
+    against 0UL, so all 3 state bits above have to be 0). If the lock is
+    contended it goes to the next possible path.
 
 (ii) midpath: aka optimistic spinning, tries to spin for acquisition
      while the lock owner is running and there are no other tasks ready
@@ -143,11 +129,10 @@ Test if the mutex is taken:
 Disadvantages
 -------------
 
-Unlike its original design and purpose, 'struct mutex' is larger than
-most locks in the kernel. E.g: on x86-64 it is 40 bytes, almost twice
-as large as 'struct semaphore' (24 bytes) and tied, along with rwsems,
-for the largest lock in the kernel. Larger structure sizes mean more
-CPU cache and memory footprint.
+Unlike its original design and purpose, 'struct mutex' is among the largest
+locks in the kernel. E.g: on x86-64 it is 32 bytes, where 'struct semaphore'
+is 24 bytes and rw_semaphore is 40 bytes. Larger structure sizes mean more CPU
+cache and memory footprint.
 
 When to use mutexes
 -------------------
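
The rewritten fastpath above is easiest to see stripped of the mid/slowpath
machinery; a user-space toy that keeps only the owner-word cmpxchg (the
toy_mutex type is hypothetical and omits the low state bits and the waiter
list that the real 'struct mutex' carries):

    #include <stdatomic.h>
    #include <stddef.h>

    struct toy_mutex {
            _Atomic(void *) owner;  /* NULL means unlocked */
    };

    /* Returns 1 on the uncontended fastpath, 0 when contended. */
    static int toy_mutex_trylock(struct toy_mutex *m, void *task)
    {
            void *expected = NULL;

            /*
             * Like the kernel fastpath: cmpxchg() the owner from NULL
             * (i.e. all state bits clear) to the current task. The real
             * code falls back to the mid/slowpath on failure.
             */
            return atomic_compare_exchange_strong(&m->owner, &expected, task);
    }

    static void toy_mutex_unlock(struct toy_mutex *m)
    {
            atomic_store(&m->owner, NULL);  /* release the lock word */
    }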
index 3bdc260e36b7a7eaac26003b187c436655fc3412..f2e4d9d85ee4b65dfe7b6684704eb87a1f290aef 100644 (file)
@@ -1238,7 +1238,7 @@ F:        drivers/clk/at91
 
 ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
 M:     Nicolas Ferre <nicolas.ferre@microchip.com>
-M:     Alexandre Belloni <alexandre.belloni@free-electrons.com>
+M:     Alexandre Belloni <alexandre.belloni@bootlin.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.linux4sam.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nferre/linux-at91.git
@@ -1590,7 +1590,7 @@ ARM/Marvell Dove/MV78xx0/Orion SOC support
 M:     Jason Cooper <jason@lakedaemon.net>
 M:     Andrew Lunn <andrew@lunn.ch>
 M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
-M:     Gregory Clement <gregory.clement@free-electrons.com>
+M:     Gregory Clement <gregory.clement@bootlin.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     Documentation/devicetree/bindings/soc/dove/
@@ -1604,7 +1604,7 @@ F:        arch/arm/boot/dts/orion5x*
 ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K SOC support
 M:     Jason Cooper <jason@lakedaemon.net>
 M:     Andrew Lunn <andrew@lunn.ch>
-M:     Gregory Clement <gregory.clement@free-electrons.com>
+M:     Gregory Clement <gregory.clement@bootlin.com>
 M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -9206,6 +9206,7 @@ MIPS GENERIC PLATFORM
 M:     Paul Burton <paul.burton@mips.com>
 L:     linux-mips@linux-mips.org
 S:     Supported
+F:     Documentation/devicetree/bindings/power/mti,mips-cpc.txt
 F:     arch/mips/generic/
 F:     arch/mips/tools/generic-board-config.sh
 
@@ -9945,6 +9946,7 @@ F:        drivers/nfc/nxp-nci
 
 OBJTOOL
 M:     Josh Poimboeuf <jpoimboe@redhat.com>
+M:     Peter Zijlstra <peterz@infradead.org>
 S:     Supported
 F:     tools/objtool/
 
index 79ad2bfa24b68f279af011f82e87f16d64d1440d..d9cf3a40eda9d20ce03ceda2ebc921a95dc2aea7 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
index 8a74efdb636062e28e309e130f557e5bc582d65f..240e7a23d81ff3cc2eb2facad8a126970775a152 100644 (file)
@@ -56,7 +56,7 @@ global_timer: timer@8000200 {
                        clocks = <&topclk ZX296702_A9_PERIPHCLK>;
                };
 
-               l2cc: l2-cache-controller@0x00c00000 {
+               l2cc: l2-cache-controller@c00000 {
                        compatible = "arm,pl310-cache";
                        reg = <0x00c00000 0x1000>;
                        cache-unified;
@@ -67,30 +67,30 @@ l2cc: l2-cache-controller@0x00c00000 {
                        arm,double-linefill-incr = <0>;
                };
 
-               pcu: pcu@0xa0008000 {
+               pcu: pcu@a0008000 {
                        compatible = "zte,zx296702-pcu";
                        reg = <0xa0008000 0x1000>;
                };
 
-               topclk: topclk@0x09800000 {
+               topclk: topclk@9800000 {
                        compatible = "zte,zx296702-topcrm-clk";
                        reg = <0x09800000 0x1000>;
                        #clock-cells = <1>;
                };
 
-               lsp1clk: lsp1clk@0x09400000 {
+               lsp1clk: lsp1clk@9400000 {
                        compatible = "zte,zx296702-lsp1crpm-clk";
                        reg = <0x09400000 0x1000>;
                        #clock-cells = <1>;
                };
 
-               lsp0clk: lsp0clk@0x0b000000 {
+               lsp0clk: lsp0clk@b000000 {
                        compatible = "zte,zx296702-lsp0crpm-clk";
                        reg = <0x0b000000 0x1000>;
                        #clock-cells = <1>;
                };
 
-               uart0: serial@0x09405000 {
+               uart0: serial@9405000 {
                        compatible = "zte,zx296702-uart";
                        reg = <0x09405000 0x1000>;
                        interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
@@ -98,7 +98,7 @@ uart0: serial@0x09405000 {
                        status = "disabled";
                };
 
-               uart1: serial@0x09406000 {
+               uart1: serial@9406000 {
                        compatible = "zte,zx296702-uart";
                        reg = <0x09406000 0x1000>;
                        interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
@@ -106,7 +106,7 @@ uart1: serial@0x09406000 {
                        status = "disabled";
                };
 
-               mmc0: mmc@0x09408000 {
+               mmc0: mmc@9408000 {
                        compatible = "snps,dw-mshc";
                        #address-cells = <1>;
                        #size-cells = <0>;
@@ -119,7 +119,7 @@ mmc0: mmc@0x09408000 {
                        status = "disabled";
                };
 
-               mmc1: mmc@0x0b003000 {
+               mmc1: mmc@b003000 {
                        compatible = "snps,dw-mshc";
                        #address-cells = <1>;
                        #size-cells = <0>;
@@ -132,7 +132,7 @@ mmc1: mmc@0x0b003000 {
                        status = "disabled";
                };
 
-               sysctrl: sysctrl@0xa0007000 {
+               sysctrl: sysctrl@a0007000 {
                        compatible = "zte,sysctrl", "syscon";
                        reg = <0xa0007000 0x1000>;
                };
index ee1f83b1a3324383b09106f01b830ea2802e210d..4c89a8e9a2e374cedfe92941661f38c2b6bc3b10 100644 (file)
@@ -69,7 +69,7 @@ static void clps711x_restart(enum reboot_mode mode, const char *cmd)
        soft_restart(0);
 }
 
-static const char *clps711x_compat[] __initconst = {
+static const char *const clps711x_compat[] __initconst = {
        "cirrus,ep7209",
        NULL
 };
index e457f299cd4430d359063788886d244a53d39f12..d6b11907380c83ddce9bc4998835a11e7a466ec7 100644 (file)
@@ -368,7 +368,7 @@ static struct spi_eeprom at25640a = {
        .flags          = EE_ADDR2,
 };
 
-static struct spi_board_info dm355_evm_spi_info[] __initconst = {
+static const struct spi_board_info dm355_evm_spi_info[] __initconst = {
        {
                .modalias       = "at25",
                .platform_data  = &at25640a,
index be997243447b949699fd116970ec0aacaffbf444..fad9a5611a5d276ce15f0fcfe9aedcc9a5d03802 100644 (file)
@@ -217,7 +217,7 @@ static struct spi_eeprom at25640a = {
        .flags          = EE_ADDR2,
 };
 
-static struct spi_board_info dm355_leopard_spi_info[] __initconst = {
+static const struct spi_board_info dm355_leopard_spi_info[] __initconst = {
        {
                .modalias       = "at25",
                .platform_data  = &at25640a,
index e75741fb2c1da095bd59ba4e0f411a6274ce6255..e3780986d2a3b40aff033f0a22759be80272cf39 100644 (file)
@@ -726,7 +726,7 @@ static struct spi_eeprom at25640 = {
        .flags          = EE_ADDR2,
 };
 
-static struct spi_board_info dm365_evm_spi_info[] __initconst = {
+static const struct spi_board_info dm365_evm_spi_info[] __initconst = {
        {
                .modalias       = "at25",
                .platform_data  = &at25640,
index 6b32dc527edcd58396dc94781992f3970cc8e793..2c20599cc3506326eddc2993afd8df2bd308e268 100644 (file)
@@ -41,7 +41,7 @@ config MACH_ARMADA_375
        depends on ARCH_MULTI_V7
        select ARMADA_370_XP_IRQ
        select ARM_ERRATA_720789
-       select ARM_ERRATA_753970
+       select PL310_ERRATA_753970
        select ARM_GIC
        select ARMADA_375_CLK
        select HAVE_ARM_SCU
@@ -57,7 +57,7 @@ config MACH_ARMADA_38X
        bool "Marvell Armada 380/385 boards"
        depends on ARCH_MULTI_V7
        select ARM_ERRATA_720789
-       select ARM_ERRATA_753970
+       select PL310_ERRATA_753970
        select ARM_GIC
        select ARM_GLOBAL_TIMER
        select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
index a80632641b39f8cb5dc8554aa2b2e85b6551e0ea..70c776ef7aa7321450def075b8f150ef1dbbbf9f 100644 (file)
@@ -165,14 +165,14 @@ spicc1: spi@15000 {
 
                        uart_A: serial@24000 {
                                compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart";
-                               reg = <0x0 0x24000 0x0 0x14>;
+                               reg = <0x0 0x24000 0x0 0x18>;
                                interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        uart_B: serial@23000 {
                                compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart";
-                               reg = <0x0 0x23000 0x0 0x14>;
+                               reg = <0x0 0x23000 0x0 0x18>;
                                interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
index 6cb3c2a52bafe5f0db8a0018b35e88998ed90f01..4ee2e7951482f43122620d2668b244de1744e6b9 100644 (file)
@@ -235,14 +235,14 @@ reset: reset-controller@4404 {
 
                        uart_A: serial@84c0 {
                                compatible = "amlogic,meson-gx-uart";
-                               reg = <0x0 0x84c0 0x0 0x14>;
+                               reg = <0x0 0x84c0 0x0 0x18>;
                                interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        uart_B: serial@84dc {
                                compatible = "amlogic,meson-gx-uart";
-                               reg = <0x0 0x84dc 0x0 0x14>;
+                               reg = <0x0 0x84dc 0x0 0x18>;
                                interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
@@ -287,7 +287,7 @@ pwm_ef: pwm@86c0 {
 
                        uart_C: serial@8700 {
                                compatible = "amlogic,meson-gx-uart";
-                               reg = <0x0 0x8700 0x0 0x14>;
+                               reg = <0x0 0x8700 0x0 0x18>;
                                interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
@@ -404,14 +404,14 @@ sec_AO: ao-secure@140 {
 
                        uart_AO: serial@4c0 {
                                compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart";
-                               reg = <0x0 0x004c0 0x0 0x14>;
+                               reg = <0x0 0x004c0 0x0 0x18>;
                                interrupts = <GIC_SPI 193 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        uart_AO_B: serial@4e0 {
                                compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart";
-                               reg = <0x0 0x004e0 0x0 0x14>;
+                               reg = <0x0 0x004e0 0x0 0x18>;
                                interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
index 4f355f17eed6bcc29dcc14fb4546fce6a083a57f..c8514110b9da2dc2f40988ad0ae733437e33420a 100644 (file)
@@ -631,6 +631,7 @@ internal_mdio: mdio@e40908ff {
 
                        internal_phy: ethernet-phy@8 {
                                compatible = "ethernet-phy-id0181.4400", "ethernet-phy-ieee802.3-c22";
+                               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
                                reg = <8>;
                                max-speed = <100>;
                        };
index 4220fbdcb24a7f18c5e3ab66574ba22c8c92c873..ff5c4c47b22bfecfa36f0090dc8be5c85b171271 100644 (file)
@@ -98,7 +98,7 @@ clk125mhz: uart_clk125mhz {
                clock-output-names = "clk125mhz";
        };
 
-       pci {
+       pcie@30000000 {
                compatible = "pci-host-ecam-generic";
                device_type = "pci";
                #interrupt-cells = <1>;
@@ -118,6 +118,7 @@ pci {
                ranges =
                  <0x02000000    0 0x40000000    0 0x40000000    0 0x20000000
                   0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>;
+               bus-range = <0 0xff>;
                interrupt-map-mask = <0 0 0 7>;
                interrupt-map =
                      /* addr  pin  ic   icaddr  icintr */
index e94fa1a531922ee6b160246c4399435574909641..047641fe294c64c9dbc04dcb477827814a809677 100644 (file)
@@ -51,7 +51,7 @@ reserved-memory {
                #size-cells = <2>;
                ranges;
 
-               ramoops@0x21f00000 {
+               ramoops@21f00000 {
                        compatible = "ramoops";
                        reg = <0x0 0x21f00000 0x0 0x00100000>;
                        record-size     = <0x00020000>;
index 9fbe4705ee88bfaf1eb12a7208de0c5899d7f9d3..94597e33c8065eb4b12ee805885988991d43e42c 100644 (file)
@@ -341,7 +341,7 @@ syscfg_pctl_a: syscfg_pctl_a@10005000 {
                        reg = <0 0x10005000 0 0x1000>;
                };
 
-               pio: pinctrl@0x10005000 {
+               pio: pinctrl@10005000 {
                        compatible = "mediatek,mt8173-pinctrl";
                        reg = <0 0x1000b000 0 0x1000>;
                        mediatek,pctl-regmap = <&syscfg_pctl_a>;
index 492a011f14f6cef933dc16ce9cf591d8cdc5c79e..1c8f1b86472de9c149b706502dcc552f19376ae5 100644 (file)
@@ -140,16 +140,16 @@ usb2_id: usb2-id {
                };
 
                agnoc@0 {
-                       qcom,pcie@00600000 {
+                       qcom,pcie@600000 {
                                perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
                        };
 
-                       qcom,pcie@00608000 {
+                       qcom,pcie@608000 {
                                status = "okay";
                                perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
                        };
 
-                       qcom,pcie@00610000 {
+                       qcom,pcie@610000 {
                                status = "okay";
                                perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
                        };
index 4b2afcc4fdf4791da816c6bba3c6f2ef7741ad8d..0a6f7952bbb18d65847261957715e331756c46c4 100644 (file)
@@ -840,7 +840,7 @@ agnoc@0 {
                        #size-cells = <1>;
                        ranges;
 
-                       pcie0: qcom,pcie@00600000 {
+                       pcie0: qcom,pcie@600000 {
                                compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
                                status = "disabled";
                                power-domains = <&gcc PCIE0_GDSC>;
@@ -893,7 +893,7 @@ pcie0: qcom,pcie@00600000 {
 
                        };
 
-                       pcie1: qcom,pcie@00608000 {
+                       pcie1: qcom,pcie@608000 {
                                compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
                                power-domains = <&gcc PCIE1_GDSC>;
                                bus-range = <0x00 0xff>;
@@ -946,7 +946,7 @@ pcie1: qcom,pcie@00608000 {
                                                "bus_slave";
                        };
 
-                       pcie2: qcom,pcie@00610000 {
+                       pcie2: qcom,pcie@610000 {
                                compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
                                power-domains = <&gcc PCIE2_GDSC>;
                                bus-range = <0x00 0xff>;
index be7bd19c87ec23949c4dcbdfe8bbd4972dbae00a..eda8c5f629fc8553af2cdccaede5dfd209cecf94 100644 (file)
@@ -20,7 +20,7 @@
 
 #define MPIDR_UP_BITMASK       (0x1 << 30)
 #define MPIDR_MT_BITMASK       (0x1 << 24)
-#define MPIDR_HWID_BITMASK     0xff00ffffff
+#define MPIDR_HWID_BITMASK     0xff00ffffffUL
 
 #define MPIDR_LEVEL_BITS_SHIFT 3
 #define MPIDR_LEVEL_BITS       (1 << MPIDR_LEVEL_BITS_SHIFT)
index 1dca41bea16ad61fc8fe6f2be528ba452bffdf27..e73f6856962461952287b244831395200cdc3853 100644 (file)
@@ -22,7 +22,7 @@
 
 static inline pte_t huge_ptep_get(pte_t *ptep)
 {
-       return *ptep;
+       return READ_ONCE(*ptep);
 }
 
 
index 9679067a15746ce921ba5a138c6508c0a4972d0a..7faed6e48b46212709485b7225c512f3fb99831e 100644 (file)
@@ -185,42 +185,42 @@ static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
        return pmd;
 }
 
-static inline void kvm_set_s2pte_readonly(pte_t *pte)
+static inline void kvm_set_s2pte_readonly(pte_t *ptep)
 {
        pteval_t old_pteval, pteval;
 
-       pteval = READ_ONCE(pte_val(*pte));
+       pteval = READ_ONCE(pte_val(*ptep));
        do {
                old_pteval = pteval;
                pteval &= ~PTE_S2_RDWR;
                pteval |= PTE_S2_RDONLY;
-               pteval = cmpxchg_relaxed(&pte_val(*pte), old_pteval, pteval);
+               pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
        } while (pteval != old_pteval);
 }
 
-static inline bool kvm_s2pte_readonly(pte_t *pte)
+static inline bool kvm_s2pte_readonly(pte_t *ptep)
 {
-       return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
+       return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY;
 }
 
-static inline bool kvm_s2pte_exec(pte_t *pte)
+static inline bool kvm_s2pte_exec(pte_t *ptep)
 {
-       return !(pte_val(*pte) & PTE_S2_XN);
+       return !(READ_ONCE(pte_val(*ptep)) & PTE_S2_XN);
 }
 
-static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
+static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp)
 {
-       kvm_set_s2pte_readonly((pte_t *)pmd);
+       kvm_set_s2pte_readonly((pte_t *)pmdp);
 }
 
-static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
+static inline bool kvm_s2pmd_readonly(pmd_t *pmdp)
 {
-       return kvm_s2pte_readonly((pte_t *)pmd);
+       return kvm_s2pte_readonly((pte_t *)pmdp);
 }
 
-static inline bool kvm_s2pmd_exec(pmd_t *pmd)
+static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
 {
-       return !(pmd_val(*pmd) & PMD_S2_XN);
+       return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
 }
 
 static inline bool kvm_page_empty(void *ptr)
index 8d3331985d2e34b2099eab6cec8b456d40983052..39ec0b8a689eea3e495029685bed047737d64c5e 100644 (file)
@@ -141,13 +141,13 @@ static inline void cpu_install_idmap(void)
  * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
  * avoiding the possibility of conflicting TLB entries being allocated.
  */
-static inline void cpu_replace_ttbr1(pgd_t *pgd)
+static inline void cpu_replace_ttbr1(pgd_t *pgdp)
 {
        typedef void (ttbr_replace_func)(phys_addr_t);
        extern ttbr_replace_func idmap_cpu_replace_ttbr1;
        ttbr_replace_func *replace_phys;
 
-       phys_addr_t pgd_phys = virt_to_phys(pgd);
+       phys_addr_t pgd_phys = virt_to_phys(pgdp);
 
        replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
 
index e9d9f1b006efec5708fd0f33c006d2e017711a4d..2e05bcd944c8395b9fd5af993b6214054352aefc 100644 (file)
@@ -36,23 +36,23 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
        return (pmd_t *)__get_free_page(PGALLOC_GFP);
 }
 
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
 {
-       BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-       free_page((unsigned long)pmd);
+       BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1));
+       free_page((unsigned long)pmdp);
 }
 
-static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
 {
-       set_pud(pud, __pud(__phys_to_pud_val(pmd) | prot));
+       set_pud(pudp, __pud(__phys_to_pud_val(pmdp) | prot));
 }
 
-static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
 {
-       __pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE);
+       __pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE);
 }
 #else
-static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
 {
        BUILD_BUG();
 }
@@ -65,30 +65,30 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
        return (pud_t *)__get_free_page(PGALLOC_GFP);
 }
 
-static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
 {
-       BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-       free_page((unsigned long)pud);
+       BUG_ON((unsigned long)pudp & (PAGE_SIZE-1));
+       free_page((unsigned long)pudp);
 }
 
-static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
 {
-       set_pgd(pgdp, __pgd(__phys_to_pgd_val(pud) | prot));
+       set_pgd(pgdp, __pgd(__phys_to_pgd_val(pudp) | prot));
 }
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, pud_t *pudp)
 {
-       __pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE);
+       __pgd_populate(pgdp, __pa(pudp), PUD_TYPE_TABLE);
 }
 #else
-static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
 {
        BUILD_BUG();
 }
 #endif /* CONFIG_PGTABLE_LEVELS > 3 */
 
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
 
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
@@ -114,10 +114,10 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 /*
  * Free a PTE table.
  */
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *ptep)
 {
-       if (pte)
-               free_page((unsigned long)pte);
+       if (ptep)
+               free_page((unsigned long)ptep);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
@@ -126,10 +126,10 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
        __free_page(pte);
 }
 
-static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
+static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
                                  pmdval_t prot)
 {
-       set_pmd(pmdp, __pmd(__phys_to_pmd_val(pte) | prot));
+       set_pmd(pmdp, __pmd(__phys_to_pmd_val(ptep) | prot));
 }
 
 /*
index 094374c82db088816d6a35ec75e1658dfc446ec4..7e2c27e63cd894371655a569046faaa67cfc1837 100644 (file)
@@ -218,7 +218,7 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
-       *ptep = pte;
+       WRITE_ONCE(*ptep, pte);
 
        /*
         * Only if the new pte is valid and kernel, otherwise TLB maintenance
@@ -250,6 +250,8 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
 {
+       pte_t old_pte;
+
        if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
                __sync_icache_dcache(pte, addr);
 
@@ -258,14 +260,15 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
         * hardware updates of the pte (ptep_set_access_flags safely changes
         * valid ptes without going through an invalid entry).
         */
-       if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(*ptep) && pte_valid(pte) &&
+       old_pte = READ_ONCE(*ptep);
+       if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(old_pte) && pte_valid(pte) &&
           (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
                VM_WARN_ONCE(!pte_young(pte),
                             "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
-                            __func__, pte_val(*ptep), pte_val(pte));
-               VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
+                            __func__, pte_val(old_pte), pte_val(pte));
+               VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
                             "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
-                            __func__, pte_val(*ptep), pte_val(pte));
+                            __func__, pte_val(old_pte), pte_val(pte));
        }
 
        set_pte(ptep, pte);
@@ -431,7 +434,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-       *pmdp = pmd;
+       WRITE_ONCE(*pmdp, pmd);
        dsb(ishst);
        isb();
 }
@@ -482,7 +485,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
-       *pudp = pud;
+       WRITE_ONCE(*pudp, pud);
        dsb(ishst);
        isb();
 }
@@ -500,7 +503,7 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
 /* Find an entry in the second-level page table. */
 #define pmd_index(addr)                (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 
-#define pmd_offset_phys(dir, addr)     (pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
+#define pmd_offset_phys(dir, addr)     (pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
 #define pmd_offset(dir, addr)          ((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
 
 #define pmd_set_fixmap(addr)           ((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
@@ -535,7 +538,7 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
 
 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-       *pgdp = pgd;
+       WRITE_ONCE(*pgdp, pgd);
        dsb(ishst);
 }
 
@@ -552,7 +555,7 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
 /* Find an entry in the frst-level page table. */
 #define pud_index(addr)                (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
 
-#define pud_offset_phys(dir, addr)     (pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
+#define pud_offset_phys(dir, addr)     (pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
 #define pud_offset(dir, addr)          ((pud_t *)__va(pud_offset_phys((dir), (addr))))
 
 #define pud_set_fixmap(addr)           ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
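
The common thread in this hunk (and in the hugetlb, efi and hibernate changes
elsewhere in this merge) is that page-table entries can be read locklessly
while another CPU updates them, so each dereference becomes a single
READ_ONCE() snapshot and each store a WRITE_ONCE(). A hypothetical walker
fragment showing the idiom (walk_one_pmd() is not a kernel function):

    #include <linux/compiler.h>     /* READ_ONCE() */
    #include <asm/pgtable.h>

    static int walk_one_pmd(pmd_t *pmdp)
    {
            /*
             * Snapshot the entry exactly once; testing *pmdp twice would
             * let the compiler reload it and observe two different values
             * if a concurrent set_pmd() (now a WRITE_ONCE()) races with us.
             */
            pmd_t pmd = READ_ONCE(*pmdp);

            if (pmd_none(pmd))
                    return 0;       /* nothing mapped here */
            if (pmd_table(pmd))
                    return 1;       /* next level is a pte table */
            return 2;               /* block (huge) mapping */
    }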
index 07823595b7f01690823da724584965bca0872588..52f15cd896e11ad631ac3092d9709337a9629bb4 100644 (file)
@@ -406,6 +406,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
                MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
        },
+       {
+               .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+               MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+               .enable = qcom_enable_link_stack_sanitization,
+       },
+       {
+               .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+               MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+       },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
index f85ac58d08a35676f38fa2a6d7b0887fb6f1d2ec..a8bf1c892b9065ca40ed4b263317deced2b8d693 100644 (file)
@@ -90,7 +90,7 @@ static int __init set_permissions(pte_t *ptep, pgtable_t token,
                                  unsigned long addr, void *data)
 {
        efi_memory_desc_t *md = data;
-       pte_t pte = *ptep;
+       pte_t pte = READ_ONCE(*ptep);
 
        if (md->attribute & EFI_MEMORY_RO)
                pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
index f20cf7e992495adffcd2049c4c40f9ec6798c99f..1ec5f28c39fc56c4aae85cc5801bd513cc3ea2c3 100644 (file)
@@ -202,10 +202,10 @@ static int create_safe_exec_page(void *src_start, size_t length,
                                 gfp_t mask)
 {
        int rc = 0;
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
+       pgd_t *pgdp;
+       pud_t *pudp;
+       pmd_t *pmdp;
+       pte_t *ptep;
        unsigned long dst = (unsigned long)allocator(mask);
 
        if (!dst) {
@@ -216,38 +216,38 @@ static int create_safe_exec_page(void *src_start, size_t length,
        memcpy((void *)dst, src_start, length);
        flush_icache_range(dst, dst + length);
 
-       pgd = pgd_offset_raw(allocator(mask), dst_addr);
-       if (pgd_none(*pgd)) {
-               pud = allocator(mask);
-               if (!pud) {
+       pgdp = pgd_offset_raw(allocator(mask), dst_addr);
+       if (pgd_none(READ_ONCE(*pgdp))) {
+               pudp = allocator(mask);
+               if (!pudp) {
                        rc = -ENOMEM;
                        goto out;
                }
-               pgd_populate(&init_mm, pgd, pud);
+               pgd_populate(&init_mm, pgdp, pudp);
        }
 
-       pud = pud_offset(pgd, dst_addr);
-       if (pud_none(*pud)) {
-               pmd = allocator(mask);
-               if (!pmd) {
+       pudp = pud_offset(pgdp, dst_addr);
+       if (pud_none(READ_ONCE(*pudp))) {
+               pmdp = allocator(mask);
+               if (!pmdp) {
                        rc = -ENOMEM;
                        goto out;
                }
-               pud_populate(&init_mm, pud, pmd);
+               pud_populate(&init_mm, pudp, pmdp);
        }
 
-       pmd = pmd_offset(pud, dst_addr);
-       if (pmd_none(*pmd)) {
-               pte = allocator(mask);
-               if (!pte) {
+       pmdp = pmd_offset(pudp, dst_addr);
+       if (pmd_none(READ_ONCE(*pmdp))) {
+               ptep = allocator(mask);
+               if (!ptep) {
                        rc = -ENOMEM;
                        goto out;
                }
-               pmd_populate_kernel(&init_mm, pmd, pte);
+               pmd_populate_kernel(&init_mm, pmdp, ptep);
        }
 
-       pte = pte_offset_kernel(pmd, dst_addr);
-       set_pte(pte, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
+       ptep = pte_offset_kernel(pmdp, dst_addr);
+       set_pte(ptep, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
 
        /*
         * Load our new page tables. A strict BBM approach requires that we
@@ -263,7 +263,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
         */
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
-       write_sysreg(phys_to_ttbr(virt_to_phys(pgd)), ttbr0_el1);
+       write_sysreg(phys_to_ttbr(virt_to_phys(pgdp)), ttbr0_el1);
        isb();
 
        *phys_dst_addr = virt_to_phys((void *)dst);
@@ -320,9 +320,9 @@ int swsusp_arch_suspend(void)
        return ret;
 }
 
-static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
+static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
 {
-       pte_t pte = *src_pte;
+       pte_t pte = READ_ONCE(*src_ptep);
 
        if (pte_valid(pte)) {
                /*
@@ -330,7 +330,7 @@ static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
                 * read only (code, rodata). Clear the RDONLY bit from
                 * the temporary mappings we use during restore.
                 */
-               set_pte(dst_pte, pte_mkwrite(pte));
+               set_pte(dst_ptep, pte_mkwrite(pte));
        } else if (debug_pagealloc_enabled() && !pte_none(pte)) {
                /*
                 * debug_pagealloc will have removed the PTE_VALID bit if
@@ -343,112 +343,116 @@ static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
                 */
                BUG_ON(!pfn_valid(pte_pfn(pte)));
 
-               set_pte(dst_pte, pte_mkpresent(pte_mkwrite(pte)));
+               set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
        }
 }
 
-static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
+static int copy_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start,
                    unsigned long end)
 {
-       pte_t *src_pte;
-       pte_t *dst_pte;
+       pte_t *src_ptep;
+       pte_t *dst_ptep;
        unsigned long addr = start;
 
-       dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC);
-       if (!dst_pte)
+       dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC);
+       if (!dst_ptep)
                return -ENOMEM;
-       pmd_populate_kernel(&init_mm, dst_pmd, dst_pte);
-       dst_pte = pte_offset_kernel(dst_pmd, start);
+       pmd_populate_kernel(&init_mm, dst_pmdp, dst_ptep);
+       dst_ptep = pte_offset_kernel(dst_pmdp, start);
 
-       src_pte = pte_offset_kernel(src_pmd, start);
+       src_ptep = pte_offset_kernel(src_pmdp, start);
        do {
-               _copy_pte(dst_pte, src_pte, addr);
-       } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
+               _copy_pte(dst_ptep, src_ptep, addr);
+       } while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);
 
        return 0;
 }
 
-static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start,
+static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
                    unsigned long end)
 {
-       pmd_t *src_pmd;
-       pmd_t *dst_pmd;
+       pmd_t *src_pmdp;
+       pmd_t *dst_pmdp;
        unsigned long next;
        unsigned long addr = start;
 
-       if (pud_none(*dst_pud)) {
-               dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
-               if (!dst_pmd)
+       if (pud_none(READ_ONCE(*dst_pudp))) {
+               dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
+               if (!dst_pmdp)
                        return -ENOMEM;
-               pud_populate(&init_mm, dst_pud, dst_pmd);
+               pud_populate(&init_mm, dst_pudp, dst_pmdp);
        }
-       dst_pmd = pmd_offset(dst_pud, start);
+       dst_pmdp = pmd_offset(dst_pudp, start);
 
-       src_pmd = pmd_offset(src_pud, start);
+       src_pmdp = pmd_offset(src_pudp, start);
        do {
+               pmd_t pmd = READ_ONCE(*src_pmdp);
+
                next = pmd_addr_end(addr, end);
-               if (pmd_none(*src_pmd))
+               if (pmd_none(pmd))
                        continue;
-               if (pmd_table(*src_pmd)) {
-                       if (copy_pte(dst_pmd, src_pmd, addr, next))
+               if (pmd_table(pmd)) {
+                       if (copy_pte(dst_pmdp, src_pmdp, addr, next))
                                return -ENOMEM;
                } else {
-                       set_pmd(dst_pmd,
-                               __pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY));
+                       set_pmd(dst_pmdp,
+                               __pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
                }
-       } while (dst_pmd++, src_pmd++, addr = next, addr != end);
+       } while (dst_pmdp++, src_pmdp++, addr = next, addr != end);
 
        return 0;
 }
 
-static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start,
+static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
                    unsigned long end)
 {
-       pud_t *dst_pud;
-       pud_t *src_pud;
+       pud_t *dst_pudp;
+       pud_t *src_pudp;
        unsigned long next;
        unsigned long addr = start;
 
-       if (pgd_none(*dst_pgd)) {
-               dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC);
-               if (!dst_pud)
+       if (pgd_none(READ_ONCE(*dst_pgdp))) {
+               dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
+               if (!dst_pudp)
                        return -ENOMEM;
-               pgd_populate(&init_mm, dst_pgd, dst_pud);
+               pgd_populate(&init_mm, dst_pgdp, dst_pudp);
        }
-       dst_pud = pud_offset(dst_pgd, start);
+       dst_pudp = pud_offset(dst_pgdp, start);
 
-       src_pud = pud_offset(src_pgd, start);
+       src_pudp = pud_offset(src_pgdp, start);
        do {
+               pud_t pud = READ_ONCE(*src_pudp);
+
                next = pud_addr_end(addr, end);
-               if (pud_none(*src_pud))
+               if (pud_none(pud))
                        continue;
-               if (pud_table(*(src_pud))) {
-                       if (copy_pmd(dst_pud, src_pud, addr, next))
+               if (pud_table(pud)) {
+                       if (copy_pmd(dst_pudp, src_pudp, addr, next))
                                return -ENOMEM;
                } else {
-                       set_pud(dst_pud,
-                               __pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY));
+                       set_pud(dst_pudp,
+                               __pud(pud_val(pud) & ~PMD_SECT_RDONLY));
                }
-       } while (dst_pud++, src_pud++, addr = next, addr != end);
+       } while (dst_pudp++, src_pudp++, addr = next, addr != end);
 
        return 0;
 }
 
-static int copy_page_tables(pgd_t *dst_pgd, unsigned long start,
+static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
                            unsigned long end)
 {
        unsigned long next;
        unsigned long addr = start;
-       pgd_t *src_pgd = pgd_offset_k(start);
+       pgd_t *src_pgdp = pgd_offset_k(start);
 
-       dst_pgd = pgd_offset_raw(dst_pgd, start);
+       dst_pgdp = pgd_offset_raw(dst_pgdp, start);
        do {
                next = pgd_addr_end(addr, end);
-               if (pgd_none(*src_pgd))
+               if (pgd_none(READ_ONCE(*src_pgdp)))
                        continue;
-               if (copy_pud(dst_pgd, src_pgd, addr, next))
+               if (copy_pud(dst_pgdp, src_pgdp, addr, next))
                        return -ENOMEM;
-       } while (dst_pgd++, src_pgd++, addr = next, addr != end);
+       } while (dst_pgdp++, src_pgdp++, addr = next, addr != end);
 
        return 0;
 }
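
Beyond the READ_ONCE() conversions, the hibernate.c hunks establish the naming
convention used throughout this series: a trailing p (pgdp, pudp, pmdp, ptep)
marks the table pointer, freeing the bare name for the dereferenced value. A
reduced sketch of the resulting walker shape (types simplified, loop body
elided):

    #define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

    typedef struct { unsigned long val; } pmd_t;

    static int pmd_none(pmd_t pmd) { return pmd.val == 0; }

    static void walk(pmd_t *pmdp, int n)
    {
            for (int i = 0; i < n; i++, pmdp++) {
                    pmd_t pmd = READ_ONCE(*pmdp); /* pmdp: pointer, pmd: value */

                    if (pmd_none(pmd))
                            continue;
                    /* every later test sees this snapshot, never *pmdp again */
            }
    }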
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 116252a8d3a5507295ed30abfd740e1cc07446ae..870f4b1587f97496c3fd427fe6b652bae1a96cd7 100644
@@ -407,8 +407,10 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
                u32 midr = read_cpuid_id();
 
                /* Apply BTAC predictors mitigation to all Falkor chips */
-               if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)
+               if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
+                   ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) {
                        __qcom_hyp_sanitize_btac_predictors();
+               }
        }
 
        fp_enabled = __fpsimd_enabled();
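
The KVM hunk is an independent fix riding in this branch: the BTAC-predictor
workaround previously matched only the pre-production Falkor part, so
production parts (MIDR_QCOM_FALKOR) silently skipped the mitigation.
Schematically, with placeholder constants standing in for the real
asm/cputype.h encodings:

    #include <stdint.h>
    #include <stdbool.h>

    /* Placeholder values; the real encodings live in asm/cputype.h. */
    #define MIDR_CPU_MODEL_MASK 0xff0ffff0u
    #define MIDR_QCOM_FALKOR    0x51002800u
    #define MIDR_QCOM_FALKOR_V1 0x51008000u

    static bool needs_btac_workaround(uint32_t midr)
    {
            uint32_t model = midr & MIDR_CPU_MODEL_MASK;

            /* Both the v1 and the production part need the mitigation. */
            return model == MIDR_QCOM_FALKOR ||
                   model == MIDR_QCOM_FALKOR_V1;
    }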
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 7b60d62ac5939e83c8e153ec1c3a0447565f23eb..65dfc8571bf8397c3f2a6297d21b5112794461e1 100644
@@ -286,48 +286,52 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 
 }
 
-static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
+static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start)
 {
-       pte_t *pte = pte_offset_kernel(pmd, 0UL);
+       pte_t *ptep = pte_offset_kernel(pmdp, 0UL);
        unsigned long addr;
        unsigned i;
 
-       for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
+       for (i = 0; i < PTRS_PER_PTE; i++, ptep++) {
                addr = start + i * PAGE_SIZE;
-               note_page(st, addr, 4, pte_val(*pte));
+               note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
        }
 }
 
-static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
+static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start)
 {
-       pmd_t *pmd = pmd_offset(pud, 0UL);
+       pmd_t *pmdp = pmd_offset(pudp, 0UL);
        unsigned long addr;
        unsigned i;
 
-       for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
+       for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) {
+               pmd_t pmd = READ_ONCE(*pmdp);
+
                addr = start + i * PMD_SIZE;
-               if (pmd_none(*pmd) || pmd_sect(*pmd)) {
-                       note_page(st, addr, 3, pmd_val(*pmd));
+               if (pmd_none(pmd) || pmd_sect(pmd)) {
+                       note_page(st, addr, 3, pmd_val(pmd));
                } else {
-                       BUG_ON(pmd_bad(*pmd));
-                       walk_pte(st, pmd, addr);
+                       BUG_ON(pmd_bad(pmd));
+                       walk_pte(st, pmdp, addr);
                }
        }
 }
 
-static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start)
 {
-       pud_t *pud = pud_offset(pgd, 0UL);
+       pud_t *pudp = pud_offset(pgdp, 0UL);
        unsigned long addr;
        unsigned i;
 
-       for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
+       for (i = 0; i < PTRS_PER_PUD; i++, pudp++) {
+               pud_t pud = READ_ONCE(*pudp);
+
                addr = start + i * PUD_SIZE;
-               if (pud_none(*pud) || pud_sect(*pud)) {
-                       note_page(st, addr, 2, pud_val(*pud));
+               if (pud_none(pud) || pud_sect(pud)) {
+                       note_page(st, addr, 2, pud_val(pud));
                } else {
-                       BUG_ON(pud_bad(*pud));
-                       walk_pmd(st, pud, addr);
+                       BUG_ON(pud_bad(pud));
+                       walk_pmd(st, pudp, addr);
                }
        }
 }
@@ -335,17 +339,19 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
 static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
                     unsigned long start)
 {
-       pgd_t *pgd = pgd_offset(mm, 0UL);
+       pgd_t *pgdp = pgd_offset(mm, 0UL);
        unsigned i;
        unsigned long addr;
 
-       for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
+       for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) {
+               pgd_t pgd = READ_ONCE(*pgdp);
+
                addr = start + i * PGDIR_SIZE;
-               if (pgd_none(*pgd)) {
-                       note_page(st, addr, 1, pgd_val(*pgd));
+               if (pgd_none(pgd)) {
+                       note_page(st, addr, 1, pgd_val(pgd));
                } else {
-                       BUG_ON(pgd_bad(*pgd));
-                       walk_pud(st, pgd, addr);
+                       BUG_ON(pgd_bad(pgd));
+                       walk_pud(st, pgdp, addr);
                }
        }
 }
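
The dump.c walker illustrates what the snapshots buy. With a plain *pmdp, the
compiler may legally reload the entry between the pmd_none()/pmd_sect() test
and the pmd_val() that follows, so a concurrent split or clear can be observed
half-applied. The hazard in miniature (illustrative types, note() hypothetical):

    #define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

    typedef struct { unsigned long val; } pmd_t;

    extern void note(unsigned long v);

    static void racy(pmd_t *pmdp)
    {
            if (pmdp->val)           /* load #1 */
                    note(pmdp->val); /* load #2 may see a different entry */
    }

    static void safe(pmd_t *pmdp)
    {
            pmd_t pmd = READ_ONCE(*pmdp); /* exactly one load */

            if (pmd.val)
                    note(pmd.val);        /* same value the test saw */
    }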
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index f76bb2c3c9434dc29c572d4103f9eb10b42dc278..bff11553eb050306dfa9df7fec0682f6a03cbf61 100644
@@ -130,7 +130,8 @@ static void mem_abort_decode(unsigned int esr)
 void show_pte(unsigned long addr)
 {
        struct mm_struct *mm;
-       pgd_t *pgd;
+       pgd_t *pgdp;
+       pgd_t pgd;
 
        if (addr < TASK_SIZE) {
                /* TTBR0 */
@@ -149,33 +150,37 @@ void show_pte(unsigned long addr)
                return;
        }
 
-       pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgd = %p\n",
+       pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n",
                 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
                 VA_BITS, mm->pgd);
-       pgd = pgd_offset(mm, addr);
-       pr_alert("[%016lx] *pgd=%016llx", addr, pgd_val(*pgd));
+       pgdp = pgd_offset(mm, addr);
+       pgd = READ_ONCE(*pgdp);
+       pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
 
        do {
-               pud_t *pud;
-               pmd_t *pmd;
-               pte_t *pte;
+               pud_t *pudp, pud;
+               pmd_t *pmdp, pmd;
+               pte_t *ptep, pte;
 
-               if (pgd_none(*pgd) || pgd_bad(*pgd))
+               if (pgd_none(pgd) || pgd_bad(pgd))
                        break;
 
-               pud = pud_offset(pgd, addr);
-               pr_cont(", *pud=%016llx", pud_val(*pud));
-               if (pud_none(*pud) || pud_bad(*pud))
+               pudp = pud_offset(pgdp, addr);
+               pud = READ_ONCE(*pudp);
+               pr_cont(", pud=%016llx", pud_val(pud));
+               if (pud_none(pud) || pud_bad(pud))
                        break;
 
-               pmd = pmd_offset(pud, addr);
-               pr_cont(", *pmd=%016llx", pmd_val(*pmd));
-               if (pmd_none(*pmd) || pmd_bad(*pmd))
+               pmdp = pmd_offset(pudp, addr);
+               pmd = READ_ONCE(*pmdp);
+               pr_cont(", pmd=%016llx", pmd_val(pmd));
+               if (pmd_none(pmd) || pmd_bad(pmd))
                        break;
 
-               pte = pte_offset_map(pmd, addr);
-               pr_cont(", *pte=%016llx", pte_val(*pte));
-               pte_unmap(pte);
+               ptep = pte_offset_map(pmdp, addr);
+               pte = READ_ONCE(*ptep);
+               pr_cont(", pte=%016llx", pte_val(pte));
+               pte_unmap(ptep);
        } while(0);
 
        pr_cont("\n");
@@ -196,8 +201,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
                          pte_t entry, int dirty)
 {
        pteval_t old_pteval, pteval;
+       pte_t pte = READ_ONCE(*ptep);
 
-       if (pte_same(*ptep, entry))
+       if (pte_same(pte, entry))
                return 0;
 
        /* only preserve the access flags and write permission */
@@ -210,7 +216,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
         * (calculated as: a & b == ~(~a | ~b)).
         */
        pte_val(entry) ^= PTE_RDONLY;
-       pteval = READ_ONCE(pte_val(*ptep));
+       pteval = pte_val(pte);
        do {
                old_pteval = pteval;
                pteval ^= PTE_RDONLY;
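
In ptep_set_access_flags() the snapshot taken at the top now also seeds the
update loop instead of a second read of the table. The general shape of such a
loop, modelled with GCC atomics (the real function only preserves the access
flags and write permission, elided here):

    #include <stdint.h>

    typedef uint64_t pteval_t;

    static void update(pteval_t *ptep, pteval_t set_mask)
    {
            /* One snapshot seeds the loop... */
            pteval_t old = __atomic_load_n(ptep, __ATOMIC_RELAXED);
            pteval_t new;

            do {
                    new = old | set_mask;
                    /* ...and the CAS refreshes `old` if the entry moved. */
            } while (!__atomic_compare_exchange_n(ptep, &old, new, 0,
                                                  __ATOMIC_RELAXED,
                                                  __ATOMIC_RELAXED));
    }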
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 6cb0fa92a65162ecce1e84b8ef09177fcc54785d..ecc6818191df961eac49e6ca0c7d8b8d38d0c855 100644
@@ -54,14 +54,14 @@ static inline pgprot_t pte_pgprot(pte_t pte)
 static int find_num_contig(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, size_t *pgsize)
 {
-       pgd_t *pgd = pgd_offset(mm, addr);
-       pud_t *pud;
-       pmd_t *pmd;
+       pgd_t *pgdp = pgd_offset(mm, addr);
+       pud_t *pudp;
+       pmd_t *pmdp;
 
        *pgsize = PAGE_SIZE;
-       pud = pud_offset(pgd, addr);
-       pmd = pmd_offset(pud, addr);
-       if ((pte_t *)pmd == ptep) {
+       pudp = pud_offset(pgdp, addr);
+       pmdp = pmd_offset(pudp, addr);
+       if ((pte_t *)pmdp == ptep) {
                *pgsize = PMD_SIZE;
                return CONT_PMDS;
        }
@@ -181,11 +181,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 
        clear_flush(mm, addr, ptep, pgsize, ncontig);
 
-       for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) {
-               pr_debug("%s: set pte %p to 0x%llx\n", __func__, ptep,
-                        pte_val(pfn_pte(pfn, hugeprot)));
+       for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
-       }
 }
 
 void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
@@ -203,20 +200,20 @@ void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
 pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
 {
-       pgd_t *pgd;
-       pud_t *pud;
-       pte_t *pte = NULL;
-
-       pr_debug("%s: addr:0x%lx sz:0x%lx\n", __func__, addr, sz);
-       pgd = pgd_offset(mm, addr);
-       pud = pud_alloc(mm, pgd, addr);
-       if (!pud)
+       pgd_t *pgdp;
+       pud_t *pudp;
+       pmd_t *pmdp;
+       pte_t *ptep = NULL;
+
+       pgdp = pgd_offset(mm, addr);
+       pudp = pud_alloc(mm, pgdp, addr);
+       if (!pudp)
                return NULL;
 
        if (sz == PUD_SIZE) {
-               pte = (pte_t *)pud;
+               ptep = (pte_t *)pudp;
        } else if (sz == (PAGE_SIZE * CONT_PTES)) {
-               pmd_t *pmd = pmd_alloc(mm, pud, addr);
+               pmdp = pmd_alloc(mm, pudp, addr);
 
                WARN_ON(addr & (sz - 1));
                /*
@@ -226,60 +223,55 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
                 * will be no pte_unmap() to correspond with this
                 * pte_alloc_map().
                 */
-               pte = pte_alloc_map(mm, pmd, addr);
+               ptep = pte_alloc_map(mm, pmdp, addr);
        } else if (sz == PMD_SIZE) {
                if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
-                   pud_none(*pud))
-                       pte = huge_pmd_share(mm, addr, pud);
+                   pud_none(READ_ONCE(*pudp)))
+                       ptep = huge_pmd_share(mm, addr, pudp);
                else
-                       pte = (pte_t *)pmd_alloc(mm, pud, addr);
+                       ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
        } else if (sz == (PMD_SIZE * CONT_PMDS)) {
-               pmd_t *pmd;
-
-               pmd = pmd_alloc(mm, pud, addr);
+               pmdp = pmd_alloc(mm, pudp, addr);
                WARN_ON(addr & (sz - 1));
-               return (pte_t *)pmd;
+               return (pte_t *)pmdp;
        }
 
-       pr_debug("%s: addr:0x%lx sz:0x%lx ret pte=%p/0x%llx\n", __func__, addr,
-              sz, pte, pte_val(*pte));
-       return pte;
+       return ptep;
 }
 
 pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
 {
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
+       pgd_t *pgdp;
+       pud_t *pudp, pud;
+       pmd_t *pmdp, pmd;
 
-       pgd = pgd_offset(mm, addr);
-       pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd);
-       if (!pgd_present(*pgd))
+       pgdp = pgd_offset(mm, addr);
+       if (!pgd_present(READ_ONCE(*pgdp)))
                return NULL;
 
-       pud = pud_offset(pgd, addr);
-       if (sz != PUD_SIZE && pud_none(*pud))
+       pudp = pud_offset(pgdp, addr);
+       pud = READ_ONCE(*pudp);
+       if (sz != PUD_SIZE && pud_none(pud))
                return NULL;
        /* hugepage or swap? */
-       if (pud_huge(*pud) || !pud_present(*pud))
-               return (pte_t *)pud;
+       if (pud_huge(pud) || !pud_present(pud))
+               return (pte_t *)pudp;
        /* table; check the next level */
 
        if (sz == CONT_PMD_SIZE)
                addr &= CONT_PMD_MASK;
 
-       pmd = pmd_offset(pud, addr);
+       pmdp = pmd_offset(pudp, addr);
+       pmd = READ_ONCE(*pmdp);
        if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
-           pmd_none(*pmd))
+           pmd_none(pmd))
                return NULL;
-       if (pmd_huge(*pmd) || !pmd_present(*pmd))
-               return (pte_t *)pmd;
+       if (pmd_huge(pmd) || !pmd_present(pmd))
+               return (pte_t *)pmdp;
 
-       if (sz == CONT_PTE_SIZE) {
-               pte_t *pte = pte_offset_kernel(pmd, (addr & CONT_PTE_MASK));
-               return pte;
-       }
+       if (sz == CONT_PTE_SIZE)
+               return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));
 
        return NULL;
 }
@@ -367,7 +359,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
        size_t pgsize;
        pte_t pte;
 
-       if (!pte_cont(*ptep)) {
+       if (!pte_cont(READ_ONCE(*ptep))) {
                ptep_set_wrprotect(mm, addr, ptep);
                return;
        }
@@ -391,7 +383,7 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
        size_t pgsize;
        int ncontig;
 
-       if (!pte_cont(*ptep)) {
+       if (!pte_cont(READ_ONCE(*ptep))) {
                ptep_clear_flush(vma, addr, ptep);
                return;
        }
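
huge_pte_offset() shows the declaration idiom the rename enables, pud_t *pudp,
pud;: each table pointer sits next to its snapshot, the tests run on the
snapshot, and the pointer is returned when the entry itself is the mapping. One
level of that shape (simplified types, illustrative huge-entry test):

    #include <stddef.h>

    #define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

    typedef struct { unsigned long val; } pud_t;
    typedef struct { unsigned long val; } pte_t;

    static int pud_none(pud_t pud) { return pud.val == 0; }
    static int pud_huge(pud_t pud) { return (pud.val & 3UL) == 1UL; }

    static pte_t *lookup(pud_t *pudp)
    {
            pud_t pud = READ_ONCE(*pudp);

            if (pud_none(pud))
                    return NULL;
            if (pud_huge(pud))
                    return (pte_t *)pudp; /* the entry itself maps the page */
            return NULL;                  /* would descend to the pmd level */
    }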
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 6e02e6fb4c7b9e12da9796b2e8a2be68ca143ae0..dabfc1ecda3d3a9d57a430f1641eca05c1114703 100644
@@ -44,92 +44,92 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
        return __pa(p);
 }
 
-static pte_t *__init kasan_pte_offset(pmd_t *pmd, unsigned long addr, int node,
+static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
                                      bool early)
 {
-       if (pmd_none(*pmd)) {
+       if (pmd_none(READ_ONCE(*pmdp))) {
                phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
                                             : kasan_alloc_zeroed_page(node);
-               __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
+               __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
        }
 
-       return early ? pte_offset_kimg(pmd, addr)
-                    : pte_offset_kernel(pmd, addr);
+       return early ? pte_offset_kimg(pmdp, addr)
+                    : pte_offset_kernel(pmdp, addr);
 }
 
-static pmd_t *__init kasan_pmd_offset(pud_t *pud, unsigned long addr, int node,
+static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
                                      bool early)
 {
-       if (pud_none(*pud)) {
+       if (pud_none(READ_ONCE(*pudp))) {
                phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
                                             : kasan_alloc_zeroed_page(node);
-               __pud_populate(pud, pmd_phys, PMD_TYPE_TABLE);
+               __pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
        }
 
-       return early ? pmd_offset_kimg(pud, addr) : pmd_offset(pud, addr);
+       return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
 }
 
-static pud_t *__init kasan_pud_offset(pgd_t *pgd, unsigned long addr, int node,
+static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
                                      bool early)
 {
-       if (pgd_none(*pgd)) {
+       if (pgd_none(READ_ONCE(*pgdp))) {
                phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
                                             : kasan_alloc_zeroed_page(node);
-               __pgd_populate(pgd, pud_phys, PMD_TYPE_TABLE);
+               __pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
        }
 
-       return early ? pud_offset_kimg(pgd, addr) : pud_offset(pgd, addr);
+       return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr);
 }
 
-static void __init kasan_pte_populate(pmd_t *pmd, unsigned long addr,
+static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
                                      unsigned long end, int node, bool early)
 {
        unsigned long next;
-       pte_t *pte = kasan_pte_offset(pmd, addr, node, early);
+       pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);
 
        do {
                phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
                                              : kasan_alloc_zeroed_page(node);
                next = addr + PAGE_SIZE;
-               set_pte(pte, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
-       } while (pte++, addr = next, addr != end && pte_none(*pte));
+               set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
+       } while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
 }
 
-static void __init kasan_pmd_populate(pud_t *pud, unsigned long addr,
+static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
                                      unsigned long end, int node, bool early)
 {
        unsigned long next;
-       pmd_t *pmd = kasan_pmd_offset(pud, addr, node, early);
+       pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);
 
        do {
                next = pmd_addr_end(addr, end);
-               kasan_pte_populate(pmd, addr, next, node, early);
-       } while (pmd++, addr = next, addr != end && pmd_none(*pmd));
+               kasan_pte_populate(pmdp, addr, next, node, early);
+       } while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
 }
 
-static void __init kasan_pud_populate(pgd_t *pgd, unsigned long addr,
+static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
                                      unsigned long end, int node, bool early)
 {
        unsigned long next;
-       pud_t *pud = kasan_pud_offset(pgd, addr, node, early);
+       pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early);
 
        do {
                next = pud_addr_end(addr, end);
-               kasan_pmd_populate(pud, addr, next, node, early);
-       } while (pud++, addr = next, addr != end && pud_none(*pud));
+               kasan_pmd_populate(pudp, addr, next, node, early);
+       } while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
 }
 
 static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
                                      int node, bool early)
 {
        unsigned long next;
-       pgd_t *pgd;
+       pgd_t *pgdp;
 
-       pgd = pgd_offset_k(addr);
+       pgdp = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
-               kasan_pud_populate(pgd, addr, next, node, early);
-       } while (pgd++, addr = next, addr != end);
+               kasan_pud_populate(pgdp, addr, next, node, early);
+       } while (pgdp++, addr = next, addr != end);
 }
 
 /* The early shadow maps everything to a single page of zeroes */
@@ -155,14 +155,14 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end,
  */
 void __init kasan_copy_shadow(pgd_t *pgdir)
 {
-       pgd_t *pgd, *pgd_new, *pgd_end;
+       pgd_t *pgdp, *pgdp_new, *pgdp_end;
 
-       pgd = pgd_offset_k(KASAN_SHADOW_START);
-       pgd_end = pgd_offset_k(KASAN_SHADOW_END);
-       pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
+       pgdp = pgd_offset_k(KASAN_SHADOW_START);
+       pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
+       pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
        do {
-               set_pgd(pgd_new, *pgd);
-       } while (pgd++, pgd_new++, pgd != pgd_end);
+               set_pgd(pgdp_new, READ_ONCE(*pgdp));
+       } while (pgdp++, pgdp_new++, pgdp != pgdp_end);
 }
 
 static void __init clear_pgds(unsigned long start,
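
The kasan populate loops are the subtle case: the comma expressions in the
loop condition advance the cursor first and then peek at the freshly advanced
entry, so each READ_ONCE() lands on the next slot, not the one just filled. A
skeleton of that control flow (populate() is a stand-in):

    #define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

    typedef struct { unsigned long val; } pte_t;

    static int pte_none(pte_t pte) { return pte.val == 0; }
    extern void populate(pte_t *ptep);

    static void fill(pte_t *ptep, unsigned long addr, unsigned long end,
                     unsigned long step)
    {
            unsigned long next;

            do {
                    next = addr + step;
                    populate(ptep);
                    /* advance, then test the *next* entry exactly once */
            } while (ptep++, addr = next,
                     addr != end && pte_none(READ_ONCE(*ptep)));
    }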
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 4694cda823c9541527b95f269658bdbc4b8243d7..3161b853f29e1d35a21b0da5a01f571c995616f3 100644
@@ -125,45 +125,48 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
        return ((old ^ new) & ~mask) == 0;
 }
 
-static void init_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
+static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
                     phys_addr_t phys, pgprot_t prot)
 {
-       pte_t *pte;
+       pte_t *ptep;
 
-       pte = pte_set_fixmap_offset(pmd, addr);
+       ptep = pte_set_fixmap_offset(pmdp, addr);
        do {
-               pte_t old_pte = *pte;
+               pte_t old_pte = READ_ONCE(*ptep);
 
-               set_pte(pte, pfn_pte(__phys_to_pfn(phys), prot));
+               set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
 
                /*
                 * After the PTE entry has been populated once, we
                 * only allow updates to the permission attributes.
                 */
-               BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));
+               BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
+                                             READ_ONCE(pte_val(*ptep))));
 
                phys += PAGE_SIZE;
-       } while (pte++, addr += PAGE_SIZE, addr != end);
+       } while (ptep++, addr += PAGE_SIZE, addr != end);
 
        pte_clear_fixmap();
 }
 
-static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr,
+static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
                                unsigned long end, phys_addr_t phys,
                                pgprot_t prot,
                                phys_addr_t (*pgtable_alloc)(void),
                                int flags)
 {
        unsigned long next;
+       pmd_t pmd = READ_ONCE(*pmdp);
 
-       BUG_ON(pmd_sect(*pmd));
-       if (pmd_none(*pmd)) {
+       BUG_ON(pmd_sect(pmd));
+       if (pmd_none(pmd)) {
                phys_addr_t pte_phys;
                BUG_ON(!pgtable_alloc);
                pte_phys = pgtable_alloc();
-               __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
+               __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
+               pmd = READ_ONCE(*pmdp);
        }
-       BUG_ON(pmd_bad(*pmd));
+       BUG_ON(pmd_bad(pmd));
 
        do {
                pgprot_t __prot = prot;
@@ -175,67 +178,69 @@ static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr,
                    (flags & NO_CONT_MAPPINGS) == 0)
                        __prot = __pgprot(pgprot_val(prot) | PTE_CONT);
 
-               init_pte(pmd, addr, next, phys, __prot);
+               init_pte(pmdp, addr, next, phys, __prot);
 
                phys += next - addr;
        } while (addr = next, addr != end);
 }
 
-static void init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
                     phys_addr_t phys, pgprot_t prot,
                     phys_addr_t (*pgtable_alloc)(void), int flags)
 {
        unsigned long next;
-       pmd_t *pmd;
+       pmd_t *pmdp;
 
-       pmd = pmd_set_fixmap_offset(pud, addr);
+       pmdp = pmd_set_fixmap_offset(pudp, addr);
        do {
-               pmd_t old_pmd = *pmd;
+               pmd_t old_pmd = READ_ONCE(*pmdp);
 
                next = pmd_addr_end(addr, end);
 
                /* try section mapping first */
                if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
                    (flags & NO_BLOCK_MAPPINGS) == 0) {
-                       pmd_set_huge(pmd, phys, prot);
+                       pmd_set_huge(pmdp, phys, prot);
 
                        /*
                         * After the PMD entry has been populated once, we
                         * only allow updates to the permission attributes.
                         */
                        BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
-                                                     pmd_val(*pmd)));
+                                                     READ_ONCE(pmd_val(*pmdp))));
                } else {
-                       alloc_init_cont_pte(pmd, addr, next, phys, prot,
+                       alloc_init_cont_pte(pmdp, addr, next, phys, prot,
                                            pgtable_alloc, flags);
 
                        BUG_ON(pmd_val(old_pmd) != 0 &&
-                              pmd_val(old_pmd) != pmd_val(*pmd));
+                              pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
                }
                phys += next - addr;
-       } while (pmd++, addr = next, addr != end);
+       } while (pmdp++, addr = next, addr != end);
 
        pmd_clear_fixmap();
 }
 
-static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr,
+static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
                                unsigned long end, phys_addr_t phys,
                                pgprot_t prot,
                                phys_addr_t (*pgtable_alloc)(void), int flags)
 {
        unsigned long next;
+       pud_t pud = READ_ONCE(*pudp);
 
        /*
         * Check for initial section mappings in the pgd/pud.
         */
-       BUG_ON(pud_sect(*pud));
-       if (pud_none(*pud)) {
+       BUG_ON(pud_sect(pud));
+       if (pud_none(pud)) {
                phys_addr_t pmd_phys;
                BUG_ON(!pgtable_alloc);
                pmd_phys = pgtable_alloc();
-               __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
+               __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
+               pud = READ_ONCE(*pudp);
        }
-       BUG_ON(pud_bad(*pud));
+       BUG_ON(pud_bad(pud));
 
        do {
                pgprot_t __prot = prot;
@@ -247,7 +252,7 @@ static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr,
                    (flags & NO_CONT_MAPPINGS) == 0)
                        __prot = __pgprot(pgprot_val(prot) | PTE_CONT);
 
-               init_pmd(pud, addr, next, phys, __prot, pgtable_alloc, flags);
+               init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);
 
                phys += next - addr;
        } while (addr = next, addr != end);
@@ -265,25 +270,27 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
        return true;
 }
 
-static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
-                                 phys_addr_t phys, pgprot_t prot,
-                                 phys_addr_t (*pgtable_alloc)(void),
-                                 int flags)
+static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
+                          phys_addr_t phys, pgprot_t prot,
+                          phys_addr_t (*pgtable_alloc)(void),
+                          int flags)
 {
-       pud_t *pud;
        unsigned long next;
+       pud_t *pudp;
+       pgd_t pgd = READ_ONCE(*pgdp);
 
-       if (pgd_none(*pgd)) {
+       if (pgd_none(pgd)) {
                phys_addr_t pud_phys;
                BUG_ON(!pgtable_alloc);
                pud_phys = pgtable_alloc();
-               __pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
+               __pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
+               pgd = READ_ONCE(*pgdp);
        }
-       BUG_ON(pgd_bad(*pgd));
+       BUG_ON(pgd_bad(pgd));
 
-       pud = pud_set_fixmap_offset(pgd, addr);
+       pudp = pud_set_fixmap_offset(pgdp, addr);
        do {
-               pud_t old_pud = *pud;
+               pud_t old_pud = READ_ONCE(*pudp);
 
                next = pud_addr_end(addr, end);
 
@@ -292,23 +299,23 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
                 */
                if (use_1G_block(addr, next, phys) &&
                    (flags & NO_BLOCK_MAPPINGS) == 0) {
-                       pud_set_huge(pud, phys, prot);
+                       pud_set_huge(pudp, phys, prot);
 
                        /*
                         * After the PUD entry has been populated once, we
                         * only allow updates to the permission attributes.
                         */
                        BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
-                                                     pud_val(*pud)));
+                                                     READ_ONCE(pud_val(*pudp))));
                } else {
-                       alloc_init_cont_pmd(pud, addr, next, phys, prot,
+                       alloc_init_cont_pmd(pudp, addr, next, phys, prot,
                                            pgtable_alloc, flags);
 
                        BUG_ON(pud_val(old_pud) != 0 &&
-                              pud_val(old_pud) != pud_val(*pud));
+                              pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
                }
                phys += next - addr;
-       } while (pud++, addr = next, addr != end);
+       } while (pudp++, addr = next, addr != end);
 
        pud_clear_fixmap();
 }
@@ -320,7 +327,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
                                 int flags)
 {
        unsigned long addr, length, end, next;
-       pgd_t *pgd = pgd_offset_raw(pgdir, virt);
+       pgd_t *pgdp = pgd_offset_raw(pgdir, virt);
 
        /*
         * If the virtual and physical address don't have the same offset
@@ -336,10 +343,10 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
-               alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
+               alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
                               flags);
                phys += next - addr;
-       } while (pgd++, addr = next, addr != end);
+       } while (pgdp++, addr = next, addr != end);
 }
 
 static phys_addr_t pgd_pgtable_alloc(void)
@@ -401,10 +408,10 @@ static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
        flush_tlb_kernel_range(virt, virt + size);
 }
 
-static void __init __map_memblock(pgd_t *pgd, phys_addr_t start,
+static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
                                  phys_addr_t end, pgprot_t prot, int flags)
 {
-       __create_pgd_mapping(pgd, start, __phys_to_virt(start), end - start,
+       __create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
                             prot, early_pgtable_alloc, flags);
 }
 
@@ -418,7 +425,7 @@ void __init mark_linear_text_alias_ro(void)
                            PAGE_KERNEL_RO);
 }
 
-static void __init map_mem(pgd_t *pgd)
+static void __init map_mem(pgd_t *pgdp)
 {
        phys_addr_t kernel_start = __pa_symbol(_text);
        phys_addr_t kernel_end = __pa_symbol(__init_begin);
@@ -451,7 +458,7 @@ static void __init map_mem(pgd_t *pgd)
                if (memblock_is_nomap(reg))
                        continue;
 
-               __map_memblock(pgd, start, end, PAGE_KERNEL, flags);
+               __map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
        }
 
        /*
@@ -464,7 +471,7 @@ static void __init map_mem(pgd_t *pgd)
         * Note that contiguous mappings cannot be remapped in this way,
         * so we should avoid them here.
         */
-       __map_memblock(pgd, kernel_start, kernel_end,
+       __map_memblock(pgdp, kernel_start, kernel_end,
                       PAGE_KERNEL, NO_CONT_MAPPINGS);
        memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
 
@@ -475,7 +482,7 @@ static void __init map_mem(pgd_t *pgd)
         * through /sys/kernel/kexec_crash_size interface.
         */
        if (crashk_res.end) {
-               __map_memblock(pgd, crashk_res.start, crashk_res.end + 1,
+               __map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
                               PAGE_KERNEL,
                               NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
                memblock_clear_nomap(crashk_res.start,
@@ -499,7 +506,7 @@ void mark_rodata_ro(void)
        debug_checkwx();
 }
 
-static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
+static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
                                      pgprot_t prot, struct vm_struct *vma,
                                      int flags, unsigned long vm_flags)
 {
@@ -509,7 +516,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
        BUG_ON(!PAGE_ALIGNED(pa_start));
        BUG_ON(!PAGE_ALIGNED(size));
 
-       __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
+       __create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
                             early_pgtable_alloc, flags);
 
        if (!(vm_flags & VM_NO_GUARD))
@@ -562,7 +569,7 @@ core_initcall(map_entry_trampoline);
 /*
  * Create fine-grained mappings for the kernel.
  */
-static void __init map_kernel(pgd_t *pgd)
+static void __init map_kernel(pgd_t *pgdp)
 {
        static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
                                vmlinux_initdata, vmlinux_data;
@@ -578,24 +585,24 @@ static void __init map_kernel(pgd_t *pgd)
         * Only rodata will be remapped with different permissions later on,
         * all other segments are allowed to use contiguous mappings.
         */
-       map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0,
+       map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
                           VM_NO_GUARD);
-       map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
+       map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
                           &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
-       map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
+       map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
                           &vmlinux_inittext, 0, VM_NO_GUARD);
-       map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
+       map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
                           &vmlinux_initdata, 0, VM_NO_GUARD);
-       map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
+       map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
 
-       if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
+       if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) {
                /*
                 * The fixmap falls in a separate pgd to the kernel, and doesn't
                 * live in the carveout for the swapper_pg_dir. We can simply
                 * re-use the existing dir for the fixmap.
                 */
-               set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
-                       *pgd_offset_k(FIXADDR_START));
+               set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
+                       READ_ONCE(*pgd_offset_k(FIXADDR_START)));
        } else if (CONFIG_PGTABLE_LEVELS > 3) {
                /*
                 * The fixmap shares its top level pgd entry with the kernel
@@ -604,14 +611,15 @@ static void __init map_kernel(pgd_t *pgd)
                 * entry instead.
                 */
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
-               pud_populate(&init_mm, pud_set_fixmap_offset(pgd, FIXADDR_START),
+               pud_populate(&init_mm,
+                            pud_set_fixmap_offset(pgdp, FIXADDR_START),
                             lm_alias(bm_pmd));
                pud_clear_fixmap();
        } else {
                BUG();
        }
 
-       kasan_copy_shadow(pgd);
+       kasan_copy_shadow(pgdp);
 }
 
 /*
@@ -621,10 +629,10 @@ static void __init map_kernel(pgd_t *pgd)
 void __init paging_init(void)
 {
        phys_addr_t pgd_phys = early_pgtable_alloc();
-       pgd_t *pgd = pgd_set_fixmap(pgd_phys);
+       pgd_t *pgdp = pgd_set_fixmap(pgd_phys);
 
-       map_kernel(pgd);
-       map_mem(pgd);
+       map_kernel(pgdp);
+       map_mem(pgdp);
 
        /*
         * We want to reuse the original swapper_pg_dir so we don't have to
@@ -635,7 +643,7 @@ void __init paging_init(void)
         * To do this we need to go via a temporary pgd.
         */
        cpu_replace_ttbr1(__va(pgd_phys));
-       memcpy(swapper_pg_dir, pgd, PGD_SIZE);
+       memcpy(swapper_pg_dir, pgdp, PGD_SIZE);
        cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 
        pgd_clear_fixmap();
@@ -655,37 +663,40 @@ void __init paging_init(void)
  */
 int kern_addr_valid(unsigned long addr)
 {
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
+       pgd_t *pgdp;
+       pud_t *pudp, pud;
+       pmd_t *pmdp, pmd;
+       pte_t *ptep, pte;
 
        if ((((long)addr) >> VA_BITS) != -1UL)
                return 0;
 
-       pgd = pgd_offset_k(addr);
-       if (pgd_none(*pgd))
+       pgdp = pgd_offset_k(addr);
+       if (pgd_none(READ_ONCE(*pgdp)))
                return 0;
 
-       pud = pud_offset(pgd, addr);
-       if (pud_none(*pud))
+       pudp = pud_offset(pgdp, addr);
+       pud = READ_ONCE(*pudp);
+       if (pud_none(pud))
                return 0;
 
-       if (pud_sect(*pud))
-               return pfn_valid(pud_pfn(*pud));
+       if (pud_sect(pud))
+               return pfn_valid(pud_pfn(pud));
 
-       pmd = pmd_offset(pud, addr);
-       if (pmd_none(*pmd))
+       pmdp = pmd_offset(pudp, addr);
+       pmd = READ_ONCE(*pmdp);
+       if (pmd_none(pmd))
                return 0;
 
-       if (pmd_sect(*pmd))
-               return pfn_valid(pmd_pfn(*pmd));
+       if (pmd_sect(pmd))
+               return pfn_valid(pmd_pfn(pmd));
 
-       pte = pte_offset_kernel(pmd, addr);
-       if (pte_none(*pte))
+       ptep = pte_offset_kernel(pmdp, addr);
+       pte = READ_ONCE(*ptep);
+       if (pte_none(pte))
                return 0;
 
-       return pfn_valid(pte_pfn(*pte));
+       return pfn_valid(pte_pfn(pte));
 }
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 #if !ARM64_SWAPPER_USES_SECTION_MAPS
@@ -700,32 +711,32 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 {
        unsigned long addr = start;
        unsigned long next;
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
+       pgd_t *pgdp;
+       pud_t *pudp;
+       pmd_t *pmdp;
 
        do {
                next = pmd_addr_end(addr, end);
 
-               pgd = vmemmap_pgd_populate(addr, node);
-               if (!pgd)
+               pgdp = vmemmap_pgd_populate(addr, node);
+               if (!pgdp)
                        return -ENOMEM;
 
-               pud = vmemmap_pud_populate(pgd, addr, node);
-               if (!pud)
+               pudp = vmemmap_pud_populate(pgdp, addr, node);
+               if (!pudp)
                        return -ENOMEM;
 
-               pmd = pmd_offset(pud, addr);
-               if (pmd_none(*pmd)) {
+               pmdp = pmd_offset(pudp, addr);
+               if (pmd_none(READ_ONCE(*pmdp))) {
                        void *p = NULL;
 
                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;
 
-                       pmd_set_huge(pmd, __pa(p), __pgprot(PROT_SECT_NORMAL));
+                       pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
                } else
-                       vmemmap_verify((pte_t *)pmd, node, addr, next);
+                       vmemmap_verify((pte_t *)pmdp, node, addr, next);
        } while (addr = next, addr != end);
 
        return 0;
@@ -739,20 +750,22 @@ void vmemmap_free(unsigned long start, unsigned long end,
 
 static inline pud_t * fixmap_pud(unsigned long addr)
 {
-       pgd_t *pgd = pgd_offset_k(addr);
+       pgd_t *pgdp = pgd_offset_k(addr);
+       pgd_t pgd = READ_ONCE(*pgdp);
 
-       BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
+       BUG_ON(pgd_none(pgd) || pgd_bad(pgd));
 
-       return pud_offset_kimg(pgd, addr);
+       return pud_offset_kimg(pgdp, addr);
 }
 
 static inline pmd_t * fixmap_pmd(unsigned long addr)
 {
-       pud_t *pud = fixmap_pud(addr);
+       pud_t *pudp = fixmap_pud(addr);
+       pud_t pud = READ_ONCE(*pudp);
 
-       BUG_ON(pud_none(*pud) || pud_bad(*pud));
+       BUG_ON(pud_none(pud) || pud_bad(pud));
 
-       return pmd_offset_kimg(pud, addr);
+       return pmd_offset_kimg(pudp, addr);
 }
 
 static inline pte_t * fixmap_pte(unsigned long addr)
@@ -768,30 +781,31 @@ static inline pte_t * fixmap_pte(unsigned long addr)
  */
 void __init early_fixmap_init(void)
 {
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
+       pgd_t *pgdp, pgd;
+       pud_t *pudp;
+       pmd_t *pmdp;
        unsigned long addr = FIXADDR_START;
 
-       pgd = pgd_offset_k(addr);
+       pgdp = pgd_offset_k(addr);
+       pgd = READ_ONCE(*pgdp);
        if (CONFIG_PGTABLE_LEVELS > 3 &&
-           !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) {
+           !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
                /*
                 * We only end up here if the kernel mapping and the fixmap
                 * share the top level pgd entry, which should only happen on
                 * 16k/4 levels configurations.
                 */
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
-               pud = pud_offset_kimg(pgd, addr);
+               pudp = pud_offset_kimg(pgdp, addr);
        } else {
-               if (pgd_none(*pgd))
-                       __pgd_populate(pgd, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
-               pud = fixmap_pud(addr);
+               if (pgd_none(pgd))
+                       __pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
+               pudp = fixmap_pud(addr);
        }
-       if (pud_none(*pud))
-               __pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
-       pmd = fixmap_pmd(addr);
-       __pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
+       if (pud_none(READ_ONCE(*pudp)))
+               __pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
+       pmdp = fixmap_pmd(addr);
+       __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
 
        /*
         * The boot-ioremap range spans multiple pmds, for which
@@ -800,11 +814,11 @@ void __init early_fixmap_init(void)
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
 
-       if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
-            || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
+       if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
+            || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
-               pr_warn("pmd %p != %p, %p\n",
-                       pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
+               pr_warn("pmdp %p != %p, %p\n",
+                       pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
                        fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
@@ -824,16 +838,16 @@ void __set_fixmap(enum fixed_addresses idx,
                               phys_addr_t phys, pgprot_t flags)
 {
        unsigned long addr = __fix_to_virt(idx);
-       pte_t *pte;
+       pte_t *ptep;
 
        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
 
-       pte = fixmap_pte(addr);
+       ptep = fixmap_pte(addr);
 
        if (pgprot_val(flags)) {
-               set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
+               set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
-               pte_clear(&init_mm, addr, pte);
+               pte_clear(&init_mm, addr, ptep);
                flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
        }
 }
@@ -915,36 +929,36 @@ int __init arch_ioremap_pmd_supported(void)
        return 1;
 }
 
-int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
+int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
 {
        pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
                                        pgprot_val(mk_sect_prot(prot)));
        BUG_ON(phys & ~PUD_MASK);
-       set_pud(pud, pfn_pud(__phys_to_pfn(phys), sect_prot));
+       set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
        return 1;
 }
 
-int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
+int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
 {
        pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
                                        pgprot_val(mk_sect_prot(prot)));
        BUG_ON(phys & ~PMD_MASK);
-       set_pmd(pmd, pfn_pmd(__phys_to_pfn(phys), sect_prot));
+       set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
        return 1;
 }
 
-int pud_clear_huge(pud_t *pud)
+int pud_clear_huge(pud_t *pudp)
 {
-       if (!pud_sect(*pud))
+       if (!pud_sect(READ_ONCE(*pudp)))
                return 0;
-       pud_clear(pud);
+       pud_clear(pudp);
        return 1;
 }
 
-int pmd_clear_huge(pmd_t *pmd)
+int pmd_clear_huge(pmd_t *pmdp)
 {
-       if (!pmd_sect(*pmd))
+       if (!pmd_sect(READ_ONCE(*pmdp)))
                return 0;
-       pmd_clear(pmd);
+       pmd_clear(pmdp);
        return 1;
 }
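
A detail worth noting in the mmu.c hunks: alloc_init_cont_pte(),
alloc_init_cont_pmd() and alloc_init_pud() snapshot the entry up front, and
when they populate a missing table they take a second READ_ONCE() so the
following sanity check runs on the value actually installed. The shape of that
dance (stubbed helpers, not the real arm64 predicates):

    #define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

    typedef struct { unsigned long val; } pmd_t;

    static int pmd_none(pmd_t pmd) { return pmd.val == 0; }
    static int pmd_bad(pmd_t pmd)  { return 0; } /* stub */
    extern void populate_table(pmd_t *pmdp);

    static void ensure_table(pmd_t *pmdp)
    {
            pmd_t pmd = READ_ONCE(*pmdp);      /* snapshot #1 */

            if (pmd_none(pmd)) {
                    populate_table(pmdp);
                    pmd = READ_ONCE(*pmdp);    /* refresh after the write */
            }
            if (pmd_bad(pmd))                  /* checks the installed value */
                    __builtin_trap();
    }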
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index a682a0a2a0fa4d5db9175256e78100b1afde344a..a56359373d8b3592e6cde6891d9b44206bd96137 100644
@@ -29,7 +29,7 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
                        void *data)
 {
        struct page_change_data *cdata = data;
-       pte_t pte = *ptep;
+       pte_t pte = READ_ONCE(*ptep);
 
        pte = clear_pte_bit(pte, cdata->clear_mask);
        pte = set_pte_bit(pte, cdata->set_mask);
@@ -156,30 +156,32 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
  */
 bool kernel_page_present(struct page *page)
 {
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
+       pgd_t *pgdp;
+       pud_t *pudp, pud;
+       pmd_t *pmdp, pmd;
+       pte_t *ptep;
        unsigned long addr = (unsigned long)page_address(page);
 
-       pgd = pgd_offset_k(addr);
-       if (pgd_none(*pgd))
+       pgdp = pgd_offset_k(addr);
+       if (pgd_none(READ_ONCE(*pgdp)))
                return false;
 
-       pud = pud_offset(pgd, addr);
-       if (pud_none(*pud))
+       pudp = pud_offset(pgdp, addr);
+       pud = READ_ONCE(*pudp);
+       if (pud_none(pud))
                return false;
-       if (pud_sect(*pud))
+       if (pud_sect(pud))
                return true;
 
-       pmd = pmd_offset(pud, addr);
-       if (pmd_none(*pmd))
+       pmdp = pmd_offset(pudp, addr);
+       pmd = READ_ONCE(*pmdp);
+       if (pmd_none(pmd))
                return false;
-       if (pmd_sect(*pmd))
+       if (pmd_sect(pmd))
                return true;
 
-       pte = pte_offset_kernel(pmd, addr);
-       return pte_valid(*pte);
+       ptep = pte_offset_kernel(pmdp, addr);
+       return pte_valid(READ_ONCE(*ptep));
 }
 #endif /* CONFIG_HIBERNATION */
 #endif /* CONFIG_DEBUG_PAGEALLOC */
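
change_page_range() above is the canonical read-modify-write under this
discipline: snapshot, apply the caller's clear/set masks to the copy, publish
once. Self-contained, with pgprot handling reduced to raw masks:

    #include <stdint.h>

    #define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

    typedef struct { uint64_t val; } pte_t;

    struct page_change_data {
            uint64_t set_mask;
            uint64_t clear_mask;
    };

    static void change(pte_t *ptep, const struct page_change_data *cdata)
    {
            pte_t pte = READ_ONCE(*ptep); /* snapshot */

            pte.val &= ~cdata->clear_mask;
            pte.val |= cdata->set_mask;   /* modify the copy */
            *ptep = pte;                  /* set_pte() in the real code */
    }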
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 71baed7e592a499196a1c7bc239dbbc3297d3bc2..c0af4761729986da832a8e844b256e64da8cb4c1 100644
@@ -205,7 +205,8 @@ ENDPROC(idmap_cpu_replace_ttbr1)
        dc      cvac, cur_\()\type\()p          // Ensure any existing dirty
        dmb     sy                              // lines are written back before
        ldr     \type, [cur_\()\type\()p]       // loading the entry
-       tbz     \type, #0, next_\()\type        // Skip invalid entries
+       tbz     \type, #0, skip_\()\type        // Skip invalid and
+       tbnz    \type, #11, skip_\()\type       // non-global entries
        .endm
 
        .macro __idmap_kpti_put_pgtable_ent_ng, type
@@ -265,8 +266,9 @@ ENTRY(idmap_kpti_install_ng_mappings)
        add     end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
 do_pgd:        __idmap_kpti_get_pgtable_ent    pgd
        tbnz    pgd, #1, walk_puds
-       __idmap_kpti_put_pgtable_ent_ng pgd
 next_pgd:
+       __idmap_kpti_put_pgtable_ent_ng pgd
+skip_pgd:
        add     cur_pgdp, cur_pgdp, #8
        cmp     cur_pgdp, end_pgdp
        b.ne    do_pgd
@@ -294,8 +296,9 @@ walk_puds:
        add     end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
 do_pud:        __idmap_kpti_get_pgtable_ent    pud
        tbnz    pud, #1, walk_pmds
-       __idmap_kpti_put_pgtable_ent_ng pud
 next_pud:
+       __idmap_kpti_put_pgtable_ent_ng pud
+skip_pud:
        add     cur_pudp, cur_pudp, 8
        cmp     cur_pudp, end_pudp
        b.ne    do_pud
@@ -314,8 +317,9 @@ walk_pmds:
        add     end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
 do_pmd:        __idmap_kpti_get_pgtable_ent    pmd
        tbnz    pmd, #1, walk_ptes
-       __idmap_kpti_put_pgtable_ent_ng pmd
 next_pmd:
+       __idmap_kpti_put_pgtable_ent_ng pmd
+skip_pmd:
        add     cur_pmdp, cur_pmdp, #8
        cmp     cur_pmdp, end_pmdp
        b.ne    do_pmd
@@ -333,7 +337,7 @@ walk_ptes:
        add     end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
 do_pte:        __idmap_kpti_get_pgtable_ent    pte
        __idmap_kpti_put_pgtable_ent_ng pte
-next_pte:
+skip_pte:
        add     cur_ptep, cur_ptep, #8
        cmp     cur_ptep, end_ptep
        b.ne    do_pte
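
The proc.S change is a behavioural fix riding along with the relabelling: bit
11 is nG in page descriptors but ignored in table descriptors, so it can
double as a "visited" marker. The walker now skips invalid or already-marked
entries outright and marks a table entry only after its children have been
walked, so shared tables are not traversed twice. A C sketch of the control
flow (bit positions per the ARMv8 descriptor format, walk_next_level()
hypothetical):

    #include <stdint.h>

    #define DESC_VALID (1ULL << 0)
    #define DESC_TABLE (1ULL << 1)
    #define DESC_NG    (1ULL << 11) /* nG in pages; spare in tables */

    extern void walk_next_level(uint64_t desc);

    static void visit(uint64_t *entry)
    {
            uint64_t desc = *entry;

            if (!(desc & DESC_VALID) || (desc & DESC_NG))
                    return;                /* skip_*: invalid or done */

            if (desc & DESC_TABLE)
                    walk_next_level(desc); /* walk_*: recurse first */

            *entry = desc | DESC_NG;       /* next_*: mark as visited */
    }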
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 0b4c65a1af25fbe264766ee1e14c4c0bf19bc5e6..498f3da3f225d2ed8479af540bbbc22786e2c55d 100644
@@ -41,7 +41,6 @@ ifneq ($(CONFIG_IA64_ESI),)
 obj-y                          += esi_stub.o   # must be in kernel proper
 endif
 obj-$(CONFIG_INTEL_IOMMU)      += pci-dma.o
-obj-$(CONFIG_SWIOTLB)          += pci-swiotlb.o
 
 obj-$(CONFIG_BINFMT_ELF)       += elfcore.o
 
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 19c88d770054617bfc23b15c81c20b12a7f7cc3f..fcf9af492d60229a491337e56b1ebc9adf60f7fb 100644
@@ -10,6 +10,8 @@
 
 #include <linux/errno.h>
 #include <linux/percpu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/spinlock.h>
 
 #include <asm/mips-cps.h>
@@ -22,6 +24,17 @@ static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
 
 phys_addr_t __weak mips_cpc_default_phys_base(void)
 {
+       struct device_node *cpc_node;
+       struct resource res;
+       int err;
+
+       cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
+       if (cpc_node) {
+               err = of_address_to_resource(cpc_node, 0, &res);
+               if (!err)
+                       return res.start;
+       }
+
        return 0;
 }
 
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 85bc601e9a0d43ffd89669eac4b359de99566083..5f8b0a9e30b3d6faec9befca1e759a8f9263f8c8 100644
@@ -375,6 +375,7 @@ static void __init bootmem_init(void)
        unsigned long reserved_end;
        unsigned long mapstart = ~0UL;
        unsigned long bootmap_size;
+       phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX;
        bool bootmap_valid = false;
        int i;
 
@@ -395,7 +396,8 @@ static void __init bootmem_init(void)
        max_low_pfn = 0;
 
        /*
-        * Find the highest page frame number we have available.
+        * Find the highest page frame number we have available
+        * and the lowest used RAM address
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start, end;
@@ -407,6 +409,8 @@ static void __init bootmem_init(void)
                end = PFN_DOWN(boot_mem_map.map[i].addr
                                + boot_mem_map.map[i].size);
 
+               ramstart = min(ramstart, boot_mem_map.map[i].addr);
+
 #ifndef CONFIG_HIGHMEM
                /*
                 * Skip highmem here so we get an accurate max_low_pfn if low
@@ -436,6 +440,13 @@ static void __init bootmem_init(void)
                mapstart = max(reserved_end, start);
        }
 
+       /*
+        * Reserve any memory between the start of RAM and PHYS_OFFSET
+        */
+       if (ramstart > PHYS_OFFSET)
+               add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
+                                 BOOT_MEM_RESERVED);
+
        if (min_low_pfn >= max_low_pfn)
                panic("Incorrect memory mapping !!!");
        if (min_low_pfn > ARCH_PFN_OFFSET) {
@@ -664,9 +675,6 @@ static int __init early_parse_mem(char *p)
 
        add_memory_region(start, size, BOOT_MEM_RAM);
 
-       if (start && start > PHYS_OFFSET)
-               add_memory_region(PHYS_OFFSET, start - PHYS_OFFSET,
-                               BOOT_MEM_RESERVED);
        return 0;
 }
 early_param("mem", early_parse_mem);
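The per-"mem=" reservation is replaced by a single reservation computed from the memory map itself. A worked example with hypothetical values: if PHYS_OFFSET is 0 and the lowest boot_mem_map entry starts at ramstart = 0x00100000, the new hunk reserves the first 1 MiB as BOOT_MEM_RESERVED once, instead of relying on each mem= argument to do it.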
index 87dcac2447c8df20a572139d5053624e91acf2ca..9d41732a9146a31545b9114812cb12c669196478 100644 (file)
@@ -572,7 +572,7 @@ asmlinkage void __weak plat_wired_tlb_setup(void)
         */
 }
 
-void __init bmips_cpu_setup(void)
+void bmips_cpu_setup(void)
 {
        void __iomem __maybe_unused *cbr = BMIPS_GET_CBR();
        u32 __maybe_unused cfg;
index 30a155c0a6b07e31ca69d5a7418fb19a1f6e7872..c615abdce119ea34ff6c33d02109cd700036db64 100644 (file)
@@ -16,6 +16,7 @@
 #define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)
 
 #define PMD_CACHE_INDEX        PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX        PUD_INDEX_SIZE
 
 #ifndef __ASSEMBLY__
 #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
index 949d691094a46d674880dd1e54da971a4161f815..67c5475311ee6e03b29486f8518dc74758263224 100644 (file)
@@ -63,7 +63,8 @@ static inline int hash__hugepd_ok(hugepd_t hpd)
  * keeping the prototype consistent across the two formats.
  */
 static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
-                       unsigned int subpg_index, unsigned long hidx)
+                                        unsigned int subpg_index, unsigned long hidx,
+                                        int offset)
 {
        return (hidx << H_PAGE_F_GIX_SHIFT) &
                (H_PAGE_F_SECOND | H_PAGE_F_GIX);
index 338b7da468cef309fa2b787c852e96ab05f014e5..3bcf269f8f55470097ac56680685321bf13e62ba 100644 (file)
@@ -45,7 +45,7 @@
  * generic accessors and iterators here
  */
 #define __real_pte __real_pte
-static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
 {
        real_pte_t rpte;
        unsigned long *hidxp;
@@ -59,7 +59,7 @@ static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
         */
        smp_rmb();
 
-       hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+       hidxp = (unsigned long *)(ptep + offset);
        rpte.hidx = *hidxp;
        return rpte;
 }
@@ -86,9 +86,10 @@ static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
  * expected to modify the PTE bits accordingly and commit the PTE to memory.
  */
 static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
-               unsigned int subpg_index, unsigned long hidx)
+                                        unsigned int subpg_index,
+                                        unsigned long hidx, int offset)
 {
-       unsigned long *hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+       unsigned long *hidxp = (unsigned long *)(ptep + offset);
 
        rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
        *hidxp = rpte.hidx  | HIDX_BITS(HIDX_SHIFT_BY_ONE(hidx), subpg_index);
@@ -140,13 +141,18 @@ static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long a
 }
 
 #define H_PTE_TABLE_SIZE       PTE_FRAG_SIZE
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined (CONFIG_HUGETLB_PAGE)
 #define H_PMD_TABLE_SIZE       ((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
                                 (sizeof(unsigned long) << PMD_INDEX_SIZE))
 #else
 #define H_PMD_TABLE_SIZE       (sizeof(pmd_t) << PMD_INDEX_SIZE)
 #endif
+#ifdef CONFIG_HUGETLB_PAGE
+#define H_PUD_TABLE_SIZE       ((sizeof(pud_t) << PUD_INDEX_SIZE) +    \
+                                (sizeof(unsigned long) << PUD_INDEX_SIZE))
+#else
 #define H_PUD_TABLE_SIZE       (sizeof(pud_t) << PUD_INDEX_SIZE)
+#endif
 #define H_PGD_TABLE_SIZE       (sizeof(pgd_t) << PGD_INDEX_SIZE)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
index 0920eff731b385221edeb46a04ed5f280ad76ff8..935adcd92a81655ed79e9c21fb6e196233ca69b1 100644 (file)
@@ -23,7 +23,8 @@
                                 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
 #define H_PGTABLE_RANGE                (ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
 
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&  defined(CONFIG_PPC_64K_PAGES)
+#if (defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)) && \
+       defined(CONFIG_PPC_64K_PAGES)
 /*
  * only with hash 64k we need to use the second half of pmd page table
  * to store pointer to deposited pgtable_t
 #else
 #define H_PMD_CACHE_INDEX      H_PMD_INDEX_SIZE
 #endif
+/*
+ * We store the slot details in the second half of the page table.
+ * Increase the pud-level table size so that hugetlb ptes can be
+ * stored at the pud level.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) &&  defined(CONFIG_PPC_64K_PAGES)
+#define H_PUD_CACHE_INDEX      (H_PUD_INDEX_SIZE + 1)
+#else
+#define H_PUD_CACHE_INDEX      (H_PUD_INDEX_SIZE)
+#endif
 /*
  * Define the address range of the kernel non-linear virtual area
  */
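The +1 in H_PUD_CACHE_INDEX doubles the backing object so the second half can hold the hash slot details, matching the H_PUD_TABLE_SIZE arithmetic earlier in this series. A sketch of the sizes involved (n stands for H_PUD_INDEX_SIZE; on ppc64 both sizeof(pud_t) and sizeof(unsigned long) are 8):

    size_t pud_half  = sizeof(pud_t) << n;          /* the PUD entries   */
    size_t slot_half = sizeof(unsigned long) << n;  /* hash slot details */
    size_t total     = pud_half + slot_half;        /* == 8 << (n + 1)   */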
index 1fcfa425cefaf205fe787cc9a480265aec758e0f..4746bc68d446d8e95427e67a86c3493a1d1f1668 100644 (file)
@@ -73,10 +73,16 @@ static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
+       pgd_t *pgd;
+
        if (radix_enabled())
                return radix__pgd_alloc(mm);
-       return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
-               pgtable_gfp_flags(mm, GFP_KERNEL));
+
+       pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+                              pgtable_gfp_flags(mm, GFP_KERNEL));
+       memset(pgd, 0, PGD_TABLE_SIZE);
+
+       return pgd;
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -93,13 +99,13 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+       return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
                pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
-       kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
+       kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
 }
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -115,7 +121,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
         * ahead and flush the page walk cache
         */
        flush_tlb_pgtable(tlb, address);
-        pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
+       pgtable_free_tlb(tlb, pud, PUD_CACHE_INDEX);
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
index 51017726d49539fda8cba5b346460aeba104c3f9..a6b9f1d746002cd3479686603c76322d52676db9 100644 (file)
@@ -232,11 +232,13 @@ extern unsigned long __pmd_index_size;
 extern unsigned long __pud_index_size;
 extern unsigned long __pgd_index_size;
 extern unsigned long __pmd_cache_index;
+extern unsigned long __pud_cache_index;
 #define PTE_INDEX_SIZE  __pte_index_size
 #define PMD_INDEX_SIZE  __pmd_index_size
 #define PUD_INDEX_SIZE  __pud_index_size
 #define PGD_INDEX_SIZE  __pgd_index_size
 #define PMD_CACHE_INDEX __pmd_cache_index
+#define PUD_CACHE_INDEX __pud_cache_index
 /*
  * Because of the use of pte fragments and THP, the size of the page
  * tables is not always derived from the index sizes above.
@@ -348,7 +350,7 @@ extern unsigned long pci_io_base;
  */
 #ifndef __real_pte
 
-#define __real_pte(e,p)                ((real_pte_t){(e)})
+#define __real_pte(e, p, o)            ((real_pte_t){(e)})
 #define __rpte_to_pte(r)       ((r).pte)
 #define __rpte_to_hidx(r,index)        (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
 
index 176dfb73d42c073df181013c6497cb42f9c8f85d..471b2274fbeba815f04c1957d975f0f1a74bcdbe 100644 (file)
@@ -645,7 +645,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
                                          EXC_HV, SOFTEN_TEST_HV, bitmask)
 
 #define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label, bitmask)           \
-       MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec, bitmask);\
+       MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
        EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_HV)
 
 /*
index 88e5e8f17e9896e5a051845235bfa0c822684552..855e17d158b11f04120b9b39a352af91cedb95c8 100644 (file)
 #define PACA_IRQ_HMI           0x20
 #define PACA_IRQ_PMI           0x40
 
+/*
+ * Some soft-masked interrupts must be hard masked until they are replayed
+ * (e.g., because the soft-masked handler does not clear the exception).
+ */
+#ifdef CONFIG_PPC_BOOK3S
+#define PACA_IRQ_MUST_HARD_MASK        (PACA_IRQ_EE|PACA_IRQ_PMI)
+#else
+#define PACA_IRQ_MUST_HARD_MASK        (PACA_IRQ_EE)
+#endif
+
 /*
  * flags for paca->irq_soft_mask
  */
@@ -244,7 +254,7 @@ static inline bool lazy_irq_pending(void)
 static inline void may_hard_irq_enable(void)
 {
        get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
-       if (!(get_paca()->irq_happened & PACA_IRQ_EE))
+       if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK))
                __hard_irq_enable();
 }
 
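A worked example of the new test in may_hard_irq_enable(): if irq_happened records only a masked decrementer, the AND with PACA_IRQ_MUST_HARD_MASK is zero and __hard_irq_enable() runs, since a masked DEC is silenced by reprogramming the decrementer (see the MASKED_INTERRUPT hunk below). If an external interrupt or, on Book3S, a PMI is pending, the AND is non-zero and the CPU stays hard-disabled until the replay, because those exceptions would refire as soon as MSR[EE] is set.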
index 9dcbfa6bbb91e740e483fa6c5c56b8422b8440c8..d8b1e8e7e035b31acd7372eee6bd8017166987d4 100644 (file)
@@ -140,6 +140,12 @@ static inline bool kdump_in_progress(void)
        return false;
 }
 
+static inline void crash_ipi_callback(struct pt_regs *regs) { }
+
+static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
+{
+}
+
 #endif /* CONFIG_KEXEC_CORE */
 #endif /* ! __ASSEMBLY__ */
 #endif /* __KERNEL__ */
index 504a3c36ce5c9b311a9c8864d112792ae60fab9f..03bbd1149530d3115d7c9e84c66893428fe4af43 100644 (file)
@@ -24,6 +24,7 @@ extern int icache_44x_need_flush;
 #define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)
 
 #define PMD_CACHE_INDEX        PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX        PUD_INDEX_SIZE
 
 #ifndef __ASSEMBLY__
 #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
index abddf5830ad5550ee2c72875b209d9546afec579..5c5f75d005ada6289633455dc89bc0d2641c2e54 100644 (file)
@@ -27,6 +27,7 @@
 #else
 #define PMD_CACHE_INDEX        PMD_INDEX_SIZE
 #endif
+#define PUD_CACHE_INDEX PUD_INDEX_SIZE
 
 /*
  * Define the address range of the kernel non-linear virtual area
index 88187c285c70d5d823ccdf0a572cb8a7e24a6341..9f421641a35c8240cbacf192f6a1b22b4f33c63c 100644 (file)
@@ -44,6 +44,11 @@ extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 extern int numa_update_cpu_topology(bool cpus_locked);
 
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
+{
+       numa_cpu_lookup_table[cpu] = node;
+}
+
 static inline int early_cpu_to_node(int cpu)
 {
        int nid;
@@ -76,12 +81,16 @@ static inline int numa_update_cpu_topology(bool cpus_locked)
 {
        return 0;
 }
+
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
+
 #endif /* CONFIG_NUMA */
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
 extern int start_topology_update(void);
 extern int stop_topology_update(void);
 extern int prrn_is_enabled(void);
+extern int find_and_online_cpu_nid(int cpu);
 #else
 static inline int start_topology_update(void)
 {
@@ -95,6 +104,10 @@ static inline int prrn_is_enabled(void)
 {
        return 0;
 }
+static inline int find_and_online_cpu_nid(int cpu)
+{
+       return 0;
+}
 #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
 
 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_NEED_MULTIPLE_NODES)
index ee832d344a5a265018d8fa98f0c38c0a94549db5..9b6e653e501a1264a8bfa398400dd88ecfa38d25 100644 (file)
@@ -943,6 +943,8 @@ kernel_dbg_exc:
 /*
  * An interrupt came in while soft-disabled; We mark paca->irq_happened
  * accordingly and if the interrupt is level sensitive, we hard disable
+ * (the full_mask case). Hard disabling (full_mask) corresponds to
+ * PACA_IRQ_MUST_HARD_MASK, so keep these in sync.
  */
 
 .macro masked_interrupt_book3e paca_irq full_mask
index 243d072a225aac1f7c7eaa69b6e5ef8cd21ce2c6..3ac87e53b3da0fdc0c41bd967ac731f5d98d6efb 100644 (file)
@@ -1426,7 +1426,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
  *   triggered and won't automatically refire.
  * - If it was a HMI we return immediately since we handled it in realmode
  *   and it won't refire.
- * - else we hard disable and return.
+ * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
  * This is called with r10 containing the value to OR to the paca field.
  */
 #define MASKED_INTERRUPT(_H)                           \
@@ -1441,8 +1441,8 @@ masked_##_H##interrupt:                                   \
        ori     r10,r10,0xffff;                         \
        mtspr   SPRN_DEC,r10;                           \
        b       MASKED_DEC_HANDLER_LABEL;               \
-1:     andi.   r10,r10,(PACA_IRQ_DBELL|PACA_IRQ_HMI);  \
-       bne     2f;                                     \
+1:     andi.   r10,r10,PACA_IRQ_MUST_HARD_MASK;        \
+       beq     2f;                                     \
        mfspr   r10,SPRN_##_H##SRR1;                    \
        xori    r10,r10,MSR_EE; /* clear MSR_EE */      \
        mtspr   SPRN_##_H##SRR1,r10;                    \
index 5a8bfee6e1877c58ae607445ea77af1ed6b2e869..04d0bbd7a1dd03e13e47e4c5e10a647672955ea3 100644 (file)
@@ -788,7 +788,8 @@ static int register_cpu_online(unsigned int cpu)
        if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
                device_create_file(s, &dev_attr_pir);
 
-       if (cpu_has_feature(CPU_FTR_ARCH_206))
+       if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+               !firmware_has_feature(FW_FEATURE_LPAR))
                device_create_file(s, &dev_attr_tscr);
 #endif /* CONFIG_PPC64 */
 
@@ -873,7 +874,8 @@ static int unregister_cpu_online(unsigned int cpu)
        if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
                device_remove_file(s, &dev_attr_pir);
 
-       if (cpu_has_feature(CPU_FTR_ARCH_206))
+       if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+               !firmware_has_feature(FW_FEATURE_LPAR))
                device_remove_file(s, &dev_attr_tscr);
 #endif /* CONFIG_PPC64 */
 
index 1604110c42386c39dea239aaa1a564ba5bc7b38e..916844f99c64e59655d3372ac4e69c731f0751e9 100644 (file)
@@ -216,6 +216,8 @@ static void __init __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm,
        u32 i, n_lmbs;
 
        n_lmbs = of_read_number(prop++, 1);
+       if (n_lmbs == 0)
+               return;
 
        for (i = 0; i < n_lmbs; i++) {
                read_drconf_v1_cell(&lmb, &prop);
@@ -245,6 +247,8 @@ static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm,
        u32 i, j, lmb_sets;
 
        lmb_sets = of_read_number(prop++, 1);
+       if (lmb_sets == 0)
+               return;
 
        for (i = 0; i < lmb_sets; i++) {
                read_drconf_v2_cell(&dr_cell, &prop);
@@ -354,6 +358,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
        struct drmem_lmb *lmb;
 
        drmem_info->n_lmbs = of_read_number(prop++, 1);
+       if (drmem_info->n_lmbs == 0)
+               return;
 
        drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
                                   GFP_KERNEL);
@@ -373,6 +379,8 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
        int lmb_index;
 
        lmb_sets = of_read_number(prop++, 1);
+       if (lmb_sets == 0)
+               return;
 
        /* first pass, calculate the number of LMBs */
        p = prop;
index 5a69b51d08a3615f319a325536a209b6399db2a7..d573d7d07f25f4d718043a71e199475d3df9597c 100644 (file)
@@ -55,7 +55,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
         * need to add in 0x1 if it's a read-only user page
         */
        rflags = htab_convert_pte_flags(new_pte);
-       rpte = __real_pte(__pte(old_pte), ptep);
+       rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
 
        if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
            !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -117,7 +117,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
                        return -1;
                }
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
-               new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+               new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
        }
        *ptep = __pte(new_pte & ~H_PAGE_BUSY);
        return 0;
index 2253bbc6a599d7804cb81dc49b2c6f82b81a435d..e601d95c3b20271d7b9cc6483cab402d51d80436 100644 (file)
@@ -86,7 +86,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 
        subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
        vpn  = hpt_vpn(ea, vsid, ssize);
-       rpte = __real_pte(__pte(old_pte), ptep);
+       rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
        /*
         * None of the sub-4k pages is hashed
         */
@@ -214,7 +214,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
                return -1;
        }
 
-       new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot);
+       new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot, PTRS_PER_PTE);
        new_pte |= H_PAGE_HASHPTE;
 
        *ptep = __pte(new_pte & ~H_PAGE_BUSY);
@@ -262,7 +262,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
        } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
 
        rflags = htab_convert_pte_flags(new_pte);
-       rpte = __real_pte(__pte(old_pte), ptep);
+       rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
 
        if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
            !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -327,7 +327,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
                }
 
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
-               new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+               new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
        }
        *ptep = __pte(new_pte & ~H_PAGE_BUSY);
        return 0;
index 7d07c7e17db6708334ea38cad711e1f5c32de1c6..cf290d415dcd8e9e314c63134c49cbd687e63fd7 100644 (file)
@@ -1008,6 +1008,7 @@ void __init hash__early_init_mmu(void)
        __pmd_index_size = H_PMD_INDEX_SIZE;
        __pud_index_size = H_PUD_INDEX_SIZE;
        __pgd_index_size = H_PGD_INDEX_SIZE;
+       __pud_cache_index = H_PUD_CACHE_INDEX;
        __pmd_cache_index = H_PMD_CACHE_INDEX;
        __pte_table_size = H_PTE_TABLE_SIZE;
        __pmd_table_size = H_PMD_TABLE_SIZE;
index 12511f5a015fcfee349e9dd7ac00f6ecd3b8df90..b320f5097a0616dce810c31e42fa42659475d4c3 100644 (file)
@@ -27,7 +27,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
        unsigned long vpn;
        unsigned long old_pte, new_pte;
        unsigned long rflags, pa, sz;
-       long slot;
+       long slot, offset;
 
        BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
 
@@ -63,7 +63,11 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
        } while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
 
        rflags = htab_convert_pte_flags(new_pte);
-       rpte = __real_pte(__pte(old_pte), ptep);
+       if (unlikely(mmu_psize == MMU_PAGE_16G))
+               offset = PTRS_PER_PUD;
+       else
+               offset = PTRS_PER_PMD;
+       rpte = __real_pte(__pte(old_pte), ptep, offset);
 
        sz = ((1UL) << shift);
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -104,7 +108,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                        return -1;
                }
 
-               new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+               new_pte |= pte_set_hidx(ptep, rpte, 0, slot, offset);
        }
 
        /*
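Each of these call sites used to imply PTRS_PER_PTE as the distance to the second half of the page table; the explicit offset lets hugepage PTEs tracked at PMD or PUD level find their slot bits. A sketch of the lookup the offset feeds (mirroring __real_pte() and pte_set_hidx()):

    /* The hash slot details live one full table's worth of entries
     * past the PTE, so the offset must match the level the PTE lives
     * at: PTRS_PER_PTE for normal pages, PTRS_PER_PMD for most
     * hugepages, PTRS_PER_PUD for 16G pages. */
    unsigned long *hidxp = (unsigned long *)(ptep + offset);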
index eb8c6c8c4851a9a7e25a555dd3e61c7b0613fb4e..2b656e67f2eaaa3914cd74d1cd57e36a5060486b 100644 (file)
@@ -100,6 +100,6 @@ void pgtable_cache_init(void)
         * same size as either the pgd or pmd index except with THP enabled
         * on book3s 64
         */
-       if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
-               pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
+       if (PUD_CACHE_INDEX && !PGT_CACHE(PUD_CACHE_INDEX))
+               pgtable_cache_add(PUD_CACHE_INDEX, pud_ctor);
 }
index 314d19ab9385e038a4f38c18a50364da4873eb86..edd8d0bc9364f2843688498b221d90f53647390d 100644 (file)
@@ -143,11 +143,6 @@ static void reset_numa_cpu_lookup_table(void)
                numa_cpu_lookup_table[cpu] = -1;
 }
 
-static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
-{
-       numa_cpu_lookup_table[cpu] = node;
-}
-
 static void map_cpu_to_node(int cpu, int node)
 {
        update_numa_cpu_lookup_table(cpu, node);
index 573a9a2ee4555c53ab2416e70fb4fdd05fb464da..2e10a964e29080149fe60b5d9a2220fdc710bb79 100644 (file)
 #include <linux/of_fdt.h>
 #include <linux/mm.h>
 #include <linux/string_helpers.h>
+#include <linux/stop_machine.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
 #include <asm/dma.h>
 #include <asm/machdep.h>
 #include <asm/mmu.h>
@@ -333,6 +335,22 @@ static void __init radix_init_pgtable(void)
                     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
        trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
+
+       /*
+        * The init_mm context is given the first available (non-zero) PID,
+        * which is the "guard PID" and contains no page table. PIDR should
+        * never be set to zero because that duplicates the kernel address
+        * space at the 0x0... offset (quadrant 0)!
+        *
+        * An arbitrary PID that may later be allocated by the PID allocator
+        * for userspace processes must not be used either, because that
+        * would cause stale user mappings for that PID on CPUs outside of
+        * the TLB invalidation scheme (because it won't be in mm_cpumask).
+        *
+        * So permanently carve out one PID for the purpose of a guard PID.
+        */
+       init_mm.context.id = mmu_base_pid;
+       mmu_base_pid++;
 }
 
 static void __init radix_init_partition_table(void)
@@ -535,6 +553,7 @@ void __init radix__early_init_mmu(void)
        __pmd_index_size = RADIX_PMD_INDEX_SIZE;
        __pud_index_size = RADIX_PUD_INDEX_SIZE;
        __pgd_index_size = RADIX_PGD_INDEX_SIZE;
+       __pud_cache_index = RADIX_PUD_INDEX_SIZE;
        __pmd_cache_index = RADIX_PMD_INDEX_SIZE;
        __pte_table_size = RADIX_PTE_TABLE_SIZE;
        __pmd_table_size = RADIX_PMD_TABLE_SIZE;
@@ -579,7 +598,8 @@ void __init radix__early_init_mmu(void)
 
        radix_init_iamr();
        radix_init_pgtable();
-
+       /* Switch to the guard PID before turning on MMU */
+       radix__switch_mmu_context(NULL, &init_mm);
        if (cpu_has_feature(CPU_FTR_HVMODE))
                tlbiel_all();
 }
@@ -604,6 +624,7 @@ void radix__early_init_mmu_secondary(void)
        }
        radix_init_iamr();
 
+       radix__switch_mmu_context(NULL, &init_mm);
        if (cpu_has_feature(CPU_FTR_HVMODE))
                tlbiel_all();
 }
@@ -666,6 +687,30 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
        pud_clear(pud);
 }
 
+struct change_mapping_params {
+       pte_t *pte;
+       unsigned long start;
+       unsigned long end;
+       unsigned long aligned_start;
+       unsigned long aligned_end;
+};
+
+static int stop_machine_change_mapping(void *data)
+{
+       struct change_mapping_params *params =
+                       (struct change_mapping_params *)data;
+
+       if (!data)
+               return -1;
+
+       spin_unlock(&init_mm.page_table_lock);
+       pte_clear(&init_mm, params->aligned_start, params->pte);
+       create_physical_mapping(params->aligned_start, params->start);
+       create_physical_mapping(params->end, params->aligned_end);
+       spin_lock(&init_mm.page_table_lock);
+       return 0;
+}
+
 static void remove_pte_table(pte_t *pte_start, unsigned long addr,
                             unsigned long end)
 {
@@ -694,6 +739,52 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
        }
 }
 
+/*
+ * Helper to clear the pte and, if needed, split the mapping.
+ */
+static void split_kernel_mapping(unsigned long addr, unsigned long end,
+                               unsigned long size, pte_t *pte)
+{
+       unsigned long mask = ~(size - 1);
+       unsigned long aligned_start = addr & mask;
+       unsigned long aligned_end = addr + size;
+       struct change_mapping_params params;
+       bool split_region = false;
+
+       if ((end - addr) < size) {
+               /*
+                * We're going to clear the PTE, but we have not
+                * flushed the mapping yet, so remap and flush. The
+                * effects are visible outside the processor; if we
+                * are running in code close to the mapping we
+                * cleared, we are in trouble.
+                */
+               if (overlaps_kernel_text(aligned_start, addr) ||
+                       overlaps_kernel_text(end, aligned_end)) {
+                       /*
+                        * Hack, just return, don't pte_clear
+                        */
+                       WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
+                                 "text, not splitting\n", addr, end);
+                       return;
+               }
+               split_region = true;
+       }
+
+       if (split_region) {
+               params.pte = pte;
+               params.start = addr;
+               params.end = end;
+               params.aligned_start = addr & ~(size - 1);
+               params.aligned_end = min_t(unsigned long, aligned_end,
+                               (unsigned long)__va(memblock_end_of_DRAM()));
+               stop_machine(stop_machine_change_mapping, &params, NULL);
+               return;
+       }
+
+       pte_clear(&init_mm, addr, pte);
+}
+
 static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
                             unsigned long end)
 {
@@ -709,13 +800,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
                        continue;
 
                if (pmd_huge(*pmd)) {
-                       if (!IS_ALIGNED(addr, PMD_SIZE) ||
-                           !IS_ALIGNED(next, PMD_SIZE)) {
-                               WARN_ONCE(1, "%s: unaligned range\n", __func__);
-                               continue;
-                       }
-
-                       pte_clear(&init_mm, addr, (pte_t *)pmd);
+                       split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
                        continue;
                }
 
@@ -740,13 +825,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
                        continue;
 
                if (pud_huge(*pud)) {
-                       if (!IS_ALIGNED(addr, PUD_SIZE) ||
-                           !IS_ALIGNED(next, PUD_SIZE)) {
-                               WARN_ONCE(1, "%s: unaligned range\n", __func__);
-                               continue;
-                       }
-
-                       pte_clear(&init_mm, addr, (pte_t *)pud);
+                       split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
                        continue;
                }
 
@@ -772,13 +851,7 @@ static void remove_pagetable(unsigned long start, unsigned long end)
                        continue;
 
                if (pgd_huge(*pgd)) {
-                       if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
-                           !IS_ALIGNED(next, PGDIR_SIZE)) {
-                               WARN_ONCE(1, "%s: unaligned range\n", __func__);
-                               continue;
-                       }
-
-                       pte_clear(&init_mm, addr, (pte_t *)pgd);
+                       split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
                        continue;
                }
 
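split_kernel_mapping() replaces the old refuse-and-warn path for partially covered huge mappings. A worked example with hypothetical addresses: removing a 64 MiB hot-unplugged range that starts 16 MiB into a 1 GiB PUD mapping makes (end - addr) < size, so the whole operation runs under stop_machine(): the huge PTE is cleared, then the surviving head [aligned_start, start) and tail [end, aligned_end) are recreated with create_physical_mapping(). stop_machine() is presumably what keeps other CPUs from racing with the window in which the mapping is gone.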
index c9a623c2d8a270a14966003258f035fb95d08cff..28c980eb4422284d788716e245934c679925ad86 100644 (file)
@@ -82,6 +82,8 @@ unsigned long __pgd_index_size;
 EXPORT_SYMBOL(__pgd_index_size);
 unsigned long __pmd_cache_index;
 EXPORT_SYMBOL(__pmd_cache_index);
+unsigned long __pud_cache_index;
+EXPORT_SYMBOL(__pud_cache_index);
 unsigned long __pte_table_size;
 EXPORT_SYMBOL(__pte_table_size);
 unsigned long __pmd_table_size;
@@ -471,6 +473,8 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
        if (old & PATB_HR) {
                asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
+               asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
+                            "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
                trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
        } else {
                asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
index 881ebd53ffc27c8840ae57b088c4d247ba7ef191..9b23f12e863cc14ff324b9c5ffed077c3a1012e8 100644 (file)
@@ -51,7 +51,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
        unsigned int psize;
        int ssize;
        real_pte_t rpte;
-       int i;
+       int i, offset;
 
        i = batch->index;
 
@@ -67,6 +67,10 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                psize = get_slice_psize(mm, addr);
                /* Mask the address for the correct page size */
                addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
+               if (unlikely(psize == MMU_PAGE_16G))
+                       offset = PTRS_PER_PUD;
+               else
+                       offset = PTRS_PER_PMD;
 #else
                BUG();
                psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
@@ -78,6 +82,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                 * support 64k pages, this might be different from the
                 * hardware page size encoded in the slice table. */
                addr &= PAGE_MASK;
+               offset = PTRS_PER_PTE;
        }
 
 
@@ -91,7 +96,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
        }
        WARN_ON(vsid == 0);
        vpn = hpt_vpn(addr, vsid, ssize);
-       rpte = __real_pte(__pte(pte), ptep);
+       rpte = __real_pte(__pte(pte), ptep, offset);
 
        /*
         * Check if we have an active batch on this CPU. If not, just
index dd4c9b8b8a81e6967b29061014918b4f591921df..f6f55ab4980e7684a09942a510daf689f79f6d1c 100644 (file)
@@ -199,9 +199,11 @@ static void disable_nest_pmu_counters(void)
        const struct cpumask *l_cpumask;
 
        get_online_cpus();
-       for_each_online_node(nid) {
+       for_each_node_with_cpus(nid) {
                l_cpumask = cpumask_of_node(nid);
-               cpu = cpumask_first(l_cpumask);
+               cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
+               if (cpu >= nr_cpu_ids)
+                       continue;
                opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
                                       get_hard_smp_processor_id(cpu));
        }
index 2b3eb01ab1107145395b0c697da4743bd2e97c8c..b7c53a51c31bbe5ba5fa62adbf6d1e97c3c1e8c3 100644 (file)
@@ -1063,16 +1063,16 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
                        rc = PTR_ERR(txwin->paste_kaddr);
                        goto free_window;
                }
+       } else {
+               /*
+                * For a user mapping, the context switch path must
+                * issue CP_ABORT for this thread.
+                */
+               rc = set_thread_uses_vas();
+               if (rc)
+                       goto free_window;
        }
 
-       /*
-        * Now that we have a send window, ensure context switch issues
-        * CP_ABORT for this thread.
-        */
-       rc = -EINVAL;
-       if (set_thread_uses_vas() < 0)
-               goto free_window;
-
        set_vinst_win(vinst, txwin);
 
        return txwin;
index dceb51454d8d212a5cbc78ef891322bddf499800..652d3e96b812b93834323e1a3a60a1e5bbab3612 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/xics.h>
 #include <asm/xive.h>
 #include <asm/plpar_wrappers.h>
+#include <asm/topology.h>
 
 #include "pseries.h"
 #include "offline_states.h"
@@ -331,6 +332,7 @@ static void pseries_remove_processor(struct device_node *np)
                        BUG_ON(cpu_online(cpu));
                        set_cpu_present(cpu, false);
                        set_hard_smp_processor_id(cpu, -1);
+                       update_numa_cpu_lookup_table(cpu, -1);
                        break;
                }
                if (cpu >= nr_cpu_ids)
@@ -340,8 +342,6 @@ static void pseries_remove_processor(struct device_node *np)
        cpu_maps_update_done();
 }
 
-extern int find_and_online_cpu_nid(int cpu);
-
 static int dlpar_online_cpu(struct device_node *dn)
 {
        int rc = 0;
index 81d8614e73790b1923a3c8cfd336a2531fee76ce..5e1ef915018208c3511ef0e91c0064c8c9474389 100644 (file)
@@ -48,6 +48,28 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
 static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
 
 
+/*
+ * Enable the hotplug interrupt late, because processing it may touch
+ * other devices or subsystems (e.g. hugepages) that have not been
+ * initialized by the subsys stage.
+ */
+int __init init_ras_hotplug_IRQ(void)
+{
+       struct device_node *np;
+
+       /* Hotplug Events */
+       np = of_find_node_by_path("/event-sources/hot-plug-events");
+       if (np != NULL) {
+               if (dlpar_workqueue_init() == 0)
+                       request_event_sources_irqs(np, ras_hotplug_interrupt,
+                                                  "RAS_HOTPLUG");
+               of_node_put(np);
+       }
+
+       return 0;
+}
+machine_late_initcall(pseries, init_ras_hotplug_IRQ);
+
 /*
  * Initialize handlers for the set of interrupts caused by hardware errors
  * and power system events.
@@ -66,15 +88,6 @@ static int __init init_ras_IRQ(void)
                of_node_put(np);
        }
 
-       /* Hotplug Events */
-       np = of_find_node_by_path("/event-sources/hot-plug-events");
-       if (np != NULL) {
-               if (dlpar_workqueue_init() == 0)
-                       request_event_sources_irqs(np, ras_hotplug_interrupt,
-                                          "RAS_HOTPLUG");
-               of_node_put(np);
-       }
-
        /* EPOW Events */
        np = of_find_node_by_path("/event-sources/epow-events");
        if (np != NULL) {
index d9c4c93660491849029044d37ab8e60160b0d7de..091f1d0d0af190a0d6f274b8cab32960cc1eef86 100644 (file)
@@ -356,7 +356,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
 
        rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
        if (rc) {
-               pr_err("Error %lld getting queue info prio %d\n", rc, prio);
+               pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
+                      target, prio);
                rc = -EIO;
                goto fail;
        }
@@ -370,7 +371,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
        /* Configure and enable the queue in HW */
        rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
        if (rc) {
-               pr_err("Error %lld setting queue for prio %d\n", rc, prio);
+               pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
+                      target, prio);
                rc = -EIO;
        } else {
                q->qpage = qpage;
@@ -389,8 +391,8 @@ static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
        if (IS_ERR(qpage))
                return PTR_ERR(qpage);
 
-       return xive_spapr_configure_queue(cpu, q, prio, qpage,
-                                         xive_queue_shift);
+       return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
+                                         q, prio, qpage, xive_queue_shift);
 }
 
 static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
@@ -399,10 +401,12 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
        struct xive_q *q = &xc->queue[prio];
        unsigned int alloc_order;
        long rc;
+       int hw_cpu = get_hard_smp_processor_id(cpu);
 
-       rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0);
+       rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
        if (rc)
-               pr_err("Error %ld setting queue for prio %d\n", rc, prio);
+               pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
+                      hw_cpu, prio);
 
        alloc_order = xive_alloc_order(xive_queue_shift);
        free_pages((unsigned long)q->qpage, alloc_order);
index 6bf594ace663ec82f746132b4f62fc351bf5160c..8767e45f1b2b70953583a7a7157707696466d407 100644 (file)
@@ -430,6 +430,8 @@ config SPARC_LEON
        depends on SPARC32
        select USB_EHCI_BIG_ENDIAN_MMIO
        select USB_EHCI_BIG_ENDIAN_DESC
+       select USB_UHCI_BIG_ENDIAN_MMIO
+       select USB_UHCI_BIG_ENDIAN_DESC
        ---help---
          Say Y here if you are running on a SPARC-LEON processor.
          The LEON processor is a synthesizable VHDL model of the
index aff152c87cf4ba62ed26ed6b7eb567b576dd84fd..5a82bac5e0bc7985529aa537e981997109945309 100644 (file)
@@ -1,6 +1,7 @@
 boot/compressed/vmlinux
 tools/test_get_len
 tools/insn_sanity
+tools/insn_decoder_test
 purgatory/kexec-purgatory.c
 purgatory/purgatory.ro
 
index 63bf349b2b24a8807c4f65869af50bab99e4c2f7..c1236b187824e222a2c7fddd417272369767b06b 100644 (file)
@@ -423,12 +423,6 @@ config X86_MPPARSE
          For old SMP systems that do not have proper ACPI support. On newer
          systems (especially with 64-bit CPUs) with ACPI support, MADT and
          DSDT will override it.
 
-config X86_BIGSMP
-       bool "Support for big SMP systems with more than 8 CPUs"
-       depends on X86_32 && SMP
-       ---help---
-         This option is needed for the systems that have more than 8 CPUs
-
 config GOLDFISH
        def_bool y
        depends on X86_GOLDFISH
@@ -460,6 +454,12 @@ config INTEL_RDT
          Say N if unsure.
 
 if X86_32
+config X86_BIGSMP
+       bool "Support for big SMP systems with more than 8 CPUs"
+       depends on SMP
+       ---help---
+         This option is needed for systems that have more than 8 CPUs.
+
 config X86_EXTENDED_PLATFORM
        bool "Support for extended (non-PC) x86 platforms"
        default y
@@ -949,25 +949,66 @@ config MAXSMP
          Enable maximum number of CPUS and NUMA Nodes for this architecture.
          If unsure, say N.
 
+#
+# The maximum number of CPUs supported:
+#
+# The main config value is NR_CPUS, which defaults to NR_CPUS_DEFAULT,
+# and which can be configured interactively in the
+# [NR_CPUS_RANGE_BEGIN ... NR_CPUS_RANGE_END] range.
+#
+# The ranges are different on 32-bit and 64-bit kernels, depending on
+# hardware capabilities and scalability features of the kernel.
+#
+# ( If MAXSMP is enabled we just use the highest possible value and disable
+#   interactive configuration. )
+#
+
+config NR_CPUS_RANGE_BEGIN
+       int
+       default NR_CPUS_RANGE_END if MAXSMP
+       default    1 if !SMP
+       default    2
+
+config NR_CPUS_RANGE_END
+       int
+       depends on X86_32
+       default   64 if  SMP &&  X86_BIGSMP
+       default    8 if  SMP && !X86_BIGSMP
+       default    1 if !SMP
+
+config NR_CPUS_RANGE_END
+       int
+       depends on X86_64
+       default 8192 if  SMP && ( MAXSMP ||  CPUMASK_OFFSTACK)
+       default  512 if  SMP && (!MAXSMP && !CPUMASK_OFFSTACK)
+       default    1 if !SMP
+
+config NR_CPUS_DEFAULT
+       int
+       depends on X86_32
+       default   32 if  X86_BIGSMP
+       default    8 if  SMP
+       default    1 if !SMP
+
+config NR_CPUS_DEFAULT
+       int
+       depends on X86_64
+       default 8192 if  MAXSMP
+       default   64 if  SMP
+       default    1 if !SMP
+
 config NR_CPUS
        int "Maximum number of CPUs" if SMP && !MAXSMP
-       range 2 8 if SMP && X86_32 && !X86_BIGSMP
-       range 2 64 if SMP && X86_32 && X86_BIGSMP
-       range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64
-       range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64
-       default "1" if !SMP
-       default "8192" if MAXSMP
-       default "32" if SMP && X86_BIGSMP
-       default "8" if SMP && X86_32
-       default "64" if SMP
+       range NR_CPUS_RANGE_BEGIN NR_CPUS_RANGE_END
+       default NR_CPUS_DEFAULT
        ---help---
          This allows you to specify the maximum number of CPUs which this
          kernel will support.  If CPUMASK_OFFSTACK is enabled, the maximum
          supported value is 8192, otherwise the maximum value is 512.  The
          minimum value which makes sense is 2.
 
-         This is purely to save memory - each supported CPU adds
-         approximately eight kilobytes to the kernel image.
+         This is purely to save memory: each supported CPU adds about 8KB
+         to the kernel image.
 
 config SCHED_SMT
        bool "SMT (Hyperthreading) scheduler support"
@@ -1363,7 +1404,7 @@ config HIGHMEM4G
 
 config HIGHMEM64G
        bool "64GB"
-       depends on !M486
+       depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !MWINCHIP3D && !MK6
        select X86_PAE
        ---help---
          Select this if you have a 32-bit processor and more than 4
index 65a9a4716e34f55394d057335629d0b32ec55ada..8b8d2297d4867b06acaecf31e3be7124888f4d79 100644 (file)
@@ -374,7 +374,7 @@ config X86_TSC
 
 config X86_CMPXCHG64
        def_bool y
-       depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
+       depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
 
 # this should be set for all -march=.. options where the compiler
 # generates cmov.
@@ -385,7 +385,7 @@ config X86_CMOV
 config X86_MINIMUM_CPU_FAMILY
        int
        default "64" if X86_64
-       default "6" if X86_32 && X86_P6_NOP
+       default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
        default "5" if X86_32 && X86_CMPXCHG64
        default "4"
 
index 36870b26067a73655b96d137c04977c8f392237d..d08805032f0193ab96dde00890a0502a9f2b24e6 100644 (file)
@@ -57,10 +57,12 @@ void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state)
 {
        unsigned int j;
 
-       state->lens[0] = 0;
-       state->lens[1] = 1;
-       state->lens[2] = 2;
-       state->lens[3] = 3;
+       /* initially all lanes are unused */
+       state->lens[0] = 0xFFFFFFFF00000000;
+       state->lens[1] = 0xFFFFFFFF00000001;
+       state->lens[2] = 0xFFFFFFFF00000002;
+       state->lens[3] = 0xFFFFFFFF00000003;
+
        state->unused_lanes = 0xFF03020100;
        for (j = 0; j < 4; j++)
                state->ldata[j].job_in_lane = NULL;
index 3f48f695d5e6ac6546a009c734fcac517564b24d..dce7092ab24a247c1165f80b17c687d255023a05 100644 (file)
@@ -97,80 +97,69 @@ For 32-bit we have the following conventions - kernel is built with
 
 #define SIZEOF_PTREGS  21*8
 
-       .macro ALLOC_PT_GPREGS_ON_STACK
-       addq    $-(15*8), %rsp
-       .endm
+.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
+       /*
+        * Push registers and clear those whose stale values a
+        * speculation attack might otherwise want to exploit. The
+        * lower registers are likely clobbered well before they
+        * could be put to use in a speculative execution gadget.
+        * Interleave XOR with PUSH for better uop scheduling:
+        */
+       pushq   %rdi            /* pt_regs->di */
+       pushq   %rsi            /* pt_regs->si */
+       pushq   \rdx            /* pt_regs->dx */
+       pushq   %rcx            /* pt_regs->cx */
+       pushq   \rax            /* pt_regs->ax */
+       pushq   %r8             /* pt_regs->r8 */
+       xorq    %r8, %r8        /* nospec   r8 */
+       pushq   %r9             /* pt_regs->r9 */
+       xorq    %r9, %r9        /* nospec   r9 */
+       pushq   %r10            /* pt_regs->r10 */
+       xorq    %r10, %r10      /* nospec   r10 */
+       pushq   %r11            /* pt_regs->r11 */
+       xorq    %r11, %r11      /* nospec   r11 */
+       pushq   %rbx            /* pt_regs->rbx */
+       xorl    %ebx, %ebx      /* nospec   rbx */
+       pushq   %rbp            /* pt_regs->rbp */
+       xorl    %ebp, %ebp      /* nospec   rbp */
+       pushq   %r12            /* pt_regs->r12 */
+       xorq    %r12, %r12      /* nospec   r12 */
+       pushq   %r13            /* pt_regs->r13 */
+       xorq    %r13, %r13      /* nospec   r13 */
+       pushq   %r14            /* pt_regs->r14 */
+       xorq    %r14, %r14      /* nospec   r14 */
+       pushq   %r15            /* pt_regs->r15 */
+       xorq    %r15, %r15      /* nospec   r15 */
+       UNWIND_HINT_REGS
+.endm
 
-       .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
-       .if \r11
-       movq %r11, 6*8+\offset(%rsp)
-       .endif
-       .if \r8910
-       movq %r10, 7*8+\offset(%rsp)
-       movq %r9,  8*8+\offset(%rsp)
-       movq %r8,  9*8+\offset(%rsp)
-       .endif
-       .if \rax
-       movq %rax, 10*8+\offset(%rsp)
-       .endif
-       .if \rcx
-       movq %rcx, 11*8+\offset(%rsp)
-       .endif
-       movq %rdx, 12*8+\offset(%rsp)
-       movq %rsi, 13*8+\offset(%rsp)
-       movq %rdi, 14*8+\offset(%rsp)
-       UNWIND_HINT_REGS offset=\offset extra=0
-       .endm
-       .macro SAVE_C_REGS offset=0
-       SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
-       .endm
-       .macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
-       SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
-       .endm
-       .macro SAVE_C_REGS_EXCEPT_R891011
-       SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
-       .endm
-       .macro SAVE_C_REGS_EXCEPT_RCX_R891011
-       SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
-       .endm
-       .macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
-       SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
-       .endm
-
-       .macro SAVE_EXTRA_REGS offset=0
-       movq %r15, 0*8+\offset(%rsp)
-       movq %r14, 1*8+\offset(%rsp)
-       movq %r13, 2*8+\offset(%rsp)
-       movq %r12, 3*8+\offset(%rsp)
-       movq %rbp, 4*8+\offset(%rsp)
-       movq %rbx, 5*8+\offset(%rsp)
-       UNWIND_HINT_REGS offset=\offset
-       .endm
-
-       .macro POP_EXTRA_REGS
+.macro POP_REGS pop_rdi=1 skip_r11rcx=0
        popq %r15
        popq %r14
        popq %r13
        popq %r12
        popq %rbp
        popq %rbx
-       .endm
-
-       .macro POP_C_REGS
+       .if \skip_r11rcx
+       popq %rsi
+       .else
        popq %r11
+       .endif
        popq %r10
        popq %r9
        popq %r8
        popq %rax
+       .if \skip_r11rcx
+       popq %rsi
+       .else
        popq %rcx
+       .endif
        popq %rdx
        popq %rsi
+       .if \pop_rdi
        popq %rdi
-       .endm
-
-       .macro icebp
-       .byte 0xf1
-       .endm
+       .endif
+.endm
 
 /*
  * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
@@ -178,7 +167,7 @@ For 32-bit we have the following conventions - kernel is built with
  * is just setting the LSB, which makes it an invalid stack address and is also
  * a signal to the unwinder that it's a pt_regs pointer in disguise.
  *
- * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts
+ * NOTE: This macro must be used *after* PUSH_AND_CLEAR_REGS because it corrupts
  * the original rbp.
  */
 .macro ENCODE_FRAME_POINTER ptregs_offset=0
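One encoding note on PUSH_AND_CLEAR_REGS above: the classic registers are cleared with 32-bit xorl (a 32-bit result zero-extends to the full 64-bit register, so the REX.W prefix byte can be saved), while r8-r15 are cleared with xorq, presumably because addressing those registers requires a REX prefix anyway.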
index 30c8c5344c4a5dcfeb96d0711a322e50de33d324..8971bd64d515c5bb4a9b95108fd802b8418764f2 100644 (file)
@@ -213,7 +213,7 @@ ENTRY(entry_SYSCALL_64)
 
        swapgs
        /*
-        * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it
+        * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it
         * is not required to switch CR3.
         */
        movq    %rsp, PER_CPU_VAR(rsp_scratch)
@@ -227,22 +227,8 @@ ENTRY(entry_SYSCALL_64)
        pushq   %rcx                            /* pt_regs->ip */
 GLOBAL(entry_SYSCALL_64_after_hwframe)
        pushq   %rax                            /* pt_regs->orig_ax */
-       pushq   %rdi                            /* pt_regs->di */
-       pushq   %rsi                            /* pt_regs->si */
-       pushq   %rdx                            /* pt_regs->dx */
-       pushq   %rcx                            /* pt_regs->cx */
-       pushq   $-ENOSYS                        /* pt_regs->ax */
-       pushq   %r8                             /* pt_regs->r8 */
-       pushq   %r9                             /* pt_regs->r9 */
-       pushq   %r10                            /* pt_regs->r10 */
-       pushq   %r11                            /* pt_regs->r11 */
-       pushq   %rbx                            /* pt_regs->rbx */
-       pushq   %rbp                            /* pt_regs->rbp */
-       pushq   %r12                            /* pt_regs->r12 */
-       pushq   %r13                            /* pt_regs->r13 */
-       pushq   %r14                            /* pt_regs->r14 */
-       pushq   %r15                            /* pt_regs->r15 */
-       UNWIND_HINT_REGS
+
+       PUSH_AND_CLEAR_REGS rax=$-ENOSYS
 
        TRACE_IRQS_OFF
 
@@ -321,15 +307,7 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
 syscall_return_via_sysret:
        /* rcx and r11 are already restored (see code above) */
        UNWIND_HINT_EMPTY
-       POP_EXTRA_REGS
-       popq    %rsi    /* skip r11 */
-       popq    %r10
-       popq    %r9
-       popq    %r8
-       popq    %rax
-       popq    %rsi    /* skip rcx */
-       popq    %rdx
-       popq    %rsi
+       POP_REGS pop_rdi=0 skip_r11rcx=1
 
        /*
         * Now all regs are restored except RSP and RDI.
@@ -559,9 +537,7 @@ END(irq_entries_start)
        call    switch_to_thread_stack
 1:
 
-       ALLOC_PT_GPREGS_ON_STACK
-       SAVE_C_REGS
-       SAVE_EXTRA_REGS
+       PUSH_AND_CLEAR_REGS
        ENCODE_FRAME_POINTER
 
        testb   $3, CS(%rsp)
@@ -622,15 +598,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
        ud2
 1:
 #endif
-       POP_EXTRA_REGS
-       popq    %r11
-       popq    %r10
-       popq    %r9
-       popq    %r8
-       popq    %rax
-       popq    %rcx
-       popq    %rdx
-       popq    %rsi
+       POP_REGS pop_rdi=0
 
        /*
         * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
@@ -688,8 +656,7 @@ GLOBAL(restore_regs_and_return_to_kernel)
        ud2
 1:
 #endif
-       POP_EXTRA_REGS
-       POP_C_REGS
+       POP_REGS
        addq    $8, %rsp        /* skip regs->orig_ax */
        /*
         * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
@@ -908,7 +875,9 @@ ENTRY(\sym)
        pushq   $-1                             /* ORIG_RAX: no syscall to restart */
        .endif
 
-       ALLOC_PT_GPREGS_ON_STACK
+       /* Save all registers in pt_regs */
+       PUSH_AND_CLEAR_REGS
+       ENCODE_FRAME_POINTER
 
        .if \paranoid < 2
        testb   $3, CS(%rsp)                    /* If coming from userspace, switch stacks */
@@ -1121,9 +1090,7 @@ ENTRY(xen_failsafe_callback)
        addq    $0x30, %rsp
        UNWIND_HINT_IRET_REGS
        pushq   $-1 /* orig_ax = -1 => not a system call */
-       ALLOC_PT_GPREGS_ON_STACK
-       SAVE_C_REGS
-       SAVE_EXTRA_REGS
+       PUSH_AND_CLEAR_REGS
        ENCODE_FRAME_POINTER
        jmp     error_exit
 END(xen_failsafe_callback)
@@ -1163,16 +1130,13 @@ idtentry machine_check          do_mce                  has_error_code=0        paranoid=1
 #endif
 
 /*
- * Save all registers in pt_regs, and switch gs if needed.
+ * Switch gs if needed.
  * Use slow, but surefire "are we in kernel?" check.
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
 ENTRY(paranoid_entry)
        UNWIND_HINT_FUNC
        cld
-       SAVE_C_REGS 8
-       SAVE_EXTRA_REGS 8
-       ENCODE_FRAME_POINTER 8
        movl    $1, %ebx
        movl    $MSR_GS_BASE, %ecx
        rdmsr
@@ -1211,21 +1175,18 @@ ENTRY(paranoid_exit)
        jmp     .Lparanoid_exit_restore
 .Lparanoid_exit_no_swapgs:
        TRACE_IRQS_IRETQ_DEBUG
+       RESTORE_CR3     scratch_reg=%rbx save_reg=%r14
 .Lparanoid_exit_restore:
        jmp restore_regs_and_return_to_kernel
 END(paranoid_exit)
 
 /*
- * Save all registers in pt_regs, and switch gs if needed.
+ * Switch gs if needed.
  * Return: EBX=0: came from user mode; EBX=1: otherwise
  */
 ENTRY(error_entry)
-       UNWIND_HINT_FUNC
+       UNWIND_HINT_REGS offset=8
        cld
-       SAVE_C_REGS 8
-       SAVE_EXTRA_REGS 8
-       ENCODE_FRAME_POINTER 8
-       xorl    %ebx, %ebx
        testb   $3, CS+8(%rsp)
        jz      .Lerror_kernelspace
 
@@ -1406,22 +1367,7 @@ ENTRY(nmi)
        pushq   1*8(%rdx)       /* pt_regs->rip */
        UNWIND_HINT_IRET_REGS
        pushq   $-1             /* pt_regs->orig_ax */
-       pushq   %rdi            /* pt_regs->di */
-       pushq   %rsi            /* pt_regs->si */
-       pushq   (%rdx)          /* pt_regs->dx */
-       pushq   %rcx            /* pt_regs->cx */
-       pushq   %rax            /* pt_regs->ax */
-       pushq   %r8             /* pt_regs->r8 */
-       pushq   %r9             /* pt_regs->r9 */
-       pushq   %r10            /* pt_regs->r10 */
-       pushq   %r11            /* pt_regs->r11 */
-       pushq   %rbx            /* pt_regs->rbx */
-       pushq   %rbp            /* pt_regs->rbp */
-       pushq   %r12            /* pt_regs->r12 */
-       pushq   %r13            /* pt_regs->r13 */
-       pushq   %r14            /* pt_regs->r14 */
-       pushq   %r15            /* pt_regs->r15 */
-       UNWIND_HINT_REGS
+       PUSH_AND_CLEAR_REGS rdx=(%rdx)
        ENCODE_FRAME_POINTER
 
        /*
@@ -1631,7 +1577,8 @@ end_repeat_nmi:
         * frame to point back to repeat_nmi.
         */
        pushq   $-1                             /* ORIG_RAX: no syscall to restart */
-       ALLOC_PT_GPREGS_ON_STACK
+       PUSH_AND_CLEAR_REGS
+       ENCODE_FRAME_POINTER
 
        /*
         * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
@@ -1655,8 +1602,7 @@ end_repeat_nmi:
 nmi_swapgs:
        SWAPGS_UNSAFE_STACK
 nmi_restore:
-       POP_EXTRA_REGS
-       POP_C_REGS
+       POP_REGS
 
        /*
         * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
index 98d5358e4041a7e144ec566f7db19ff054cedbcc..fd65e016e4133f5634545062d098fc25fb5c0b1d 100644 (file)
@@ -85,15 +85,25 @@ ENTRY(entry_SYSENTER_compat)
        pushq   %rcx                    /* pt_regs->cx */
        pushq   $-ENOSYS                /* pt_regs->ax */
        pushq   $0                      /* pt_regs->r8  = 0 */
+       xorq    %r8, %r8                /* nospec   r8 */
        pushq   $0                      /* pt_regs->r9  = 0 */
+       xorq    %r9, %r9                /* nospec   r9 */
        pushq   $0                      /* pt_regs->r10 = 0 */
+       xorq    %r10, %r10              /* nospec   r10 */
        pushq   $0                      /* pt_regs->r11 = 0 */
+       xorq    %r11, %r11              /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
+       xorl    %ebx, %ebx              /* nospec   rbx */
        pushq   %rbp                    /* pt_regs->rbp (will be overwritten) */
+       xorl    %ebp, %ebp              /* nospec   rbp */
        pushq   $0                      /* pt_regs->r12 = 0 */
+       xorq    %r12, %r12              /* nospec   r12 */
        pushq   $0                      /* pt_regs->r13 = 0 */
+       xorq    %r13, %r13              /* nospec   r13 */
        pushq   $0                      /* pt_regs->r14 = 0 */
+       xorq    %r14, %r14              /* nospec   r14 */
        pushq   $0                      /* pt_regs->r15 = 0 */
+       xorq    %r15, %r15              /* nospec   r15 */
        cld
 
        /*
@@ -214,15 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
        pushq   %rbp                    /* pt_regs->cx (stashed in bp) */
        pushq   $-ENOSYS                /* pt_regs->ax */
        pushq   $0                      /* pt_regs->r8  = 0 */
+       xorq    %r8, %r8                /* nospec   r8 */
        pushq   $0                      /* pt_regs->r9  = 0 */
+       xorq    %r9, %r9                /* nospec   r9 */
        pushq   $0                      /* pt_regs->r10 = 0 */
+       xorq    %r10, %r10              /* nospec   r10 */
        pushq   $0                      /* pt_regs->r11 = 0 */
+       xorq    %r11, %r11              /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
+       xorl    %ebx, %ebx              /* nospec   rbx */
        pushq   %rbp                    /* pt_regs->rbp (will be overwritten) */
+       xorl    %ebp, %ebp              /* nospec   rbp */
        pushq   $0                      /* pt_regs->r12 = 0 */
+       xorq    %r12, %r12              /* nospec   r12 */
        pushq   $0                      /* pt_regs->r13 = 0 */
+       xorq    %r13, %r13              /* nospec   r13 */
        pushq   $0                      /* pt_regs->r14 = 0 */
+       xorq    %r14, %r14              /* nospec   r14 */
        pushq   $0                      /* pt_regs->r15 = 0 */
+       xorq    %r15, %r15              /* nospec   r15 */
 
        /*
         * User mode is traced as though IRQs are on, and SYSENTER
@@ -338,15 +358,25 @@ ENTRY(entry_INT80_compat)
        pushq   %rcx                    /* pt_regs->cx */
        pushq   $-ENOSYS                /* pt_regs->ax */
        pushq   $0                      /* pt_regs->r8  = 0 */
+       xorq    %r8, %r8                /* nospec   r8 */
        pushq   $0                      /* pt_regs->r9  = 0 */
+       xorq    %r9, %r9                /* nospec   r9 */
        pushq   $0                      /* pt_regs->r10 = 0 */
+       xorq    %r10, %r10              /* nospec   r10 */
        pushq   $0                      /* pt_regs->r11 = 0 */
+       xorq    %r11, %r11              /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
+       xorl    %ebx, %ebx              /* nospec   rbx */
        pushq   %rbp                    /* pt_regs->rbp */
+       xorl    %ebp, %ebp              /* nospec   rbp */
        pushq   %r12                    /* pt_regs->r12 */
+       xorq    %r12, %r12              /* nospec   r12 */
        pushq   %r13                    /* pt_regs->r13 */
+       xorq    %r13, %r13              /* nospec   r13 */
        pushq   %r14                    /* pt_regs->r14 */
+       xorq    %r14, %r14              /* nospec   r14 */
        pushq   %r15                    /* pt_regs->r15 */
+       xorq    %r15, %r15              /* nospec   r15 */
        cld
 
        /*
index 731153a4681e73f761dea8c0c15ce6757b89860e..56457cb73448b494b1aed2138fcee8e13d50d81b 100644 (file)
@@ -3559,7 +3559,7 @@ static int intel_snb_pebs_broken(int cpu)
                break;
 
        case INTEL_FAM6_SANDYBRIDGE_X:
-               switch (cpu_data(cpu).x86_mask) {
+               switch (cpu_data(cpu).x86_stepping) {
                case 6: rev = 0x618; break;
                case 7: rev = 0x70c; break;
                }
index ae64d0b69729dbb23c436d6c3bbf360a3cafb53b..cf372b90557ed4e8a788c8f97b515ac956d2512e 100644 (file)
@@ -1186,7 +1186,7 @@ void __init intel_pmu_lbr_init_atom(void)
         * on PMU interrupt
         */
        if (boot_cpu_data.x86_model == 28
-           && boot_cpu_data.x86_mask < 10) {
+           && boot_cpu_data.x86_stepping < 10) {
                pr_cont("LBR disabled due to erratum");
                return;
        }
index a5604c3529308b7cfc1dc336a8d53496d531659b..408879b0c0d4e41c56906d2464734279c9360f7e 100644 (file)
@@ -234,7 +234,7 @@ static __initconst const struct x86_pmu p6_pmu = {
 
 static __init void p6_pmu_rdpmc_quirk(void)
 {
-       if (boot_cpu_data.x86_mask < 9) {
+       if (boot_cpu_data.x86_stepping < 9) {
                /*
                 * PPro erratum 26; fixed in stepping 9 and above.
                 */
index 44f5d79d51056b036e7ef4536d7bfe340dae9747..11881726ed37290128f9bca07b9944aa2d818677 100644 (file)
@@ -94,7 +94,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
        if (boot_cpu_data.x86 == 0x0F &&
            boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
            boot_cpu_data.x86_model <= 0x05 &&
-           boot_cpu_data.x86_mask < 0x0A)
+           boot_cpu_data.x86_stepping < 0x0A)
                return 1;
        else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E))
                return 1;
index 30d40614601641b9fc2dbd5ce734ff86d0cdd731..e1259f043ae999fa21e1f998431ab12cd73a11ea 100644 (file)
@@ -40,7 +40,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 
        asm ("cmp %1,%2; sbb %0,%0;"
                        :"=r" (mask)
-                       :"r"(size),"r" (index)
+                       :"g"(size),"r" (index)
                        :"cc");
        return mask;
 }
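
Note: The hunk above relaxes the "size" operand of array_index_mask_nospec() from "r"
to "g", letting the compiler feed CMP an immediate or memory operand instead of forcing
a register. The semantics are unchanged: CMP sets the carry flag when index < size, and
SBB of a register with itself then yields all-ones (in range) or zero (out of range).
Callers AND the mask into the index so even a mispredicted bounds check cannot read out
of bounds. A self-contained sketch of the caller-side pattern (mirroring the kernel's
array_index_nospec() helper):

        static inline unsigned long clamp_index_demo(unsigned long idx,
                                                     unsigned long size)
        {
                unsigned long mask;

                /* CF is set iff idx < size; SBB turns CF into 0 or ~0UL. */
                asm ("cmp %1,%2; sbb %0,%0"
                                :"=r" (mask)
                                :"g"(size),"r" (idx)
                                :"cc");

                /* idx & mask is 0 whenever idx >= size, even speculatively. */
                return idx & mask;
        }
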
index 34d99af43994453e1cec89aa202cc17b052ee690..6804d66427673ec314659944e65052b5dfba273e 100644 (file)
@@ -5,23 +5,20 @@
 #include <linux/stringify.h>
 
 /*
- * Since some emulators terminate on UD2, we cannot use it for WARN.
- * Since various instruction decoders disagree on the length of UD1,
- * we cannot use it either. So use UD0 for WARN.
+ * Even though some emulators terminate on UD2, we use it for WARN().
  *
- * (binutils knows about "ud1" but {en,de}codes it as 2 bytes, whereas
- *  our kernel decoder thinks it takes a ModRM byte, which seems consistent
- *  with various things like the Intel SDM instruction encoding rules)
+ * Various instruction decoders/specs disagree on the encoding of
+ * UD0/UD1, so we avoid those.
  */
 
-#define ASM_UD0                ".byte 0x0f, 0xff"
+#define ASM_UD0                ".byte 0x0f, 0xff" /* + ModRM (for Intel) */
 #define ASM_UD1                ".byte 0x0f, 0xb9" /* + ModRM */
 #define ASM_UD2                ".byte 0x0f, 0x0b"
 
 #define INSN_UD0       0xff0f
 #define INSN_UD2       0x0b0f
 
-#define LEN_UD0                2
+#define LEN_UD2                2
 
 #ifdef CONFIG_GENERIC_BUG
 
@@ -77,7 +74,11 @@ do {                                                         \
        unreachable();                                          \
 } while (0)
 
-#define __WARN_FLAGS(flags)    _BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags))
+#define __WARN_FLAGS(flags)                                    \
+do {                                                           \
+       _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags));           \
+       annotate_reachable();                                   \
+} while (0)
 
 #include <asm-generic/bug.h>
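
Note: With WARN() now emitting a real UD2, the #UD handler looks the trapping address up
in the bug table, prints the warning, and resumes by skipping the two-byte opcode; that
is why the traps.c hunk later in this diff switches fixup_bug() from LEN_UD0 to LEN_UD2,
and why __WARN_FLAGS() gains annotate_reachable() to tell objtool that control flow
continues past the trapping instruction. A simplified, self-contained sketch of the
recovery step (the struct and names here are illustrative, not the kernel's):

        struct regs_demo { unsigned long ip; };

        static int fixup_warn_demo(struct regs_demo *regs)
        {
                regs->ip += 2;  /* LEN_UD2: size of the UD2 opcode */
                return 1;       /* handled; execution continues after UD2 */
        }
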
 
index 70eddb3922ff7b3e44fc27b9426da39aa4e2b6fa..736771c9822ef965233b7114fd0b1a025e8c3a46 100644 (file)
@@ -148,45 +148,46 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
  */
 static __always_inline __pure bool _static_cpu_has(u16 bit)
 {
-               asm_volatile_goto("1: jmp 6f\n"
-                        "2:\n"
-                        ".skip -(((5f-4f) - (2b-1b)) > 0) * "
-                                "((5f-4f) - (2b-1b)),0x90\n"
-                        "3:\n"
-                        ".section .altinstructions,\"a\"\n"
-                        " .long 1b - .\n"              /* src offset */
-                        " .long 4f - .\n"              /* repl offset */
-                        " .word %P1\n"                 /* always replace */
-                        " .byte 3b - 1b\n"             /* src len */
-                        " .byte 5f - 4f\n"             /* repl len */
-                        " .byte 3b - 2b\n"             /* pad len */
-                        ".previous\n"
-                        ".section .altinstr_replacement,\"ax\"\n"
-                        "4: jmp %l[t_no]\n"
-                        "5:\n"
-                        ".previous\n"
-                        ".section .altinstructions,\"a\"\n"
-                        " .long 1b - .\n"              /* src offset */
-                        " .long 0\n"                   /* no replacement */
-                        " .word %P0\n"                 /* feature bit */
-                        " .byte 3b - 1b\n"             /* src len */
-                        " .byte 0\n"                   /* repl len */
-                        " .byte 0\n"                   /* pad len */
-                        ".previous\n"
-                        ".section .altinstr_aux,\"ax\"\n"
-                        "6:\n"
-                        " testb %[bitnum],%[cap_byte]\n"
-                        " jnz %l[t_yes]\n"
-                        " jmp %l[t_no]\n"
-                        ".previous\n"
-                        : : "i" (bit), "i" (X86_FEATURE_ALWAYS),
-                            [bitnum] "i" (1 << (bit & 7)),
-                            [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
-                        : : t_yes, t_no);
-       t_yes:
-               return true;
-       t_no:
-               return false;
+       asm_volatile_goto("1: jmp 6f\n"
+                "2:\n"
+                ".skip -(((5f-4f) - (2b-1b)) > 0) * "
+                        "((5f-4f) - (2b-1b)),0x90\n"
+                "3:\n"
+                ".section .altinstructions,\"a\"\n"
+                " .long 1b - .\n"              /* src offset */
+                " .long 4f - .\n"              /* repl offset */
+                " .word %P[always]\n"          /* always replace */
+                " .byte 3b - 1b\n"             /* src len */
+                " .byte 5f - 4f\n"             /* repl len */
+                " .byte 3b - 2b\n"             /* pad len */
+                ".previous\n"
+                ".section .altinstr_replacement,\"ax\"\n"
+                "4: jmp %l[t_no]\n"
+                "5:\n"
+                ".previous\n"
+                ".section .altinstructions,\"a\"\n"
+                " .long 1b - .\n"              /* src offset */
+                " .long 0\n"                   /* no replacement */
+                " .word %P[feature]\n"         /* feature bit */
+                " .byte 3b - 1b\n"             /* src len */
+                " .byte 0\n"                   /* repl len */
+                " .byte 0\n"                   /* pad len */
+                ".previous\n"
+                ".section .altinstr_aux,\"ax\"\n"
+                "6:\n"
+                " testb %[bitnum],%[cap_byte]\n"
+                " jnz %l[t_yes]\n"
+                " jmp %l[t_no]\n"
+                ".previous\n"
+                : : [feature]  "i" (bit),
+                    [always]   "i" (X86_FEATURE_ALWAYS),
+                    [bitnum]   "i" (1 << (bit & 7)),
+                    [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
+                : : t_yes, t_no);
+t_yes:
+       return true;
+t_no:
+       return false;
 }
 
 #define static_cpu_has(bit)                                    \
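
Note: The _static_cpu_has() hunk above is a cleanup: the function body loses one level
of indentation, and the positional asm operands %P0/%P1 become the named operands
[feature]/[always], making each .altinstructions entry easier to match to its operand;
behavior is unchanged. An illustrative call site (kernel context; once alternatives run
at boot, the test is patched into a static jump):

        if (static_cpu_has(X86_FEATURE_PTI))
                invalidate_other_asid();
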
index 4d57894635f242da061e6a10acccaec70ae1dbc0..76b058533e473b10d99e7a1ee4b661b5b2b2b14d 100644 (file)
@@ -6,6 +6,7 @@
 #include <asm/alternative.h>
 #include <asm/alternative-asm.h>
 #include <asm/cpufeatures.h>
+#include <asm/msr-index.h>
 
 #ifdef __ASSEMBLY__
 
@@ -164,10 +165,15 @@ static inline void vmexit_fill_RSB(void)
 
 static inline void indirect_branch_prediction_barrier(void)
 {
-       alternative_input("",
-                         "call __ibp_barrier",
-                         X86_FEATURE_USE_IBPB,
-                         ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory"));
+       asm volatile(ALTERNATIVE("",
+                                "movl %[msr], %%ecx\n\t"
+                                "movl %[val], %%eax\n\t"
+                                "movl $0, %%edx\n\t"
+                                "wrmsr",
+                                X86_FEATURE_USE_IBPB)
+                    : : [msr] "i" (MSR_IA32_PRED_CMD),
+                        [val] "i" (PRED_CMD_IBPB)
+                    : "eax", "ecx", "edx", "memory");
 }
 
 #endif /* __ASSEMBLY__ */
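
Note: The hunk above inlines the IBPB sequence: instead of calling the out-of-line
__ibp_barrier() helper (whose definition is removed from bugs.c later in this diff),
the ALTERNATIVE patches in a direct WRMSR when X86_FEATURE_USE_IBPB is set and executes
nothing otherwise. What the patched-in instructions amount to, as a hedged C sketch
(the MSR number and command bit are the values from msr-index.h):

        static inline void ibpb_demo(void)
        {
                /* wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0):
                 * %ecx = MSR index, %eax = low word, %edx = high word */
                asm volatile("wrmsr"
                             : : "c" (0x49),    /* MSR_IA32_PRED_CMD */
                                 "a" (1),       /* PRED_CMD_IBPB     */
                                 "d" (0)
                             : "memory");
        }
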
index 4baa6bceb2325e6dd056ca682e1eeeb435693a26..d652a38080659775ef145d089291bde353ffe97a 100644 (file)
@@ -52,10 +52,6 @@ static inline void clear_page(void *page)
 
 void copy_page(void *to, void *from);
 
-#ifdef CONFIG_X86_MCE
-#define arch_unmap_kpfn arch_unmap_kpfn
-#endif
-
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_VSYSCALL_EMULATION
index 892df375b6155a51f584760efb9f9e77c3f732e8..554841fab717aef09d2b5cc57410a6eed8c2df0c 100644 (file)
@@ -297,9 +297,9 @@ static inline void __flush_tlb_global(void)
 {
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
 }
-static inline void __flush_tlb_single(unsigned long addr)
+static inline void __flush_tlb_one_user(unsigned long addr)
 {
-       PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
+       PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr);
 }
 
 static inline void flush_tlb_others(const struct cpumask *cpumask,
index 6ec54d01972dcf7d79e75bcfbecc84a1f2da80ea..f624f1f10316c248911585f757ea5bd257e98434 100644 (file)
@@ -217,7 +217,7 @@ struct pv_mmu_ops {
        /* TLB operations */
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
-       void (*flush_tlb_single)(unsigned long addr);
+       void (*flush_tlb_one_user)(unsigned long addr);
        void (*flush_tlb_others)(const struct cpumask *cpus,
                                 const struct flush_tlb_info *info);
 
index e67c0620aec2a268537b46d1da80ce6d0ef174a5..e55466760ff8e031433132eab676535e614e224f 100644 (file)
@@ -61,7 +61,7 @@ void paging_init(void);
 #define kpte_clear_flush(ptep, vaddr)          \
 do {                                           \
        pte_clear(&init_mm, (vaddr), (ptep));   \
-       __flush_tlb_one((vaddr));               \
+       __flush_tlb_one_kernel((vaddr));                \
 } while (0)
 
 #endif /* !__ASSEMBLY__ */
index 793bae7e7ce36bd36e728a8a6fe7f8e17b9920db..1bd9ed87606f45f5a22f2510bde9ba34029a0551 100644 (file)
@@ -91,7 +91,7 @@ struct cpuinfo_x86 {
        __u8                    x86;            /* CPU family */
        __u8                    x86_vendor;     /* CPU vendor */
        __u8                    x86_model;
-       __u8                    x86_mask;
+       __u8                    x86_stepping;
 #ifdef CONFIG_X86_64
        /* Number of 4K pages in DTLB/ITLB combined(in pages): */
        int                     x86_tlbsize;
@@ -109,7 +109,7 @@ struct cpuinfo_x86 {
        char                    x86_vendor_id[16];
        char                    x86_model_id[64];
        /* in KB - valid for CPUS which support this call: */
-       int                     x86_cache_size;
+       unsigned int            x86_cache_size;
        int                     x86_cache_alignment;    /* In bytes */
        /* Cache QoS architectural values: */
        int                     x86_cache_max_rmid;     /* max index */
@@ -977,7 +977,4 @@ bool xen_set_default_idle(void);
 
 void stop_this_cpu(void *dummy);
 void df_debug(struct pt_regs *regs, long error_code);
-
-void __ibp_barrier(void);
-
 #endif /* _ASM_X86_PROCESSOR_H */
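
Note: The processor.h hunks rename x86_mask to x86_stepping (the field holds the CPUID
stepping; "mask" was a historical misnomer) and widen x86_cache_size to unsigned int,
with 0 rather than -1 now meaning "unknown". Much of the rest of this diff is the
mechanical conversion of every user. How the stepping is carved out of the CPUID.1 EAX
signature, as a standalone sketch (cf. the cpu_detect() hunk later in this diff):

        static inline unsigned int stepping_demo(unsigned int sig)
        {
                return sig & 0xf;       /* x86_stepping: the low nibble */
        }

        static inline unsigned int model_demo(unsigned int sig)
        {
                return (sig >> 4) & 0xf;        /* base model; extended-model
                                                   handling omitted here */
        }
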
index 461f53d27708ae8b80622753122c1a4927537ee2..a4189762b2667016e1e07a0942ff9eaecab9ba51 100644 (file)
@@ -129,6 +129,7 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 void cpu_disable_common(void);
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
+void calculate_max_logical_packages(void);
 void native_smp_cpus_done(unsigned int max_cpus);
 void common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
 int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
index 2b8f18ca58747ae40b515c2bf674f8faced147e8..84137c22fdfade9bc8224c317808b311ec6f3007 100644 (file)
@@ -140,7 +140,7 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
 #else
 #define __flush_tlb() __native_flush_tlb()
 #define __flush_tlb_global() __native_flush_tlb_global()
-#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
+#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
 #endif
 
 static inline bool tlb_defer_switch_to_init_mm(void)
@@ -400,7 +400,7 @@ static inline void __native_flush_tlb_global(void)
 /*
  * flush one page in the user mapping
  */
-static inline void __native_flush_tlb_single(unsigned long addr)
+static inline void __native_flush_tlb_one_user(unsigned long addr)
 {
        u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 
@@ -437,18 +437,31 @@ static inline void __flush_tlb_all(void)
 /*
  * flush one page in the kernel mapping
  */
-static inline void __flush_tlb_one(unsigned long addr)
+static inline void __flush_tlb_one_kernel(unsigned long addr)
 {
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
-       __flush_tlb_single(addr);
+
+       /*
+        * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
+        * paravirt equivalent.  Even with PCID, this is sufficient: we only
+        * use PCID if we also use global PTEs for the kernel mapping, and
+        * INVLPG flushes global translations across all address spaces.
+        *
+        * If PTI is on, then the kernel is mapped with non-global PTEs, and
+        * __flush_tlb_one_user() will flush the given address for the current
+        * kernel address space and for its usermode counterpart, but it does
+        * not flush it for other address spaces.
+        */
+       __flush_tlb_one_user(addr);
 
        if (!static_cpu_has(X86_FEATURE_PTI))
                return;
 
        /*
-        * __flush_tlb_single() will have cleared the TLB entry for this ASID,
-        * but since kernel space is replicated across all, we must also
-        * invalidate all others.
+        * See above.  We need to propagate the flush to all other address
+        * spaces.  In principle, we only need to propagate it to kernelmode
+        * address spaces, but the extra bookkeeping we would need is not
+        * worth it.
         */
        invalidate_other_asid();
 }
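
Note: The tlbflush.h hunks make the user/kernel split explicit: __flush_tlb_one_user()
invalidates one page in the current address space (plus its usermode twin under PTI),
while __flush_tlb_one_kernel() must additionally invalidate every other ASID, because
kernel mappings are replicated across all of them. A hedged usage sketch (kernel
context, hypothetical variables):

        /* After changing a user PTE in the current mm: */
        __flush_tlb_one_user(user_vaddr);

        /* After changing a kernel PTE, which exists in all ASIDs: */
        __flush_tlb_one_kernel(kernel_vaddr);
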
index 6db28f17ff2884e01122f2689b117e8ae63f9ec4..c88e0b127810f22b15b53eb150d11e9584201885 100644 (file)
@@ -235,7 +235,7 @@ int amd_cache_northbridges(void)
        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model >= 0x8 &&
            (boot_cpu_data.x86_model > 0x9 ||
-            boot_cpu_data.x86_mask >= 0x1))
+            boot_cpu_data.x86_stepping >= 0x1))
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
 
        if (boot_cpu_data.x86 == 0x15)
index 25ddf02598d20a89cb1da2243aba687e6eee7657..b203af0855b57618fc398e29425ef96755c95552 100644 (file)
@@ -546,7 +546,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
 
 static u32 hsx_deadline_rev(void)
 {
-       switch (boot_cpu_data.x86_mask) {
+       switch (boot_cpu_data.x86_stepping) {
        case 0x02: return 0x3a; /* EP */
        case 0x04: return 0x0f; /* EX */
        }
@@ -556,7 +556,7 @@ static u32 hsx_deadline_rev(void)
 
 static u32 bdx_deadline_rev(void)
 {
-       switch (boot_cpu_data.x86_mask) {
+       switch (boot_cpu_data.x86_stepping) {
        case 0x02: return 0x00000011;
        case 0x03: return 0x0700000e;
        case 0x04: return 0x0f00000c;
@@ -568,7 +568,7 @@ static u32 bdx_deadline_rev(void)
 
 static u32 skx_deadline_rev(void)
 {
-       switch (boot_cpu_data.x86_mask) {
+       switch (boot_cpu_data.x86_stepping) {
        case 0x03: return 0x01000136;
        case 0x04: return 0x02000014;
        }
index 46b675aaf20b8a1f30f1eee2cf63717e299c5367..f11910b44638c84995848a5acdecbb296ddb5636 100644 (file)
@@ -1176,16 +1176,25 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
 
        uv_gre_table = gre;
        for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
+               unsigned long size = ((unsigned long)(gre->limit - lgre)
+                                       << UV_GAM_RANGE_SHFT);
+               int order = 0;
+               char suffix[] = " KMGTPE";
+
+               while (size > 9999 && order < sizeof(suffix)) {
+                       size /= 1024;
+                       order++;
+               }
+
                if (!index) {
                        pr_info("UV: GAM Range Table...\n");
                        pr_info("UV:  # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
                }
-               pr_info("UV: %2d: 0x%014lx-0x%014lx %5luG %3d   %04x  %02x %02x\n",
+               pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d   %04x  %02x %02x\n",
                        index++,
                        (unsigned long)lgre << UV_GAM_RANGE_SHFT,
                        (unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
-                       ((unsigned long)(gre->limit - lgre)) >>
-                               (30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */
+                       size, suffix[order],
                        gre->type, gre->nasid, gre->sockid, gre->pnode);
 
                lgre = gre->limit;
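
Note: The hunk above replaces a hard-coded shift that could only print gigabytes with a
loop that divides the size down by 1024 until it fits in four digits, then prints the
matching suffix from " KMGTPE". A standalone sketch of the same loop (the bound here is
tightened to sizeof(suffix) - 2 so the index stops at 'E' and can never land on the
string's NUL terminator):

        #include <stdio.h>

        static void print_human_size(unsigned long size)
        {
                static const char suffix[] = " KMGTPE";
                int order = 0;

                while (size > 9999 && order < (int)sizeof(suffix) - 2) {
                        size /= 1024;
                        order++;
                }
                printf("%lu%c\n", size, suffix[order]);
        }
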
index fa1261eefa16e73cedf27aadb878753be693f919..f91ba53e06c8b90f9d5557a2713896a1a50100f0 100644 (file)
@@ -18,7 +18,7 @@ void foo(void)
        OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
        OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
        OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
-       OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
+       OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
        OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
        OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
        OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
index 5bddbdcbc4a3cf722cd960c032e8ec400369e17a..f0e6456ca7d3cd482893a7d1953aec5d79c4caad 100644 (file)
@@ -119,7 +119,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
                return;
        }
 
-       if (c->x86_model == 6 && c->x86_mask == 1) {
+       if (c->x86_model == 6 && c->x86_stepping == 1) {
                const int K6_BUG_LOOP = 1000000;
                int n;
                void (*f_vide)(void);
@@ -149,7 +149,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 
        /* K6 with old style WHCR */
        if (c->x86_model < 8 ||
-          (c->x86_model == 8 && c->x86_mask < 8)) {
+          (c->x86_model == 8 && c->x86_stepping < 8)) {
                /* We can only write allocate on the low 508Mb */
                if (mbytes > 508)
                        mbytes = 508;
@@ -168,7 +168,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
                return;
        }
 
-       if ((c->x86_model == 8 && c->x86_mask > 7) ||
+       if ((c->x86_model == 8 && c->x86_stepping > 7) ||
             c->x86_model == 9 || c->x86_model == 13) {
                /* The more serious chips .. */
 
@@ -221,7 +221,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
         * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
         * As per AMD technical note 27212 0.2
         */
-       if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
+       if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
                rdmsr(MSR_K7_CLK_CTL, l, h);
                if ((l & 0xfff00000) != 0x20000000) {
                        pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
@@ -241,12 +241,12 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
         * but they are not certified as MP capable.
         */
        /* Athlon 660/661 is valid. */
-       if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
-           (c->x86_mask == 1)))
+       if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
+           (c->x86_stepping == 1)))
                return;
 
        /* Duron 670 is valid */
-       if ((c->x86_model == 7) && (c->x86_mask == 0))
+       if ((c->x86_model == 7) && (c->x86_stepping == 0))
                return;
 
        /*
@@ -256,8 +256,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
         * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
         * more.
         */
-       if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
-           ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
+       if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
+           ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
             (c->x86_model > 7))
                if (cpu_has(c, X86_FEATURE_MP))
                        return;
@@ -628,7 +628,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
        /*  Set MTRR capability flag if appropriate */
        if (c->x86 == 5)
                if (c->x86_model == 13 || c->x86_model == 9 ||
-                   (c->x86_model == 8 && c->x86_mask >= 8))
+                   (c->x86_model == 8 && c->x86_stepping >= 8))
                        set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 #endif
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
@@ -795,7 +795,7 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
         * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
         * all up to and including B1.
         */
-       if (c->x86_model <= 1 && c->x86_mask <= 1)
+       if (c->x86_model <= 1 && c->x86_stepping <= 1)
                set_cpu_cap(c, X86_FEATURE_CPB);
 }
 
@@ -906,11 +906,11 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
        /* AMD errata T13 (order #21922) */
        if ((c->x86 == 6)) {
                /* Duron Rev A0 */
-               if (c->x86_model == 3 && c->x86_mask == 0)
+               if (c->x86_model == 3 && c->x86_stepping == 0)
                        size = 64;
                /* Tbird rev A1/A2 */
                if (c->x86_model == 4 &&
-                       (c->x86_mask == 0 || c->x86_mask == 1))
+                       (c->x86_stepping == 0 || c->x86_stepping == 1))
                        size = 256;
        }
        return size;
@@ -1047,7 +1047,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
        }
 
        /* OSVW unavailable or ID unknown, match family-model-stepping range */
-       ms = (cpu->x86_model << 4) | cpu->x86_mask;
+       ms = (cpu->x86_model << 4) | cpu->x86_stepping;
        while ((range = *erratum++))
                if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
                    (ms >= AMD_MODEL_RANGE_START(range)) &&
index 71949bf2de5ad378e8184566010e4aaa6aa2307d..d71c8b54b696d4593ffb15ff894468ad3e524a50 100644 (file)
@@ -162,8 +162,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_NONE;
        else {
-               ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
-                                         sizeof(arg));
+               ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
                if (ret < 0)
                        return SPECTRE_V2_CMD_AUTO;
 
@@ -175,8 +174,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
                }
 
                if (i >= ARRAY_SIZE(mitigation_options)) {
-                       pr_err("unknown option (%s). Switching to AUTO select\n",
-                              mitigation_options[i].option);
+                       pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPECTRE_V2_CMD_AUTO;
                }
        }
@@ -185,8 +183,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
-               pr_err("%s selected but not compiled in. Switching to AUTO select\n",
-                      mitigation_options[i].option);
+               pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }
 
@@ -256,14 +253,14 @@ static void __init spectre_v2_select_mitigation(void)
                        goto retpoline_auto;
                break;
        }
-       pr_err("kernel not compiled with retpoline; no mitigation available!");
+       pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
        return;
 
 retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
-                       pr_err("LFENCE not serializing. Switching to generic retpoline\n");
+                       pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
@@ -281,7 +278,7 @@ static void __init spectre_v2_select_mitigation(void)
        pr_info("%s\n", spectre_v2_strings[mode]);
 
        /*
-        * If neither SMEP or KPTI are available, there is a risk of
+        * If neither SMEP nor PTI are available, there is a risk of
         * hitting userspace addresses in the RSB after a context switch
         * from a shallow call stack to a deeper one. To prevent this fill
         * the entire RSB, even when using IBRS.
@@ -295,21 +292,20 @@ static void __init spectre_v2_select_mitigation(void)
        if ((!boot_cpu_has(X86_FEATURE_PTI) &&
             !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
                setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
-               pr_info("Filling RSB on context switch\n");
+               pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
        }
 
        /* Initialize Indirect Branch Prediction Barrier if supported */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
-               pr_info("Enabling Indirect Branch Prediction Barrier\n");
+               pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
        }
 }
 
 #undef pr_fmt
 
 #ifdef CONFIG_SYSFS
-ssize_t cpu_show_meltdown(struct device *dev,
-                         struct device_attribute *attr, char *buf)
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
 {
        if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
                return sprintf(buf, "Not affected\n");
@@ -318,16 +314,14 @@ ssize_t cpu_show_meltdown(struct device *dev,
        return sprintf(buf, "Vulnerable\n");
 }
 
-ssize_t cpu_show_spectre_v1(struct device *dev,
-                           struct device_attribute *attr, char *buf)
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
 {
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
                return sprintf(buf, "Not affected\n");
        return sprintf(buf, "Mitigation: __user pointer sanitization\n");
 }
 
-ssize_t cpu_show_spectre_v2(struct device *dev,
-                           struct device_attribute *attr, char *buf)
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
 {
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                return sprintf(buf, "Not affected\n");
@@ -337,9 +331,3 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
                       spectre_v2_module_string());
 }
 #endif
-
-void __ibp_barrier(void)
-{
-       __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
-}
-EXPORT_SYMBOL_GPL(__ibp_barrier);
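
Note: The "unknown option" hunk above fixes an out-of-bounds read: the scan loop exits
with i == ARRAY_SIZE(mitigation_options) precisely when no option matched, so the old
message dereferenced one element past the end of the array; the fix prints the
user-supplied string instead. The fixed pattern in a standalone sketch:

        #include <stdio.h>
        #include <string.h>

        #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

        static const char *const options_demo[] = { "off", "auto", "on" };

        static int parse_demo(const char *arg)
        {
                unsigned int i;

                for (i = 0; i < ARRAY_SIZE(options_demo); i++)
                        if (!strcmp(arg, options_demo[i]))
                                return i;

                /* i is now out of range: report arg, not options_demo[i]. */
                printf("unknown option (%s)\n", arg);
                return -1;
        }
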
index c578cd29c2d2c47bd8c03268bc8809c28785d43c..e5ec0f11c0de7c06a0975d852822b2033d914185 100644 (file)
@@ -140,7 +140,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
                        clear_cpu_cap(c, X86_FEATURE_TSC);
                        break;
                case 8:
-                       switch (c->x86_mask) {
+                       switch (c->x86_stepping) {
                        default:
                        name = "2";
                                break;
@@ -215,7 +215,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
         *  - Note, it seems this may only be in engineering samples.
         */
        if ((c->x86 == 6) && (c->x86_model == 9) &&
-                               (c->x86_mask == 1) && (size == 65))
+                               (c->x86_stepping == 1) && (size == 65))
                size -= 1;
        return size;
 }
index d63f4b5706e4d76271da40fe14c6ef4ba41fe999..824aee0117bb5402d52fb5c8958e98b0bebfdf0c 100644 (file)
@@ -731,7 +731,7 @@ void cpu_detect(struct cpuinfo_x86 *c)
                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
                c->x86          = x86_family(tfms);
                c->x86_model    = x86_model(tfms);
-               c->x86_mask     = x86_stepping(tfms);
+               c->x86_stepping = x86_stepping(tfms);
 
                if (cap0 & (1<<19)) {
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
@@ -1184,9 +1184,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
        int i;
 
        c->loops_per_jiffy = loops_per_jiffy;
-       c->x86_cache_size = -1;
+       c->x86_cache_size = 0;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
-       c->x86_model = c->x86_mask = 0; /* So far unknown... */
+       c->x86_model = c->x86_stepping = 0;     /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_max_cores = 1;
@@ -1378,8 +1378,8 @@ void print_cpu_info(struct cpuinfo_x86 *c)
 
        pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
 
-       if (c->x86_mask || c->cpuid_level >= 0)
-               pr_cont(", stepping: 0x%x)\n", c->x86_mask);
+       if (c->x86_stepping || c->cpuid_level >= 0)
+               pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
        else
                pr_cont(")\n");
 }
index 6b4bb335641f3f039c7cff50190ad38b5cf9de66..8949b7ae6d92536c1bbff659d7463588d2bdfb06 100644 (file)
@@ -215,7 +215,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
 
        /* common case step number/rev -- exceptions handled below */
        c->x86_model = (dir1 >> 4) + 1;
-       c->x86_mask = dir1 & 0xf;
+       c->x86_stepping = dir1 & 0xf;
 
        /* Now cook; the original recipe is by Channing Corn, from Cyrix.
         * We do the same thing for each generation: we work out
index 319bf989fad1e1f3d7ed2234090a51c55a0067c6..d19e903214b403289aaf304eba85cc585c100c5e 100644 (file)
@@ -116,14 +116,13 @@ struct sku_microcode {
        u32 microcode;
 };
 static const struct sku_microcode spectre_bad_microcodes[] = {
-       { INTEL_FAM6_KABYLAKE_DESKTOP,  0x0B,   0x84 },
-       { INTEL_FAM6_KABYLAKE_DESKTOP,  0x0A,   0x84 },
-       { INTEL_FAM6_KABYLAKE_DESKTOP,  0x09,   0x84 },
-       { INTEL_FAM6_KABYLAKE_MOBILE,   0x0A,   0x84 },
-       { INTEL_FAM6_KABYLAKE_MOBILE,   0x09,   0x84 },
+       { INTEL_FAM6_KABYLAKE_DESKTOP,  0x0B,   0x80 },
+       { INTEL_FAM6_KABYLAKE_DESKTOP,  0x0A,   0x80 },
+       { INTEL_FAM6_KABYLAKE_DESKTOP,  0x09,   0x80 },
+       { INTEL_FAM6_KABYLAKE_MOBILE,   0x0A,   0x80 },
+       { INTEL_FAM6_KABYLAKE_MOBILE,   0x09,   0x80 },
        { INTEL_FAM6_SKYLAKE_X,         0x03,   0x0100013e },
        { INTEL_FAM6_SKYLAKE_X,         0x04,   0x0200003c },
-       { INTEL_FAM6_SKYLAKE_MOBILE,    0x03,   0xc2 },
        { INTEL_FAM6_SKYLAKE_DESKTOP,   0x03,   0xc2 },
        { INTEL_FAM6_BROADWELL_CORE,    0x04,   0x28 },
        { INTEL_FAM6_BROADWELL_GT3E,    0x01,   0x1b },
@@ -136,8 +135,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
        { INTEL_FAM6_HASWELL_X,         0x02,   0x3b },
        { INTEL_FAM6_HASWELL_X,         0x04,   0x10 },
        { INTEL_FAM6_IVYBRIDGE_X,       0x04,   0x42a },
-       /* Updated in the 20180108 release; blacklist until we know otherwise */
-       { INTEL_FAM6_ATOM_GEMINI_LAKE,  0x01,   0x22 },
        /* Observed in the wild */
        { INTEL_FAM6_SANDYBRIDGE_X,     0x06,   0x61b },
        { INTEL_FAM6_SANDYBRIDGE_X,     0x07,   0x712 },
@@ -149,7 +146,7 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
 
        for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
                if (c->x86_model == spectre_bad_microcodes[i].model &&
-                   c->x86_mask == spectre_bad_microcodes[i].stepping)
+                   c->x86_stepping == spectre_bad_microcodes[i].stepping)
                        return (c->microcode <= spectre_bad_microcodes[i].microcode);
        }
        return false;
@@ -196,7 +193,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
         * need the microcode to have already been loaded... so if it is
         * not, recommend a BIOS update and disable large pages.
         */
-       if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
+       if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
            c->microcode < 0x20e) {
                pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
                clear_cpu_cap(c, X86_FEATURE_PSE);
@@ -212,7 +209,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 
        /* CPUID workaround for 0F33/0F34 CPU */
        if (c->x86 == 0xF && c->x86_model == 0x3
-           && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
+           && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
                c->x86_phys_bits = 36;
 
        /*
@@ -310,7 +307,7 @@ int ppro_with_ram_bug(void)
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
-           boot_cpu_data.x86_mask < 8) {
+           boot_cpu_data.x86_stepping < 8) {
                pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
                return 1;
        }
@@ -327,7 +324,7 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
         * Mask B, Pentium, but not Pentium MMX
         */
        if (c->x86 == 5 &&
-           c->x86_mask >= 1 && c->x86_mask <= 4 &&
+           c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
            c->x86_model <= 3) {
                /*
                 * Remember we have B step Pentia with bugs
@@ -370,7 +367,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
         * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
         * model 3 mask 3
         */
-       if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
+       if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
                clear_cpu_cap(c, X86_FEATURE_SEP);
 
        /*
@@ -388,7 +385,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
         * P4 Xeon erratum 037 workaround.
         * Hardware prefetcher may cause stale data to be loaded into the cache.
         */
-       if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
+       if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
                if (msr_set_bit(MSR_IA32_MISC_ENABLE,
                                MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
                        pr_info("CPU: C0 stepping P4 Xeon detected.\n");
@@ -403,7 +400,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
         * Specification Update").
         */
        if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
-           (c->x86_mask < 0x6 || c->x86_mask == 0xb))
+           (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
                set_cpu_bug(c, X86_BUG_11AP);
 
 
@@ -650,7 +647,7 @@ static void init_intel(struct cpuinfo_x86 *c)
                case 6:
                        if (l2 == 128)
                                p = "Celeron (Mendocino)";
-                       else if (c->x86_mask == 0 || c->x86_mask == 5)
+                       else if (c->x86_stepping == 0 || c->x86_stepping == 5)
                                p = "Celeron-A";
                        break;
 
index 410629f10ad377176787b1b93ddb36625de2be36..589b948e6e01f01d7388cca456fbecd6019dbca4 100644 (file)
@@ -819,7 +819,7 @@ static __init void rdt_quirks(void)
                        cache_alloc_hsw_probe();
                break;
        case INTEL_FAM6_SKYLAKE_X:
-               if (boot_cpu_data.x86_mask <= 4)
+               if (boot_cpu_data.x86_stepping <= 4)
                        set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
        }
 }
index aa0d5df9dc60e710b22ab7172f0e5fd6e05db2c9..e956eb26706191d27447bc9feec2e9fbde5310c7 100644 (file)
@@ -115,4 +115,19 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb)        { }
 
 extern struct mca_config mca_cfg;
 
+#ifndef CONFIG_X86_64
+/*
+ * On 32-bit systems it would be difficult to safely unmap a poison page
+ * from the kernel 1:1 map because there are no non-canonical addresses that
+ * we can use to refer to the address without risking a speculative access.
+ * However, this isn't much of an issue because:
+ * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM, which
+ *    is only mapped into the kernel as needed.
+ * 2) Few people would run a 32-bit kernel on a machine that supports
+ *    recoverable errors because they have too much memory to boot 32-bit.
+ */
+static inline void mce_unmap_kpfn(unsigned long pfn) {}
+#define mce_unmap_kpfn mce_unmap_kpfn
+#endif
+
 #endif /* __X86_MCE_INTERNAL_H__ */
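
Note: The "#define mce_unmap_kpfn mce_unmap_kpfn" after the stub is the kernel's
self-naming override idiom: defining a macro with the function's own name marks the
function as already provided, so mce/core.c can test a plain "#ifndef mce_unmap_kpfn"
to decide whether to declare and define the real 64-bit version. This replaces the old
arch_unmap_kpfn hook removed elsewhere in this diff. The idiom in miniature (a hedged,
generic sketch):

        /* header: optional override for some configurations */
        #ifdef SOME_CONFIG
        static inline void helper_demo(void) {}        /* no-op variant */
        #define helper_demo helper_demo                /* mark as provided */
        #endif

        /* core file: supply the default only if nothing was provided */
        #ifndef helper_demo
        static void helper_demo(void)
        {
                /* ... full implementation ... */
        }
        #endif
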
index 3a8e88a611ebf99d70c2cacf2d866343f01a4a1a..8ff94d1e2dce54e87cc72c63812365d610476ec8 100644 (file)
@@ -105,6 +105,10 @@ static struct irq_work mce_irq_work;
 
 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
 
+#ifndef mce_unmap_kpfn
+static void mce_unmap_kpfn(unsigned long pfn);
+#endif
+
 /*
  * CPU/chipset specific EDAC code can register a notifier call here to print
  * MCE errors in a human-readable form.
@@ -234,7 +238,7 @@ static void __print_mce(struct mce *m)
                        m->cs, m->ip);
 
                if (m->cs == __KERNEL_CS)
-                       pr_cont("{%pS}", (void *)m->ip);
+                       pr_cont("{%pS}", (void *)(unsigned long)m->ip);
                pr_cont("\n");
        }
 
@@ -590,7 +594,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
 
        if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
                pfn = mce->addr >> PAGE_SHIFT;
-               memory_failure(pfn, 0);
+               if (!memory_failure(pfn, 0))
+                       mce_unmap_kpfn(pfn);
        }
 
        return NOTIFY_OK;
@@ -1057,12 +1062,13 @@ static int do_memory_failure(struct mce *m)
        ret = memory_failure(m->addr >> PAGE_SHIFT, flags);
        if (ret)
                pr_err("Memory error not recovered");
+       else
+               mce_unmap_kpfn(m->addr >> PAGE_SHIFT);
        return ret;
 }
 
-#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE)
-
-void arch_unmap_kpfn(unsigned long pfn)
+#ifndef mce_unmap_kpfn
+static void mce_unmap_kpfn(unsigned long pfn)
 {
        unsigned long decoy_addr;
 
@@ -1073,7 +1079,7 @@ void arch_unmap_kpfn(unsigned long pfn)
         * We would like to just call:
         *      set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
         * but doing that would radically increase the odds of a
-        * speculative access to the posion page because we'd have
+        * speculative access to the poison page because we'd have
         * the virtual address of the kernel 1:1 mapping sitting
         * around in registers.
         * Instead we get tricky.  We create a non-canonical address
@@ -1098,7 +1104,6 @@ void arch_unmap_kpfn(unsigned long pfn)
 
        if (set_memory_np(decoy_addr, 1))
                pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
-
 }
 #endif
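
Note: The rewritten unmap path never materializes the canonical 1:1 virtual address of
the poisoned page in a register, where a mispredicted load could dereference it and
consume the poison. Instead it flips bit 63 of PAGE_OFFSET to build a non-canonical
"decoy" alias; the page-table walk only consumes the low address bits, so set_memory_np()
still resolves the same PTE, as the hunk's surviving comment explains. The core
computation, restated as a hedged kernel-context sketch (PAGE_OFFSET, BIT() and
set_memory_np() are the kernel's own definitions):

        static void unmap_poison_demo(unsigned long pfn)
        {
                unsigned long decoy_addr;

                /* Non-canonical alias of the 1:1 address of this pfn. */
                decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));

                if (set_memory_np(decoy_addr, 1))
                        pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
        }
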
 
index f7c55b0e753ad038332307b2c7347abd1dcbc670..a15db2b4e0d66a8b5c4d2468359eeafb85401151 100644 (file)
@@ -921,7 +921,7 @@ static bool is_blacklisted(unsigned int cpu)
         */
        if (c->x86 == 6 &&
            c->x86_model == INTEL_FAM6_BROADWELL_X &&
-           c->x86_mask == 0x01 &&
+           c->x86_stepping == 0x01 &&
            llc_size_per_core > 2621440 &&
            c->microcode < 0x0b000021) {
                pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
@@ -944,7 +944,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
                return UCODE_NFOUND;
 
        sprintf(name, "intel-ucode/%02x-%02x-%02x",
-               c->x86, c->x86_model, c->x86_mask);
+               c->x86, c->x86_model, c->x86_stepping);
 
        if (request_firmware_direct(&firmware, name, device)) {
                pr_debug("data file %s load failed\n", name);
@@ -982,7 +982,7 @@ static struct microcode_ops microcode_intel_ops = {
 
 static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
 {
-       u64 llc_size = c->x86_cache_size * 1024;
+       u64 llc_size = c->x86_cache_size * 1024ULL;
 
        do_div(llc_size, c->x86_max_cores);
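
Note: The "* 1024ULL" hunk above pairs with the x86_cache_size type change: the field
is an unsigned int holding kilobytes, and a plain "* 1024" would be evaluated in 32-bit
arithmetic and could wrap before being widened to u64. The ULL suffix promotes the
multiplication itself to 64 bits. A standalone demonstration (the cache size is
deliberately extreme to show the wrap):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                unsigned int cache_kb = 3u * 1024 * 1024 * 1024; /* 3 TB in KB */
                uint64_t wrapped = cache_kb * 1024;     /* 32-bit multiply: 0 */
                uint64_t exact   = cache_kb * 1024ULL;  /* 64-bit multiply    */

                printf("%llu vs %llu\n",
                       (unsigned long long)wrapped,
                       (unsigned long long)exact);
                return 0;
        }
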
 
index fdc55215d44d08b8c170767f94e68c4e60c09079..e12ee86906c6250faa05b13ae9bb9c3dc545558f 100644 (file)
@@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
         */
        if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
-           boot_cpu_data.x86_mask <= 7) {
+           boot_cpu_data.x86_stepping <= 7) {
                if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
                        pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
                        return -EINVAL;
index 40d5a8a752125ed5d26a7605d5eabad572879bfc..7468de4290873ad4664a5575e6cc0f4d74a26ae6 100644 (file)
@@ -711,8 +711,8 @@ void __init mtrr_bp_init(void)
                        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
                            boot_cpu_data.x86 == 0xF &&
                            boot_cpu_data.x86_model == 0x3 &&
-                           (boot_cpu_data.x86_mask == 0x3 ||
-                            boot_cpu_data.x86_mask == 0x4))
+                           (boot_cpu_data.x86_stepping == 0x3 ||
+                            boot_cpu_data.x86_stepping == 0x4))
                                phys_addr = 36;
 
                        size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
index e7ecedafa1c8f7b033eccbdcf24198089933b386..2c8522a39ed5dbc388bada821ed144f2435adac2 100644 (file)
@@ -72,8 +72,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                   c->x86_model,
                   c->x86_model_id[0] ? c->x86_model_id : "unknown");
 
-       if (c->x86_mask || c->cpuid_level >= 0)
-               seq_printf(m, "stepping\t: %d\n", c->x86_mask);
+       if (c->x86_stepping || c->cpuid_level >= 0)
+               seq_printf(m, "stepping\t: %d\n", c->x86_stepping);
        else
                seq_puts(m, "stepping\t: unknown\n");
        if (c->microcode)
@@ -91,8 +91,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        }
 
        /* Cache size */
-       if (c->x86_cache_size >= 0)
-               seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
+       if (c->x86_cache_size)
+               seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size);
 
        show_cpuinfo_core(m, c, cpu);
        show_cpuinfo_misc(m, c);
index c29020907886a32d1ca40e9beb858c32dfdefd0f..b59e4fb40fd9986c0cc6b629b4c7a3a18a6d23b4 100644 (file)
@@ -37,7 +37,7 @@
 #define X86            new_cpu_data+CPUINFO_x86
 #define X86_VENDOR     new_cpu_data+CPUINFO_x86_vendor
 #define X86_MODEL      new_cpu_data+CPUINFO_x86_model
-#define X86_MASK       new_cpu_data+CPUINFO_x86_mask
+#define X86_STEPPING   new_cpu_data+CPUINFO_x86_stepping
 #define X86_HARD_MATH  new_cpu_data+CPUINFO_hard_math
 #define X86_CPUID      new_cpu_data+CPUINFO_cpuid_level
 #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
@@ -332,7 +332,7 @@ ENTRY(startup_32_smp)
        shrb $4,%al
        movb %al,X86_MODEL
        andb $0x0f,%cl          # mask mask revision
-       movb %cl,X86_MASK
+       movb %cl,X86_STEPPING
        movl %edx,X86_CAPABILITY
 
 .Lis486:
index 27d0a1712663673ac9993a6ddd055cb075b265fa..f1c5eb99d445407a9fc134e76a8010d17a61d780 100644 (file)
@@ -410,7 +410,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
        processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
        processor.cpuflag = CPU_ENABLED;
        processor.cpufeature = (boot_cpu_data.x86 << 8) |
-           (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
+           (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
        processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
        processor.reserved[0] = 0;
        processor.reserved[1] = 0;
index 041096bdef860d356d58873e3e0483384ee98301..99dc79e76bdc5497c8e07c6ee32e74ffe04ff492 100644 (file)
@@ -200,9 +200,9 @@ static void native_flush_tlb_global(void)
        __native_flush_tlb_global();
 }
 
-static void native_flush_tlb_single(unsigned long addr)
+static void native_flush_tlb_one_user(unsigned long addr)
 {
-       __native_flush_tlb_single(addr);
+       __native_flush_tlb_one_user(addr);
 }
 
 struct static_key paravirt_steal_enabled;
@@ -401,7 +401,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
 
        .flush_tlb_user = native_flush_tlb,
        .flush_tlb_kernel = native_flush_tlb_global,
-       .flush_tlb_single = native_flush_tlb_single,
+       .flush_tlb_one_user = native_flush_tlb_one_user,
        .flush_tlb_others = native_flush_tlb_others,
 
        .pgd_alloc = __paravirt_pgd_alloc,
index 6f27facbaa9b03bd32da7f0c5edca4f1014aa320..9eee25d07586c6a310b16e3471efc02e501f93c7 100644 (file)
@@ -1281,11 +1281,10 @@ void __init native_smp_prepare_boot_cpu(void)
        cpu_set_state_online(me);
 }
 
-void __init native_smp_cpus_done(unsigned int max_cpus)
+void __init calculate_max_logical_packages(void)
 {
        int ncpus;
 
-       pr_debug("Boot done\n");
        /*
         * Today neither Intel nor AMD support heterogenous systems so
         * extrapolate the boot cpu's data to all packages.
@@ -1293,6 +1292,13 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
        ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
        __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
        pr_info("Max logical packages: %u\n", __max_logical_packages);
+}
+
+void __init native_smp_cpus_done(unsigned int max_cpus)
+{
+       pr_debug("Boot done\n");
+
+       calculate_max_logical_packages();
 
        if (x86_has_numa_in_package)
                set_sched_topology(x86_numa_in_package_topology);
@@ -1430,7 +1436,6 @@ static void remove_siblinginfo(int cpu)
        cpumask_clear(cpu_llc_shared_mask(cpu));
        cpumask_clear(topology_sibling_cpumask(cpu));
        cpumask_clear(topology_core_cpumask(cpu));
-       c->phys_proc_id = 0;
        c->cpu_core_id = 0;
        cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
        recompute_smt_state();
index 446c9ef8cfc32b68d77d4429128b3c794b3fc069..3d9b2308e7fad0be6f90cbf834d2e00d1f4fc417 100644 (file)
@@ -181,7 +181,7 @@ int fixup_bug(struct pt_regs *regs, int trapnr)
                break;
 
        case BUG_TRAP_TYPE_WARN:
-               regs->ip += LEN_UD0;
+               regs->ip += LEN_UD2;
                return 1;
        }
 
index 8eca1d04aeb86d309d7242ec2baf6c0153f79068..46ff304140c71fad1fa818324c0cda017257d2a5 100644 (file)
@@ -5080,7 +5080,7 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
 typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
 
 /* The caller should hold mmu-lock before calling this function. */
-static bool
+static __always_inline bool
 slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
                        slot_level_handler fn, int start_level, int end_level,
                        gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
@@ -5110,7 +5110,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
        return flush;
 }
 
-static bool
+static __always_inline bool
 slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
                  slot_level_handler fn, int start_level, int end_level,
                  bool lock_flush_tlb)
@@ -5121,7 +5121,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
                        lock_flush_tlb);
 }
 
-static bool
+static __always_inline bool
 slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
                      slot_level_handler fn, bool lock_flush_tlb)
 {
@@ -5129,7 +5129,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
                                 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
 }
 
-static bool
+static __always_inline bool
 slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
                        slot_level_handler fn, bool lock_flush_tlb)
 {
@@ -5137,7 +5137,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
                                 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
 }
 
-static bool
+static __always_inline bool
 slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
                 slot_level_handler fn, bool lock_flush_tlb)
 {
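
Note: Marking the slot_handle_*() family __always_inline lets the compiler see the
constant "fn" callback at every call site and convert the indirect call into a direct
one; with CONFIG_RETPOLINE, each devirtualized call also avoids a retpoline thunk on a
hot path. The effect in a standalone sketch:

        typedef int (*handler_t)(int);

        static int double_it(int x) { return 2 * x; }

        static inline __attribute__((always_inline))
        int apply_demo(handler_t fn, int x)
        {
                return fn(x);   /* after inlining: a direct, devirtualized
                                   call to double_it(), no retpoline needed */
        }

        int use_demo(void) { return apply_demo(double_it, 21); }
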
index f427723dc7db34fab153b4faecbbb767b48f7e06..3dec126aa3022eb11f49d5de695d3658183a48ee 100644 (file)
@@ -10136,7 +10136,10 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
                        (unsigned long)(vmcs12->posted_intr_desc_addr &
                        (PAGE_SIZE - 1)));
        }
-       if (!nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
+       if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
+               vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
+                             CPU_BASED_USE_MSR_BITMAPS);
+       else
                vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
                                CPU_BASED_USE_MSR_BITMAPS);
 }
@@ -10224,8 +10227,8 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
         *    updated to reflect this when L1 (or its L2s) actually write to
         *    the MSR.
         */
-       bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
-       bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
+       bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
+       bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
 
        /* Nothing to do if the MSR bitmap is not in use.  */
        if (!cpu_has_vmx_msr_bitmap() ||
index d6f848d1211d4b2deaf193fca80fb4834310dff4..2dd1fe13a37b36aacfeca12733178f62a89ba309 100644 (file)
@@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig)
 {
        unsigned int fam, model;
 
-        fam = x86_family(sig);
+       fam = x86_family(sig);
 
        model = (sig >> 4) & 0xf;
 
index 7b881d03d0ddd934f674343519159be1fe48a2cc..3cdf06128d13c11b60ca2aa7cadf32af03703020 100644 (file)
@@ -7,6 +7,7 @@ asmlinkage void just_return_func(void);
 
 asm(
        ".type just_return_func, @function\n"
+       ".globl just_return_func\n"
        "just_return_func:\n"
        "       ret\n"
        ".size just_return_func, .-just_return_func\n"
index 1ab42c8520693c9999e4b19d04eb565528404ed7..8b72923f1d35c07c5ded42ae36873790da02d247 100644 (file)
@@ -256,7 +256,7 @@ static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
-       __flush_tlb_one(vaddr);
+       __flush_tlb_one_kernel(vaddr);
 }
 
 void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
@@ -1193,8 +1193,8 @@ void __init mem_init(void)
        register_page_bootmem_info();
 
        /* Register memory areas for /proc/kcore */
-       kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
-                        PAGE_SIZE, KCORE_OTHER);
+       if (get_gate_vma(&init_mm))
+               kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
 
        mem_init_print_info(NULL);
 }
index c45b6ec5357bcd2e9f6626bd738c700cccd0a173..e2db83bebc3b71a7842404b938f3bb1c2f1e7fae 100644 (file)
@@ -820,5 +820,5 @@ void __init __early_set_fixmap(enum fixed_addresses idx,
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
-       __flush_tlb_one(addr);
+       __flush_tlb_one_kernel(addr);
 }
index 58477ec3d66d08acf07c1bc21bb9a55a78fcaa28..7c868670963617865d2e9fb494a08b04a82b49cb 100644 (file)
@@ -168,7 +168,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
                return -1;
        }
 
-       __flush_tlb_one(f->addr);
+       __flush_tlb_one_kernel(f->addr);
        return 0;
 }
 
index c3c5274410a908e762aed936406006d63c3116ac..9bb7f0ab9fe625b77c64bec89b2a0ff9cac222bf 100644 (file)
@@ -63,7 +63,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
-       __flush_tlb_one(vaddr);
+       __flush_tlb_one_kernel(vaddr);
 }
 
 unsigned long __FIXADDR_TOP = 0xfffff000;
index 8dcc0607f80584748f92fe43aba9a32685fc6f9b..7f1a51399674b1da34cac674b94b66043f11a1ea 100644 (file)
@@ -498,7 +498,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
         *    flush that changes context.tlb_gen from 2 to 3.  If they get
         *    processed on this CPU in reverse order, we'll see
         *     local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
-        *    If we were to use __flush_tlb_single() and set local_tlb_gen to
+        *    If we were to use __flush_tlb_one_user() and set local_tlb_gen to
 	 *    3, we'd break the invariant: we'd update local_tlb_gen above
         *    1 without the full flush that's needed for tlb_gen 2.
         *
@@ -519,7 +519,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 
                addr = f->start;
                while (addr < f->end) {
-                       __flush_tlb_single(addr);
+                       __flush_tlb_one_user(addr);
                        addr += PAGE_SIZE;
                }
                if (local)
@@ -666,7 +666,7 @@ static void do_kernel_range_flush(void *info)
 
 	/* flush the range one page at a time with 'invlpg' */
        for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
-               __flush_tlb_one(addr);
+               __flush_tlb_one_kernel(addr);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
index c2e9285d1bf11539ce7efe9c3a683467368c9cbb..db77e087adaf874f6556f5f1d0cb5bdcaf54c42f 100644 (file)
@@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
                local_flush_tlb();
                stat->d_alltlb++;
        } else {
-               __flush_tlb_single(msg->address);
+               __flush_tlb_one_user(msg->address);
                stat->d_onetlb++;
        }
        stat->d_requestee++;
index d85076223a696d0b00bee7c22a9e28b1e66dc975..aae88fec9941a6dd4183ff2e85f43036e0c2aa88 100644 (file)
@@ -1300,12 +1300,12 @@ static void xen_flush_tlb(void)
        preempt_enable();
 }
 
-static void xen_flush_tlb_single(unsigned long addr)
+static void xen_flush_tlb_one_user(unsigned long addr)
 {
        struct mmuext_op *op;
        struct multicall_space mcs;
 
-       trace_xen_mmu_flush_tlb_single(addr);
+       trace_xen_mmu_flush_tlb_one_user(addr);
 
        preempt_disable();
 
@@ -2370,7 +2370,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 
        .flush_tlb_user = xen_flush_tlb,
        .flush_tlb_kernel = xen_flush_tlb,
-       .flush_tlb_single = xen_flush_tlb_single,
+       .flush_tlb_one_user = xen_flush_tlb_one_user,
        .flush_tlb_others = xen_flush_tlb_others,
 
        .pgd_alloc = xen_pgd_alloc,
index 77c959cf81e7c3fb573066513067c47f2698fedc..7a43b2ae19f1228b247e57fe453d21ab15e116f7 100644 (file)
@@ -122,6 +122,8 @@ void __init xen_smp_cpus_done(unsigned int max_cpus)
 
        if (xen_hvm_domain())
                native_smp_cpus_done(max_cpus);
+       else
+               calculate_max_logical_packages();
 
        if (xen_have_vcpu_info_placement)
                return;
index df93102e21494dc7f98456e4376ce7c83fca56ab..357492712b0ea85362d2bbd6b2aa1687459cba66 100644 (file)
@@ -3164,6 +3164,7 @@ static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
                cpu_relax();
        }
 
+       __set_current_state(TASK_RUNNING);
        return false;
 }
 
index a965b9d8055983af4353ae39fe676b7789c8f93f..ded1487833033a602b7736ca6d78967517153dae 100644 (file)
 #include <crypto/sha3.h>
 #include <asm/unaligned.h>
 
+/*
+ * On some 32-bit architectures (mn10300 and h8300), GCC ends up using
+ * over 1 KB of stack if we inline the round calculation into the loop
+ * in keccakf(). On the other hand, on 64-bit architectures with plenty
+ * of [64-bit wide] general purpose registers, not inlining it severely
+ * hurts performance. So let's use 64-bitness as a heuristic to decide
+ * whether to inline or not.
+ */
+#ifdef CONFIG_64BIT
+#define SHA3_INLINE    inline
+#else
+#define SHA3_INLINE    noinline
+#endif
+
 #define KECCAK_ROUNDS 24
 
 static const u64 keccakf_rndc[24] = {
@@ -35,111 +49,115 @@ static const u64 keccakf_rndc[24] = {
 
 /* update the state with the given number of rounds */
 
-static void __attribute__((__optimize__("O3"))) keccakf(u64 st[25])
+static SHA3_INLINE void keccakf_round(u64 st[25])
 {
        u64 t[5], tt, bc[5];
-       int round;
 
-       for (round = 0; round < KECCAK_ROUNDS; round++) {
+       /* Theta */
+       bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20];
+       bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21];
+       bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22];
+       bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23];
+       bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24];
+
+       t[0] = bc[4] ^ rol64(bc[1], 1);
+       t[1] = bc[0] ^ rol64(bc[2], 1);
+       t[2] = bc[1] ^ rol64(bc[3], 1);
+       t[3] = bc[2] ^ rol64(bc[4], 1);
+       t[4] = bc[3] ^ rol64(bc[0], 1);
+
+       st[0] ^= t[0];
+
+       /* Rho Pi */
+       tt = st[1];
+       st[ 1] = rol64(st[ 6] ^ t[1], 44);
+       st[ 6] = rol64(st[ 9] ^ t[4], 20);
+       st[ 9] = rol64(st[22] ^ t[2], 61);
+       st[22] = rol64(st[14] ^ t[4], 39);
+       st[14] = rol64(st[20] ^ t[0], 18);
+       st[20] = rol64(st[ 2] ^ t[2], 62);
+       st[ 2] = rol64(st[12] ^ t[2], 43);
+       st[12] = rol64(st[13] ^ t[3], 25);
+       st[13] = rol64(st[19] ^ t[4],  8);
+       st[19] = rol64(st[23] ^ t[3], 56);
+       st[23] = rol64(st[15] ^ t[0], 41);
+       st[15] = rol64(st[ 4] ^ t[4], 27);
+       st[ 4] = rol64(st[24] ^ t[4], 14);
+       st[24] = rol64(st[21] ^ t[1],  2);
+       st[21] = rol64(st[ 8] ^ t[3], 55);
+       st[ 8] = rol64(st[16] ^ t[1], 45);
+       st[16] = rol64(st[ 5] ^ t[0], 36);
+       st[ 5] = rol64(st[ 3] ^ t[3], 28);
+       st[ 3] = rol64(st[18] ^ t[3], 21);
+       st[18] = rol64(st[17] ^ t[2], 15);
+       st[17] = rol64(st[11] ^ t[1], 10);
+       st[11] = rol64(st[ 7] ^ t[2],  6);
+       st[ 7] = rol64(st[10] ^ t[0],  3);
+       st[10] = rol64(    tt ^ t[1],  1);
+
+       /* Chi */
+       bc[ 0] = ~st[ 1] & st[ 2];
+       bc[ 1] = ~st[ 2] & st[ 3];
+       bc[ 2] = ~st[ 3] & st[ 4];
+       bc[ 3] = ~st[ 4] & st[ 0];
+       bc[ 4] = ~st[ 0] & st[ 1];
+       st[ 0] ^= bc[ 0];
+       st[ 1] ^= bc[ 1];
+       st[ 2] ^= bc[ 2];
+       st[ 3] ^= bc[ 3];
+       st[ 4] ^= bc[ 4];
+
+       bc[ 0] = ~st[ 6] & st[ 7];
+       bc[ 1] = ~st[ 7] & st[ 8];
+       bc[ 2] = ~st[ 8] & st[ 9];
+       bc[ 3] = ~st[ 9] & st[ 5];
+       bc[ 4] = ~st[ 5] & st[ 6];
+       st[ 5] ^= bc[ 0];
+       st[ 6] ^= bc[ 1];
+       st[ 7] ^= bc[ 2];
+       st[ 8] ^= bc[ 3];
+       st[ 9] ^= bc[ 4];
+
+       bc[ 0] = ~st[11] & st[12];
+       bc[ 1] = ~st[12] & st[13];
+       bc[ 2] = ~st[13] & st[14];
+       bc[ 3] = ~st[14] & st[10];
+       bc[ 4] = ~st[10] & st[11];
+       st[10] ^= bc[ 0];
+       st[11] ^= bc[ 1];
+       st[12] ^= bc[ 2];
+       st[13] ^= bc[ 3];
+       st[14] ^= bc[ 4];
+
+       bc[ 0] = ~st[16] & st[17];
+       bc[ 1] = ~st[17] & st[18];
+       bc[ 2] = ~st[18] & st[19];
+       bc[ 3] = ~st[19] & st[15];
+       bc[ 4] = ~st[15] & st[16];
+       st[15] ^= bc[ 0];
+       st[16] ^= bc[ 1];
+       st[17] ^= bc[ 2];
+       st[18] ^= bc[ 3];
+       st[19] ^= bc[ 4];
+
+       bc[ 0] = ~st[21] & st[22];
+       bc[ 1] = ~st[22] & st[23];
+       bc[ 2] = ~st[23] & st[24];
+       bc[ 3] = ~st[24] & st[20];
+       bc[ 4] = ~st[20] & st[21];
+       st[20] ^= bc[ 0];
+       st[21] ^= bc[ 1];
+       st[22] ^= bc[ 2];
+       st[23] ^= bc[ 3];
+       st[24] ^= bc[ 4];
+}
 
-               /* Theta */
-               bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20];
-               bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21];
-               bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22];
-               bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23];
-               bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24];
-
-               t[0] = bc[4] ^ rol64(bc[1], 1);
-               t[1] = bc[0] ^ rol64(bc[2], 1);
-               t[2] = bc[1] ^ rol64(bc[3], 1);
-               t[3] = bc[2] ^ rol64(bc[4], 1);
-               t[4] = bc[3] ^ rol64(bc[0], 1);
-
-               st[0] ^= t[0];
-
-               /* Rho Pi */
-               tt = st[1];
-               st[ 1] = rol64(st[ 6] ^ t[1], 44);
-               st[ 6] = rol64(st[ 9] ^ t[4], 20);
-               st[ 9] = rol64(st[22] ^ t[2], 61);
-               st[22] = rol64(st[14] ^ t[4], 39);
-               st[14] = rol64(st[20] ^ t[0], 18);
-               st[20] = rol64(st[ 2] ^ t[2], 62);
-               st[ 2] = rol64(st[12] ^ t[2], 43);
-               st[12] = rol64(st[13] ^ t[3], 25);
-               st[13] = rol64(st[19] ^ t[4],  8);
-               st[19] = rol64(st[23] ^ t[3], 56);
-               st[23] = rol64(st[15] ^ t[0], 41);
-               st[15] = rol64(st[ 4] ^ t[4], 27);
-               st[ 4] = rol64(st[24] ^ t[4], 14);
-               st[24] = rol64(st[21] ^ t[1],  2);
-               st[21] = rol64(st[ 8] ^ t[3], 55);
-               st[ 8] = rol64(st[16] ^ t[1], 45);
-               st[16] = rol64(st[ 5] ^ t[0], 36);
-               st[ 5] = rol64(st[ 3] ^ t[3], 28);
-               st[ 3] = rol64(st[18] ^ t[3], 21);
-               st[18] = rol64(st[17] ^ t[2], 15);
-               st[17] = rol64(st[11] ^ t[1], 10);
-               st[11] = rol64(st[ 7] ^ t[2],  6);
-               st[ 7] = rol64(st[10] ^ t[0],  3);
-               st[10] = rol64(    tt ^ t[1],  1);
-
-               /* Chi */
-               bc[ 0] = ~st[ 1] & st[ 2];
-               bc[ 1] = ~st[ 2] & st[ 3];
-               bc[ 2] = ~st[ 3] & st[ 4];
-               bc[ 3] = ~st[ 4] & st[ 0];
-               bc[ 4] = ~st[ 0] & st[ 1];
-               st[ 0] ^= bc[ 0];
-               st[ 1] ^= bc[ 1];
-               st[ 2] ^= bc[ 2];
-               st[ 3] ^= bc[ 3];
-               st[ 4] ^= bc[ 4];
-
-               bc[ 0] = ~st[ 6] & st[ 7];
-               bc[ 1] = ~st[ 7] & st[ 8];
-               bc[ 2] = ~st[ 8] & st[ 9];
-               bc[ 3] = ~st[ 9] & st[ 5];
-               bc[ 4] = ~st[ 5] & st[ 6];
-               st[ 5] ^= bc[ 0];
-               st[ 6] ^= bc[ 1];
-               st[ 7] ^= bc[ 2];
-               st[ 8] ^= bc[ 3];
-               st[ 9] ^= bc[ 4];
-
-               bc[ 0] = ~st[11] & st[12];
-               bc[ 1] = ~st[12] & st[13];
-               bc[ 2] = ~st[13] & st[14];
-               bc[ 3] = ~st[14] & st[10];
-               bc[ 4] = ~st[10] & st[11];
-               st[10] ^= bc[ 0];
-               st[11] ^= bc[ 1];
-               st[12] ^= bc[ 2];
-               st[13] ^= bc[ 3];
-               st[14] ^= bc[ 4];
-
-               bc[ 0] = ~st[16] & st[17];
-               bc[ 1] = ~st[17] & st[18];
-               bc[ 2] = ~st[18] & st[19];
-               bc[ 3] = ~st[19] & st[15];
-               bc[ 4] = ~st[15] & st[16];
-               st[15] ^= bc[ 0];
-               st[16] ^= bc[ 1];
-               st[17] ^= bc[ 2];
-               st[18] ^= bc[ 3];
-               st[19] ^= bc[ 4];
-
-               bc[ 0] = ~st[21] & st[22];
-               bc[ 1] = ~st[22] & st[23];
-               bc[ 2] = ~st[23] & st[24];
-               bc[ 3] = ~st[24] & st[20];
-               bc[ 4] = ~st[20] & st[21];
-               st[20] ^= bc[ 0];
-               st[21] ^= bc[ 1];
-               st[22] ^= bc[ 2];
-               st[23] ^= bc[ 3];
-               st[24] ^= bc[ 4];
+static void __optimize("O3") keccakf(u64 st[25])
+{
+       int round;
 
+       for (round = 0; round < KECCAK_ROUNDS; round++) {
+               keccakf_round(st);
                /* Iota */
                st[0] ^= keccakf_rndc[round];
        }
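
Rough arithmetic behind the stack-usage comment in this hunk (illustrative, not from the commit message):

/*
 * Keccak state: 25 * sizeof(u64) = 200 bytes, passed by pointer.
 * Per-round temporaries: t[5] + bc[5] + tt = 11 u64s = 88 bytes.
 * On a 32-bit target every u64 occupies a register pair, so fully
 * inlining 24 rounds forces heavy spilling and the frame can grow
 * past 1 KB, which is exactly what the SHA3_INLINE split avoids.
 */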
index 676c9788e1c8287b5e16311fa3159342702ae637..0dad0bd9327b5ac2338f0c8b1e4c021532a47196 100644 (file)
@@ -660,13 +660,15 @@ struct acpi_device *acpi_companion_match(const struct device *dev)
  * acpi_of_match_device - Match device object using the "compatible" property.
  * @adev: ACPI device object to match.
  * @of_match_table: List of device IDs to match against.
+ * @of_id: OF ID if matched
  *
  * If @adev has ACPI_DT_NAMESPACE_HID in its list of identifiers and a _DSD
  * object with the "compatible" property, use that property to match against
  * the given list of identifiers.
  */
 static bool acpi_of_match_device(struct acpi_device *adev,
-                                const struct of_device_id *of_match_table)
+                                const struct of_device_id *of_match_table,
+                                const struct of_device_id **of_id)
 {
        const union acpi_object *of_compatible, *obj;
        int i, nval;
@@ -690,8 +692,11 @@ static bool acpi_of_match_device(struct acpi_device *adev,
                const struct of_device_id *id;
 
                for (id = of_match_table; id->compatible[0]; id++)
-                       if (!strcasecmp(obj->string.pointer, id->compatible))
+                       if (!strcasecmp(obj->string.pointer, id->compatible)) {
+                               if (of_id)
+                                       *of_id = id;
                                return true;
+                       }
        }
 
        return false;
@@ -762,10 +767,11 @@ static bool __acpi_match_device_cls(const struct acpi_device_id *id,
        return true;
 }
 
-static const struct acpi_device_id *__acpi_match_device(
-       struct acpi_device *device,
-       const struct acpi_device_id *ids,
-       const struct of_device_id *of_ids)
+static bool __acpi_match_device(struct acpi_device *device,
+                               const struct acpi_device_id *acpi_ids,
+                               const struct of_device_id *of_ids,
+                               const struct acpi_device_id **acpi_id,
+                               const struct of_device_id **of_id)
 {
        const struct acpi_device_id *id;
        struct acpi_hardware_id *hwid;
@@ -775,30 +781,32 @@ static const struct acpi_device_id *__acpi_match_device(
         * driver for it.
         */
        if (!device || !device->status.present)
-               return NULL;
+               return false;
 
        list_for_each_entry(hwid, &device->pnp.ids, list) {
                /* First, check the ACPI/PNP IDs provided by the caller. */
-               for (id = ids; id->id[0] || id->cls; id++) {
-                       if (id->id[0] && !strcmp((char *) id->id, hwid->id))
-                               return id;
-                       else if (id->cls && __acpi_match_device_cls(id, hwid))
-                               return id;
+               if (acpi_ids) {
+                       for (id = acpi_ids; id->id[0] || id->cls; id++) {
+                               if (id->id[0] && !strcmp((char *)id->id, hwid->id))
+                                       goto out_acpi_match;
+                               if (id->cls && __acpi_match_device_cls(id, hwid))
+                                       goto out_acpi_match;
+                       }
                }
 
                /*
                 * Next, check ACPI_DT_NAMESPACE_HID and try to match the
                 * "compatible" property if found.
-                *
-                * The id returned by the below is not valid, but the only
-                * caller passing non-NULL of_ids here is only interested in
-                * whether or not the return value is NULL.
                 */
-               if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)
-                   && acpi_of_match_device(device, of_ids))
-                       return id;
+               if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id))
+                       return acpi_of_match_device(device, of_ids, of_id);
        }
-       return NULL;
+       return false;
+
+out_acpi_match:
+       if (acpi_id)
+               *acpi_id = id;
+       return true;
 }
 
 /**
@@ -815,32 +823,29 @@ static const struct acpi_device_id *__acpi_match_device(
 const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
                                               const struct device *dev)
 {
-       return __acpi_match_device(acpi_companion_match(dev), ids, NULL);
+       const struct acpi_device_id *id = NULL;
+
+       __acpi_match_device(acpi_companion_match(dev), ids, NULL, &id, NULL);
+       return id;
 }
 EXPORT_SYMBOL_GPL(acpi_match_device);
 
-void *acpi_get_match_data(const struct device *dev)
+const void *acpi_device_get_match_data(const struct device *dev)
 {
        const struct acpi_device_id *match;
 
-       if (!dev->driver)
-               return NULL;
-
-       if (!dev->driver->acpi_match_table)
-               return NULL;
-
        match = acpi_match_device(dev->driver->acpi_match_table, dev);
        if (!match)
                return NULL;
 
-       return (void *)match->driver_data;
+       return (const void *)match->driver_data;
 }
-EXPORT_SYMBOL_GPL(acpi_get_match_data);
+EXPORT_SYMBOL_GPL(acpi_device_get_match_data);
 
 int acpi_match_device_ids(struct acpi_device *device,
                          const struct acpi_device_id *ids)
 {
-       return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT;
+       return __acpi_match_device(device, ids, NULL, NULL, NULL) ? 0 : -ENOENT;
 }
 EXPORT_SYMBOL(acpi_match_device_ids);
 
@@ -849,10 +854,12 @@ bool acpi_driver_match_device(struct device *dev,
 {
        if (!drv->acpi_match_table)
                return acpi_of_match_device(ACPI_COMPANION(dev),
-                                           drv->of_match_table);
+                                           drv->of_match_table,
+                                           NULL);
 
-       return !!__acpi_match_device(acpi_companion_match(dev),
-                                    drv->acpi_match_table, drv->of_match_table);
+       return __acpi_match_device(acpi_companion_match(dev),
+                                  drv->acpi_match_table, drv->of_match_table,
+                                  NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(acpi_driver_match_device);
 
index d9f38c645e4a1dc776b783dc1eae7aeb28418873..30a5729565575f83cb02700ac2050f35abab5e5d 100644 (file)
@@ -1927,6 +1927,9 @@ static int acpi_ec_suspend_noirq(struct device *dev)
            ec->reference_count >= 1)
                acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
 
+       if (acpi_sleep_no_ec_events())
+               acpi_ec_enter_noirq(ec);
+
        return 0;
 }
 
@@ -1934,6 +1937,9 @@ static int acpi_ec_resume_noirq(struct device *dev)
 {
        struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
 
+       if (acpi_sleep_no_ec_events())
+               acpi_ec_leave_noirq(ec);
+
        if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
            ec->reference_count >= 1)
                acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
index 466d1503aba0e31f26b297df2532561a03c12ebd..5815356ea6ad3fb584a63e4ebf076d09fda635d8 100644 (file)
@@ -1271,11 +1271,11 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
        return 0;
 }
 
-static void *
+static const void *
 acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
                                  const struct device *dev)
 {
-       return acpi_get_match_data(dev);
+       return acpi_device_get_match_data(dev);
 }
 
 #define DECLARE_ACPI_FWNODE_OPS(ops) \
index 89e97d21a89ce26c7b8a0f5b53802618c871a927..9d52743080a4f65200ea6e33f0d0d2fefdb881ea 100644 (file)
@@ -115,6 +115,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
                        table->serial_port.access_width))) {
                default:
                        pr_err("Unexpected SPCR Access Width.  Defaulting to byte size\n");
+                       /* fall through */
                case 8:
                        iotype = "mmio";
                        break;
index b2261f92f2f1c1356b8e2f78e915cce9efff36ba..5847364f25d96721ea6ce13f81bd8ead83504cb7 100644 (file)
@@ -310,6 +310,9 @@ static void __device_link_del(struct device_link *link)
        dev_info(link->consumer, "Dropping the link to %s\n",
                 dev_name(link->supplier));
 
+       if (link->flags & DL_FLAG_PM_RUNTIME)
+               pm_runtime_drop_link(link->consumer);
+
        list_del(&link->s_node);
        list_del(&link->c_node);
        device_link_free(link);
index a8ac86e4d79e73f758301bdc8f690dddb795b671..6637fc319269ba9f93a4c7ae44422723656033c8 100644 (file)
@@ -321,7 +321,8 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
                return;
 
        if (device_may_wakeup(wirq->dev)) {
-               if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
+               if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+                   !pm_runtime_status_suspended(wirq->dev))
                        enable_irq(wirq->irq);
 
                enable_irq_wake(wirq->irq);
@@ -343,7 +344,8 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
        if (device_may_wakeup(wirq->dev)) {
                disable_irq_wake(wirq->irq);
 
-               if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
+               if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+                   !pm_runtime_status_suspended(wirq->dev))
                        disable_irq_nosync(wirq->irq);
        }
 }
index 302236281d83012c23334ff752c680328a16769a..8f205f6461ed8cb2907284d9840717afafb02844 100644 (file)
@@ -1410,9 +1410,8 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
 }
 EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
 
-void *device_get_match_data(struct device *dev)
+const void *device_get_match_data(struct device *dev)
 {
-       return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data,
-                                 dev);
+       return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev);
 }
 EXPORT_SYMBOL_GPL(device_get_match_data);
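
For orientation, a minimal caller sketch of the constified lookup (the foo_* names are hypothetical; device_get_match_data() is the function in the hunk above):

static int foo_probe(struct platform_device *pdev)
{
	/* now returns a const pointer; callers must not cast that away */
	const struct foo_cfg *cfg = device_get_match_data(&pdev->dev);

	if (!cfg)
		return -ENODEV;
	return foo_hw_init(pdev, cfg);	/* hypothetical helper */
}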
index d1f5bb534e0e363ba4750b7b7f9035de94cfc4ec..6e9df558325bea7d139a1b726858837a586cde51 100644 (file)
@@ -162,7 +162,7 @@ static int via_rng_init(struct hwrng *rng)
        /* Enable secondary noise source on CPUs where it is present. */
 
        /* Nehemiah stepping 8 and higher */
-       if ((c->x86_model == 9) && (c->x86_mask > 7))
+       if ((c->x86_model == 9) && (c->x86_stepping > 7))
                lo |= VIA_NOISESRC2;
 
        /* Esther */
index 3a2ca0f79daf281c5940222f6b9da179b35f64f3..d0c34df0529c8a8ae425a9a1e845bab0672544ce 100644 (file)
@@ -629,7 +629,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
        if (c->x86_vendor == X86_VENDOR_INTEL) {
                if ((c->x86 == 15) &&
                    (c->x86_model == 6) &&
-                   (c->x86_mask == 8)) {
+                   (c->x86_stepping == 8)) {
                        pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
                        return -ENODEV;
                    }
index 942632a27b50fb458f927ed69406eafab99511e3..f730b6528c185c8ab94393668afca956ee21eee7 100644 (file)
@@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
                break;
 
        case 7:
-               switch (c->x86_mask) {
+               switch (c->x86_stepping) {
                case 0:
                        longhaul_version = TYPE_LONGHAUL_V1;
                        cpu_model = CPU_SAMUEL2;
@@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
                        break;
                case 1 ... 15:
                        longhaul_version = TYPE_LONGHAUL_V2;
-                       if (c->x86_mask < 8) {
+                       if (c->x86_stepping < 8) {
                                cpu_model = CPU_SAMUEL2;
                                cpuname = "C3 'Samuel 2' [C5B]";
                        } else {
@@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
                numscales = 32;
                memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
                memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
-               switch (c->x86_mask) {
+               switch (c->x86_stepping) {
                case 0 ... 1:
                        cpu_model = CPU_NEHEMIAH;
                        cpuname = "C3 'Nehemiah A' [C5XLOE]";
index fd77812313f3ecd2ef485b6298141de322ce4d80..a25741b1281b46c13d8af408413bed82634cbdab 100644 (file)
@@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 #endif
 
        /* Errata workaround */
-       cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
+       cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;
        switch (cpuid) {
        case 0x0f07:
        case 0x0f0a:
index 80ac313e6c59c13fa28e9bf62cd616e5d25f9448..302e9ce793a0171e491e0374eee3a7fc1dac63ac 100644 (file)
@@ -131,7 +131,7 @@ static int check_powernow(void)
                return 0;
        }
 
-       if ((c->x86_model == 6) && (c->x86_mask == 0)) {
+       if ((c->x86_model == 6) && (c->x86_stepping == 0)) {
                pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
                have_a0 = 1;
        }
index 41bc5397f4bbb3d0a08bcab5c3a75ad976680554..4fa5adf16c7014817485784f47d0e650d95e3344 100644 (file)
@@ -37,7 +37,7 @@ struct cpu_id
 {
        __u8    x86;            /* CPU family */
        __u8    x86_model;      /* model */
-       __u8    x86_mask;       /* stepping */
+       __u8    x86_stepping;   /* stepping */
 };
 
 enum {
@@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
 {
        if ((c->x86 == x->x86) &&
            (c->x86_model == x->x86_model) &&
-           (c->x86_mask == x->x86_mask))
+           (c->x86_stepping == x->x86_stepping))
                return 1;
        return 0;
 }
index 8085ec9000d19eb3c1ed3800844f4eeb12be8511..e3a9962ee4109b63f4815074211a232444298d8a 100644 (file)
@@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(void)
                ebx = cpuid_ebx(0x00000001);
                ebx &= 0x000000FF;
 
-               pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
+               pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping);
 
-               switch (c->x86_mask) {
+               switch (c->x86_stepping) {
                case 4:
                        /*
                         * B-stepping [M-P4-M]
@@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(void)
                                msr_lo, msr_hi);
                if ((msr_hi & (1<<18)) &&
                    (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
-                       if (c->x86_mask == 0x01) {
+                       if (c->x86_stepping == 0x01) {
                                pr_debug("early PIII version\n");
                                return SPEEDSTEP_CPU_PIII_C_EARLY;
                        } else
index 75d280cb2dc057f20e26e20362aabd1d1d6bd7c8..e843cf410373681848d61c45ef63ca6a560ae7d1 100644 (file)
@@ -228,12 +228,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
                 * without any error (HW optimizations for later
                 * CAAM eras), then try again.
                 */
+               if (ret)
+                       break;
+
                rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
                if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
-                   !(rdsta_val & (1 << sh_idx)))
+                   !(rdsta_val & (1 << sh_idx))) {
                        ret = -EAGAIN;
-               if (ret)
                        break;
+               }
+
                dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
                /* Clear the contents before recreating the descriptor */
                memset(desc, 0x00, CAAM_CMD_SZ * 7);
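
On my reading, the reordering above fixes the error flow: a hard failure from the instantiation attempt used to fall through to the RDSTA check, where ret could be rewritten to -EAGAIN. A sketch of the flow after the fix:

/*
 *   if (ret)                         -> hard error: break, preserve ret
 *   else if (bad status || !SH bit)  -> ret = -EAGAIN: break, retry later
 *   else                             -> instantiated: log and continue
 */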
index 4b6642a25df51e9315b816c9062791febdaf6d10..1c6cbda56afe9964c65f58e0cf43024b26b08ba3 100644 (file)
@@ -512,7 +512,7 @@ static int __init padlock_init(void)
 
        printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
 
-       if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+       if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
                ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
                cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
                printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
index 0d01d16242527c919b99a284a48071fb5c61aa7b..63d636424161dccfbb5cdd12fd34b5db8e1915d1 100644 (file)
@@ -28,7 +28,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
        algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
        ss = algt->ss;
 
-       spin_lock(&ss->slock);
+       spin_lock_bh(&ss->slock);
 
        writel(mode, ss->base + SS_CTL);
 
@@ -51,6 +51,6 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
        }
 
        writel(0, ss->base + SS_CTL);
-       spin_unlock(&ss->slock);
-       return dlen;
+       spin_unlock_bh(&ss->slock);
+       return 0;
 }
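
Two distinct fixes land in the sun4i hunk: the lock becomes the _bh variant (consistent with the generate path being reachable with softirqs in play, my assumption), and the return value changes because a crypto_rng generate() implementation must return 0 on success, not the number of bytes produced. A minimal caller sketch with the generic API:

#include <crypto/rng.h>

static int get_random_block(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng = crypto_alloc_rng("stdrng", 0, 0);
	int err;

	if (IS_ERR(rng))
		return PTR_ERR(rng);
	/* 0 means success; a positive byte count would be treated as an error */
	err = crypto_rng_get_bytes(rng, buf, len);
	crypto_free_rng(rng);
	return err;
}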
index 9c80e0cb16647035fbfeea22ea1b3c9e6bf0a1b1..6882fa2f8badd171ce5b843a05a878a8af0bc46e 100644 (file)
@@ -1138,6 +1138,10 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
 
+       if (!src) {
+               to_talitos_ptr(ptr, 0, 0, is_sec1);
+               return 1;
+       }
        if (sg_count == 1) {
                to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
                return sg_count;
index 8b16ec595fa7273f125d4d0f0bdfaa8a41999c17..329cb96f886fd136062707324680327a3083b763 100644 (file)
@@ -3147,7 +3147,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
        struct amd64_family_type *fam_type = NULL;
 
        pvt->ext_model  = boot_cpu_data.x86_model >> 4;
-       pvt->stepping   = boot_cpu_data.x86_mask;
+       pvt->stepping   = boot_cpu_data.x86_stepping;
        pvt->model      = boot_cpu_data.x86_model;
        pvt->fam        = boot_cpu_data.x86;
 
index e2c3c5ec42d1557d58474441782bcfd788972a80..c53095b3b0fb9754bd248113eee2877130c9e066 100644 (file)
@@ -568,6 +568,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
        /* HG _PR3 doesn't seem to work on this A+A weston board */
        { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+       { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0, 0, 0, 0, 0 },
 };
 
index 909499b73d03acba1a1af1f6f5b77c7ccdd07b5c..021f722e248165d461953999b02a7683d37dc7fa 100644 (file)
@@ -733,6 +733,25 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
        return ret == 0 ? count : ret;
 }
 
+static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
+{
+       struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+       unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+       struct intel_gvt *gvt = vgpu->gvt;
+       int offset;
+
+       /* Only allow MMIO GGTT entry access */
+       if (index != PCI_BASE_ADDRESS_0)
+               return false;
+
+       offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
+               intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
+
+	return offset >= gvt->device_info.gtt_start_offset &&
+	       offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt);
+}
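+
+/*
+ * Illustrative rationale, not from the commit text: a GGTT entry is a
+ * 64-bit PTE, so a guest qword update must reach intel_vgpu_rw() as one
+ * 8-byte access; splitting it into two dword writes would transiently
+ * expose a half-written entry.
+ */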
+
 static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
                        size_t count, loff_t *ppos)
 {
@@ -742,7 +761,21 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
        while (count) {
                size_t filled;
 
-               if (count >= 4 && !(*ppos % 4)) {
+		/* Only support 8-byte GGTT entry reads */
+               if (count >= 8 && !(*ppos % 8) &&
+                       gtt_entry(mdev, ppos)) {
+                       u64 val;
+
+                       ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+                                       ppos, false);
+                       if (ret <= 0)
+                               goto read_err;
+
+                       if (copy_to_user(buf, &val, sizeof(val)))
+                               goto read_err;
+
+                       filled = 8;
+               } else if (count >= 4 && !(*ppos % 4)) {
                        u32 val;
 
                        ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
@@ -802,7 +835,21 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
        while (count) {
                size_t filled;
 
-               if (count >= 4 && !(*ppos % 4)) {
+		/* Only support 8-byte GGTT entry writes */
+               if (count >= 8 && !(*ppos % 8) &&
+                       gtt_entry(mdev, ppos)) {
+                       u64 val;
+
+                       if (copy_from_user(&val, buf, sizeof(val)))
+                               goto write_err;
+
+                       ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+                                       ppos, true);
+                       if (ret <= 0)
+                               goto write_err;
+
+                       filled = 8;
+               } else if (count >= 4 && !(*ppos % 4)) {
                        u32 val;
 
                        if (copy_from_user(&val, buf, sizeof(val)))
index 73ad6e90e49db17cc427c611f715f74ff1e710ec..256f1bb522b7a2edb5490c81be29aa46774e3ed1 100644 (file)
@@ -118,6 +118,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
        {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
        {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
        {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
+       {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
        {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
        {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
        {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
index 7a2511538f340f5d5f6f7a06519bbc844e6fc4b1..736bd2bc5127f059e7705639275be775e433594c 100644 (file)
@@ -333,7 +333,7 @@ TRACE_EVENT(render_mmio,
        TP_PROTO(int old_id, int new_id, char *action, unsigned int reg,
                 unsigned int old_val, unsigned int new_val),
 
-       TP_ARGS(old_id, new_id, action, reg, new_val, old_val),
+       TP_ARGS(old_id, new_id, action, reg, old_val, new_val),
 
        TP_STRUCT__entry(
                __field(int, old_id)
index 173d0095e3b2120e2bd2f4090cbe7da1e25c6619..2f5209de03915277879f9c5e01e597637b93836f 100644 (file)
@@ -1433,19 +1433,7 @@ void i915_driver_unload(struct drm_device *dev)
 
        intel_modeset_cleanup(dev);
 
-       /*
-        * free the memory space allocated for the child device
-        * config parsed from VBT
-        */
-       if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
-               kfree(dev_priv->vbt.child_dev);
-               dev_priv->vbt.child_dev = NULL;
-               dev_priv->vbt.child_dev_num = 0;
-       }
-       kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
-       dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
-       kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
-       dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+       intel_bios_cleanup(dev_priv);
 
        vga_switcheroo_unregister_client(pdev);
        vga_client_register(pdev, NULL, NULL, NULL);
index a42deebedb0f12784155bb7499fef44a2c31cf28..d307429a5ae0a029e1f3b37c6ed76b39d9967b97 100644 (file)
@@ -1349,6 +1349,7 @@ struct intel_vbt_data {
                u32 size;
                u8 *data;
                const u8 *sequence[MIPI_SEQ_MAX];
+               u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
        } dsi;
 
        int crt_ddc_pin;
@@ -3657,6 +3658,7 @@ extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
 
 /* intel_bios.c */
 void intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_cleanup(struct drm_i915_private *dev_priv);
 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
index 648e7536ff51e0eae1365971293ab150f72d9956..0c963fcf31ffd1d2527deaa8df796908a36ec027 100644 (file)
@@ -803,7 +803,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 
        case I915_CONTEXT_PARAM_PRIORITY:
                {
-                       int priority = args->value;
+                       s64 priority = args->value;
 
                        if (args->size)
                                ret = -EINVAL;
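
The s64 widening above reads as a truncation fix: args->value arrives from userspace as a u64, and narrowing it to int before validation lets out-of-range values alias into range. A small illustration:

	u64 user_value = 1ULL << 32;		/* far outside any priority range */
	int truncated = (int)user_value;	/* == 0: would pass a bounds check */
	s64 widened = (s64)user_value;		/* == 4294967296: properly rejected */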
index 42ff06fe54a3a89b4e1d50e3ad2a033cc92a786a..792facdb6702bffdcb808f8bc748518c8299ac8e 100644 (file)
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
 void
 i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
 {
-       strncpy(dev_priv->perf.oa.test_config.uuid,
+       strlcpy(dev_priv->perf.oa.test_config.uuid,
                "577e8e2c-3fa0-4875-8743-3538d585e3b0",
-               UUID_STRING_LEN);
+               sizeof(dev_priv->perf.oa.test_config.uuid));
        dev_priv->perf.oa.test_config.id = 1;
 
        dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
index ff0ac3627cc4bc73858d6c71df9b9571a27de4df..ba9140c87cc0ba7de03f0c36ef91ebcaf372b4c8 100644 (file)
@@ -96,9 +96,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
 void
 i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
 {
-       strncpy(dev_priv->perf.oa.test_config.uuid,
+       strlcpy(dev_priv->perf.oa.test_config.uuid,
                "db41edd4-d8e7-4730-ad11-b9a2d6833503",
-               UUID_STRING_LEN);
+               sizeof(dev_priv->perf.oa.test_config.uuid));
        dev_priv->perf.oa.test_config.id = 1;
 
        dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
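
Both strlcpy() conversions in this pair of files fix the same latent bug; a minimal sketch of the difference, assuming UUID_STRING_LEN == 36 as in include/linux/uuid.h:

	char uuid[UUID_STRING_LEN + 1];
	const char *src = "db41edd4-d8e7-4730-ad11-b9a2d6833503";	/* 36 chars */

	strncpy(uuid, src, UUID_STRING_LEN);	/* copies 36 bytes, never writes a NUL */
	strlcpy(uuid, src, sizeof(uuid));	/* always NUL-terminates the destination */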
index 55a8a1e2942483cda1d631cc623d56f2a75b4683..0e9b98c32b62b0225633a24d3fa41396f9ab70d8 100644 (file)
@@ -285,26 +285,41 @@ static u64 count_interrupts(struct drm_i915_private *i915)
        return sum;
 }
 
-static void i915_pmu_event_destroy(struct perf_event *event)
+static void engine_event_destroy(struct perf_event *event)
 {
-       WARN_ON(event->parent);
+       struct drm_i915_private *i915 =
+               container_of(event->pmu, typeof(*i915), pmu.base);
+       struct intel_engine_cs *engine;
+
+       engine = intel_engine_lookup_user(i915,
+                                         engine_event_class(event),
+                                         engine_event_instance(event));
+       if (WARN_ON_ONCE(!engine))
+               return;
+
+       if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
+           intel_engine_supports_stats(engine))
+               intel_disable_engine_stats(engine);
 }
 
-static int engine_event_init(struct perf_event *event)
+static void i915_pmu_event_destroy(struct perf_event *event)
 {
-       struct drm_i915_private *i915 =
-               container_of(event->pmu, typeof(*i915), pmu.base);
+       WARN_ON(event->parent);
 
-       if (!intel_engine_lookup_user(i915, engine_event_class(event),
-                                     engine_event_instance(event)))
-               return -ENODEV;
+       if (is_engine_event(event))
+               engine_event_destroy(event);
+}
 
-       switch (engine_event_sample(event)) {
+static int
+engine_event_status(struct intel_engine_cs *engine,
+                   enum drm_i915_pmu_engine_sample sample)
+{
+       switch (sample) {
        case I915_SAMPLE_BUSY:
        case I915_SAMPLE_WAIT:
                break;
        case I915_SAMPLE_SEMA:
-               if (INTEL_GEN(i915) < 6)
+               if (INTEL_GEN(engine->i915) < 6)
                        return -ENODEV;
                break;
        default:
@@ -314,6 +329,30 @@ static int engine_event_init(struct perf_event *event)
        return 0;
 }
 
+static int engine_event_init(struct perf_event *event)
+{
+       struct drm_i915_private *i915 =
+               container_of(event->pmu, typeof(*i915), pmu.base);
+       struct intel_engine_cs *engine;
+       u8 sample;
+       int ret;
+
+       engine = intel_engine_lookup_user(i915, engine_event_class(event),
+                                         engine_event_instance(event));
+       if (!engine)
+               return -ENODEV;
+
+       sample = engine_event_sample(event);
+       ret = engine_event_status(engine, sample);
+       if (ret)
+               return ret;
+
+       if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
+               ret = intel_enable_engine_stats(engine);
+
+       return ret;
+}
+
 static int i915_pmu_event_init(struct perf_event *event)
 {
        struct drm_i915_private *i915 =
@@ -370,7 +409,94 @@ static int i915_pmu_event_init(struct perf_event *event)
        return 0;
 }
 
-static u64 __i915_pmu_event_read(struct perf_event *event)
+static u64 __get_rc6(struct drm_i915_private *i915)
+{
+       u64 val;
+
+       val = intel_rc6_residency_ns(i915,
+                                    IS_VALLEYVIEW(i915) ?
+                                    VLV_GT_RENDER_RC6 :
+                                    GEN6_GT_GFX_RC6);
+
+       if (HAS_RC6p(i915))
+               val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
+
+       if (HAS_RC6pp(i915))
+               val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
+
+       return val;
+}
+
+static u64 get_rc6(struct drm_i915_private *i915, bool locked)
+{
+#if IS_ENABLED(CONFIG_PM)
+       unsigned long flags;
+       u64 val;
+
+       if (intel_runtime_pm_get_if_in_use(i915)) {
+               val = __get_rc6(i915);
+               intel_runtime_pm_put(i915);
+
+               /*
+                * If we are coming back from being runtime suspended we must
+		 * be careful not to report a smaller value than we returned
+                * previously.
+                */
+
+               if (!locked)
+                       spin_lock_irqsave(&i915->pmu.lock, flags);
+
+               if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+                       i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
+                       i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
+               } else {
+                       val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+               }
+
+               if (!locked)
+                       spin_unlock_irqrestore(&i915->pmu.lock, flags);
+       } else {
+               struct pci_dev *pdev = i915->drm.pdev;
+               struct device *kdev = &pdev->dev;
+               unsigned long flags2;
+
+               /*
+                * We are runtime suspended.
+                *
+                * Report the delta from when the device was suspended to now,
+                * on top of the last known real value, as the approximated RC6
+                * counter value.
+                */
+               if (!locked)
+                       spin_lock_irqsave(&i915->pmu.lock, flags);
+
+               spin_lock_irqsave(&kdev->power.lock, flags2);
+
+               if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+                       i915->pmu.suspended_jiffies_last =
+                                               kdev->power.suspended_jiffies;
+
+               val = kdev->power.suspended_jiffies -
+                     i915->pmu.suspended_jiffies_last;
+               val += jiffies - kdev->power.accounting_timestamp;
+
+               spin_unlock_irqrestore(&kdev->power.lock, flags2);
+
+               val = jiffies_to_nsecs(val);
+               val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+               i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+
+               if (!locked)
+                       spin_unlock_irqrestore(&i915->pmu.lock, flags);
+       }
+
+       return val;
+#else
+       return __get_rc6(i915);
+#endif
+}
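+
+/*
+ * Worked example for the estimation branch above (illustrative numbers):
+ * if the device suspended at jiffies J0 having last reported RC6 = R,
+ * a read at jiffies J1 while still suspended returns roughly
+ *   R + jiffies_to_nsecs(J1 - J0)
+ * i.e. the whole suspended interval is credited as RC6 time, and the
+ * cached estimate keeps the counter from running backwards on resume.
+ */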
+
+static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
 {
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
@@ -387,7 +513,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
                if (WARN_ON_ONCE(!engine)) {
                        /* Do nothing */
                } else if (sample == I915_SAMPLE_BUSY &&
-                          engine->pmu.busy_stats) {
+                          intel_engine_supports_stats(engine)) {
                        val = ktime_to_ns(intel_engine_get_busy_time(engine));
                } else {
                        val = engine->pmu.sample[sample].cur;
@@ -408,18 +534,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
                        val = count_interrupts(i915);
                        break;
                case I915_PMU_RC6_RESIDENCY:
-                       intel_runtime_pm_get(i915);
-                       val = intel_rc6_residency_ns(i915,
-                                                    IS_VALLEYVIEW(i915) ?
-                                                    VLV_GT_RENDER_RC6 :
-                                                    GEN6_GT_GFX_RC6);
-                       if (HAS_RC6p(i915))
-                               val += intel_rc6_residency_ns(i915,
-                                                             GEN6_GT_GFX_RC6p);
-                       if (HAS_RC6pp(i915))
-                               val += intel_rc6_residency_ns(i915,
-                                                             GEN6_GT_GFX_RC6pp);
-                       intel_runtime_pm_put(i915);
+                       val = get_rc6(i915, locked);
                        break;
                }
        }
@@ -434,7 +549,7 @@ static void i915_pmu_event_read(struct perf_event *event)
 
 again:
        prev = local64_read(&hwc->prev_count);
-       new = __i915_pmu_event_read(event);
+       new = __i915_pmu_event_read(event, false);
 
        if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
                goto again;
@@ -442,12 +557,6 @@ static void i915_pmu_event_read(struct perf_event *event)
        local64_add(new - prev, &event->count);
 }
 
-static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
-{
-       return intel_engine_supports_stats(engine) &&
-              (engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
-}
-
 static void i915_pmu_enable(struct perf_event *event)
 {
        struct drm_i915_private *i915 =
@@ -487,21 +596,7 @@ static void i915_pmu_enable(struct perf_event *event)
 
                GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
                GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
-               if (engine->pmu.enable_count[sample]++ == 0) {
-                       /*
-                        * Enable engine busy stats tracking if needed or
-                        * alternatively cancel the scheduled disable.
-                        *
-                        * If the delayed disable was pending, cancel it and
-                        * in this case do not enable since it already is.
-                        */
-                       if (engine_needs_busy_stats(engine) &&
-                           !engine->pmu.busy_stats) {
-                               engine->pmu.busy_stats = true;
-                               if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
-                                       intel_enable_engine_stats(engine);
-                       }
-               }
+               engine->pmu.enable_count[sample]++;
        }
 
        /*
@@ -509,19 +604,11 @@ static void i915_pmu_enable(struct perf_event *event)
         * for all listeners. Even when the event was already enabled and has
         * an existing non-zero value.
         */
-       local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+       local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));
 
        spin_unlock_irqrestore(&i915->pmu.lock, flags);
 }
 
-static void __disable_busy_stats(struct work_struct *work)
-{
-       struct intel_engine_cs *engine =
-              container_of(work, typeof(*engine), pmu.disable_busy_stats.work);
-
-       intel_disable_engine_stats(engine);
-}
-
 static void i915_pmu_disable(struct perf_event *event)
 {
        struct drm_i915_private *i915 =
@@ -545,26 +632,8 @@ static void i915_pmu_disable(struct perf_event *event)
                 * Decrement the reference count and clear the enabled
                 * bitmask when the last listener on an event goes away.
                 */
-               if (--engine->pmu.enable_count[sample] == 0) {
+               if (--engine->pmu.enable_count[sample] == 0)
                        engine->pmu.enable &= ~BIT(sample);
-                       if (!engine_needs_busy_stats(engine) &&
-                           engine->pmu.busy_stats) {
-                               engine->pmu.busy_stats = false;
-                               /*
-                                * We request a delayed disable to handle the
-                                * rapid on/off cycles on events, which can
-                                * happen when tools like perf stat start, in a
-                                * nicer way.
-                                *
-                                * In addition, this also helps with busy stats
-                                * accuracy with background CPU offline/online
-                                * migration events.
-                                */
-                               queue_delayed_work(system_wq,
-                                                  &engine->pmu.disable_busy_stats,
-                                                  round_jiffies_up_relative(HZ));
-                       }
-               }
        }
 
        GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
@@ -797,8 +866,6 @@ static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
 
 void i915_pmu_register(struct drm_i915_private *i915)
 {
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
        int ret;
 
        if (INTEL_GEN(i915) <= 2) {
@@ -820,10 +887,6 @@ void i915_pmu_register(struct drm_i915_private *i915)
        hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        i915->pmu.timer.function = i915_sample;
 
-       for_each_engine(engine, i915, id)
-               INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats,
-                                 __disable_busy_stats);
-
        ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
        if (ret)
                goto err;
@@ -843,9 +906,6 @@ void i915_pmu_register(struct drm_i915_private *i915)
 
 void i915_pmu_unregister(struct drm_i915_private *i915)
 {
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
        if (!i915->pmu.base.event_init)
                return;
 
@@ -853,11 +913,6 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
 
        hrtimer_cancel(&i915->pmu.timer);
 
-       for_each_engine(engine, i915, id) {
-               GEM_BUG_ON(engine->pmu.busy_stats);
-               flush_delayed_work(&engine->pmu.disable_busy_stats);
-       }
-
        i915_pmu_unregister_cpuhp_state(i915);
 
        perf_pmu_unregister(&i915->pmu.base);
index 40c154d13565a09b34399c462c18aa69cec24226..bb62df15afa4f4cf3f0b79cd3c1fa2b182a5bcca 100644 (file)
@@ -27,6 +27,8 @@
 enum {
        __I915_SAMPLE_FREQ_ACT = 0,
        __I915_SAMPLE_FREQ_REQ,
+       __I915_SAMPLE_RC6,
+       __I915_SAMPLE_RC6_ESTIMATED,
        __I915_NUM_PMU_SAMPLERS
 };
 
@@ -94,6 +96,10 @@ struct i915_pmu {
         * struct intel_engine_cs.
         */
        struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
+       /**
+        * @suspended_jiffies_last: Cached suspend time from PM core.
+        */
+       unsigned long suspended_jiffies_last;
 };
 
 #ifdef CONFIG_PERF_EVENTS
index f7f771749e4809dcedbea023bea54e4be4ac1537..b49a2df444301c82ce4fcc2de6b7111fe8a26a01 100644 (file)
@@ -947,6 +947,86 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
        return 0;
 }
 
+/*
+ * Get the length of the deassert fragment at the start of a v1 init OTP
+ * sequence: skip all delay + gpio operands and stop at the first DSI packet op.
+ */
+static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
+{
+       const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+       int index, len;
+
+       if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
+               return 0;
+
+       /* index = 1 to skip sequence byte */
+       for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) {
+               switch (data[index]) {
+               case MIPI_SEQ_ELEM_SEND_PKT:
+                       return index == 1 ? 0 : index;
+               case MIPI_SEQ_ELEM_DELAY:
+                       len = 5; /* 1 byte for operand + uint32 */
+                       break;
+               case MIPI_SEQ_ELEM_GPIO:
+                       len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */
+                       break;
+               default:
+                       return 0;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence.
+ * The deassert must be done before calling intel_dsi_device_ready, so for
+ * these devices we split the init OTP sequence into a deassert sequence and
+ * the actual init OTP part.
+ */
+static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
+{
+       u8 *init_otp;
+       int len;
+
+       /* Limit this to VLV for now. */
+       if (!IS_VALLEYVIEW(dev_priv))
+               return;
+
+       /* Limit this to v1 vid-mode sequences */
+       if (dev_priv->vbt.dsi.config->is_cmd_mode ||
+           dev_priv->vbt.dsi.seq_version != 1)
+               return;
+
+       /* Only do this if there are otp and assert seqs and no deassert seq */
+       if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
+           !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
+           dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
+               return;
+
+       /* The deassert-sequence ends at the first DSI packet */
+       len = get_init_otp_deassert_fragment_len(dev_priv);
+       if (!len)
+               return;
+
+       DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
+
+       /* Copy the fragment, update seq byte and terminate it */
+       init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+       dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
+       if (!dev_priv->vbt.dsi.deassert_seq)
+               return;
+       dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
+       dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
+       /* Use the copy for deassert */
+       dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
+               dev_priv->vbt.dsi.deassert_seq;
+       /* Replace the last byte of the fragment with init OTP seq byte */
+       init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
+       /* And make MIPI_SEQ_INIT_OTP point to it */
+       dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+}
+
 static void
 parse_mipi_sequence(struct drm_i915_private *dev_priv,
                    const struct bdb_header *bdb)
@@ -1016,6 +1096,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
        dev_priv->vbt.dsi.size = seq_size;
        dev_priv->vbt.dsi.seq_version = sequence->version;
 
+       fixup_mipi_sequences(dev_priv);
+
        DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
        return;
 
@@ -1588,6 +1670,29 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
                pci_unmap_rom(pdev, bios);
 }
 
+/**
+ * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
+ * @dev_priv: i915 device instance
+ */
+void intel_bios_cleanup(struct drm_i915_private *dev_priv)
+{
+       kfree(dev_priv->vbt.child_dev);
+       dev_priv->vbt.child_dev = NULL;
+       dev_priv->vbt.child_dev_num = 0;
+       kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+       dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+       kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+       dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+       kfree(dev_priv->vbt.dsi.data);
+       dev_priv->vbt.dsi.data = NULL;
+       kfree(dev_priv->vbt.dsi.pps);
+       dev_priv->vbt.dsi.pps = NULL;
+       kfree(dev_priv->vbt.dsi.config);
+       dev_priv->vbt.dsi.config = NULL;
+       kfree(dev_priv->vbt.dsi.deassert_seq);
+       dev_priv->vbt.dsi.deassert_seq = NULL;
+}
+
 /**
  * intel_bios_is_tv_present - is integrated TV present in VBT
  * @dev_priv:  i915 device instance
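
The fixup_mipi_sequences() logic above is easier to see on a concrete byte stream. Below is a minimal userspace model of get_init_otp_deassert_fragment_len(); the opcode values and the sample sequence are hypothetical stand-ins rather than the real VBT definitions, and only the walking logic mirrors the patch.

    #include <stdio.h>

    /* Hypothetical element opcodes; the real values live in the i915 VBT headers. */
    enum { ELEM_END = 0, ELEM_SEND_PKT = 1, ELEM_DELAY = 2, ELEM_GPIO = 3 };

    /* Walk delay/gpio ops and stop at the first DSI packet op, returning the
     * prefix length (0 means no usable fragment), as in the patch above. */
    static int deassert_fragment_len(const unsigned char *seq)
    {
        int index, len;

        /* index = 1 to skip the sequence-type byte */
        for (index = 1; seq[index] != ELEM_END; index += len) {
            switch (seq[index]) {
            case ELEM_SEND_PKT:
                return index == 1 ? 0 : index;
            case ELEM_DELAY:
                len = 5;    /* opcode + u32 delay */
                break;
            case ELEM_GPIO:
                len = 3;    /* opcode + gpio_nr + value */
                break;
            default:
                return 0;   /* unknown op: give up */
            }
        }
        return 0;
    }

    int main(void)
    {
        /* seq-type byte, one gpio op, one delay op, then the first DSI packet */
        unsigned char seq[] = { 9, ELEM_GPIO, 4, 1,
                                ELEM_DELAY, 0, 0, 0, 10,
                                ELEM_SEND_PKT, ELEM_END };

        printf("deassert fragment length: %d\n", deassert_fragment_len(seq)); /* 9 */
        return 0;
    }

With len in hand, the patch duplicates bytes [0, len] as the new deassert sequence (rewriting the first byte to the deassert type and the last to an end marker), then sacrifices byte len - 1 of the original buffer, whose contents were already copied, to hold an init-OTP type byte so the shortened init OTP sequence can start there.
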
index bd40fea16b4f1fc6d01bfdc6e80705d5234c9fce..f54ddda9fdadac01226cd65084674b91b807af76 100644 (file)
@@ -594,29 +594,16 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
        spin_unlock_irq(&b->rb_lock);
 }
 
-static bool signal_valid(const struct drm_i915_gem_request *request)
-{
-       return intel_wait_check_request(&request->signaling.wait, request);
-}
-
 static bool signal_complete(const struct drm_i915_gem_request *request)
 {
        if (!request)
                return false;
 
-       /* If another process served as the bottom-half it may have already
-        * signalled that this wait is already completed.
-        */
-       if (intel_wait_complete(&request->signaling.wait))
-               return signal_valid(request);
-
-       /* Carefully check if the request is complete, giving time for the
+       /*
+        * Carefully check if the request is complete, giving time for the
         * seqno to be visible or if the GPU hung.
         */
-       if (__i915_request_irq_complete(request))
-               return true;
-
-       return false;
+       return __i915_request_irq_complete(request);
 }
 
 static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
@@ -659,9 +646,13 @@ static int intel_breadcrumbs_signaler(void *arg)
                        request = i915_gem_request_get_rcu(request);
                rcu_read_unlock();
                if (signal_complete(request)) {
-                       local_bh_disable();
-                       dma_fence_signal(&request->fence);
-                       local_bh_enable(); /* kick start the tasklets */
+                       if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+                                     &request->fence.flags)) {
+                               local_bh_disable();
+                               dma_fence_signal(&request->fence);
+                               GEM_BUG_ON(!i915_gem_request_completed(request));
+                               local_bh_enable(); /* kick start the tasklets */
+                       }
 
                        spin_lock_irq(&b->rb_lock);
 
index 5dc118f26b51b7b63c8849e53b6a295c11328c80..1704c8897afd0f91458d490ff9531a4f3ad2414f 100644 (file)
@@ -1952,6 +1952,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
        if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
                min_cdclk = max(2 * 96000, min_cdclk);
 
+       /*
+        * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
+        * than 320000 kHz.
+        */
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
+           IS_VALLEYVIEW(dev_priv))
+               min_cdclk = max(320000, min_cdclk);
+
        if (min_cdclk > dev_priv->max_cdclk_freq) {
                DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
                              min_cdclk, dev_priv->max_cdclk_freq);
index d790bdc227ffb562fa58f50023e264d2bd8b5c38..fa960cfd2764f8e44d3d48d0e810cb2cfe4b087a 100644 (file)
@@ -1458,7 +1458,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
        struct drm_i915_private *dev_priv = engine->i915;
        bool idle = true;
 
-       intel_runtime_pm_get(dev_priv);
+       /* If the whole device is asleep, the engine must be idle */
+       if (!intel_runtime_pm_get_if_in_use(dev_priv))
+               return true;
 
        /* First check that no commands are left in the ring */
        if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
@@ -1943,16 +1945,22 @@ intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
  */
 int intel_enable_engine_stats(struct intel_engine_cs *engine)
 {
+       struct intel_engine_execlists *execlists = &engine->execlists;
        unsigned long flags;
+       int err = 0;
 
        if (!intel_engine_supports_stats(engine))
                return -ENODEV;
 
+       tasklet_disable(&execlists->tasklet);
        spin_lock_irqsave(&engine->stats.lock, flags);
-       if (engine->stats.enabled == ~0)
-               goto busy;
+
+       if (unlikely(engine->stats.enabled == ~0)) {
+               err = -EBUSY;
+               goto unlock;
+       }
+
        if (engine->stats.enabled++ == 0) {
-               struct intel_engine_execlists *execlists = &engine->execlists;
                const struct execlist_port *port = execlists->port;
                unsigned int num_ports = execlists_num_ports(execlists);
 
@@ -1967,14 +1975,12 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
                if (engine->stats.active)
                        engine->stats.start = engine->stats.enabled_at;
        }
-       spin_unlock_irqrestore(&engine->stats.lock, flags);
-
-       return 0;
 
-busy:
+unlock:
        spin_unlock_irqrestore(&engine->stats.lock, flags);
+       tasklet_enable(&execlists->tasklet);
 
-       return -EBUSY;
+       return err;
 }
 
 static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
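
Two things happen in the intel_enable_engine_stats() hunk above: the execlists tasklet is disabled before taking the stats lock, so the port state being sampled cannot change mid-initialization, and the two unlock paths are folded into a single exit label. The latter is a common refactor; here is a minimal pthreads sketch of the single-exit shape (illustrative names, not i915 API):

    #include <errno.h>
    #include <pthread.h>

    struct stats {
        pthread_mutex_t lock;
        unsigned int enabled;   /* ~0u means the counter is saturated */
    };

    /* Every branch funnels through one unlock site, so lock balance is
     * checkable at a glance and new error cases cannot leak the lock. */
    static int enable_stats(struct stats *s)
    {
        int err = 0;

        pthread_mutex_lock(&s->lock);
        if (s->enabled == ~0u) {
            err = -EBUSY;
            goto unlock;
        }
        s->enabled++;
    unlock:
        pthread_mutex_unlock(&s->lock);
        return err;
    }

    int main(void)
    {
        struct stats s = { PTHREAD_MUTEX_INITIALIZER, 0 };

        return enable_stats(&s);    /* 0 on the first enable */
    }
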
index c5ff203e42d6a8627d3c6bb18c589a103311d0c3..a0e7a6c2a57cd8f0b66f27e7e5dcc874857fd649 100644 (file)
@@ -366,20 +366,6 @@ struct intel_engine_cs {
                 */
 #define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
                struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
-               /**
-                * @busy_stats: Has enablement of engine stats tracking been
-                *              requested.
-                */
-               bool busy_stats;
-               /**
-                * @disable_busy_stats: Work item for busy stats disabling.
-                *
-                * Same as with @enable_busy_stats action, with the difference
-                * that we delay it in case there are rapid enable-disable
-                * actions, which can happen during tool startup (like perf
-                * stat).
-                */
-               struct delayed_work disable_busy_stats;
        } pmu;
 
        /*
index bf62303571b39addb787e5237635f55edf9f42ba..3695cde669f881335445fb14db5e15f8c26f565a 100644 (file)
@@ -301,7 +301,7 @@ nvkm_therm_attr_set(struct nvkm_therm *therm,
 void
 nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
 {
-       if (!therm->func->clkgate_enable || !therm->clkgating_enabled)
+       if (!therm || !therm->func->clkgate_enable || !therm->clkgating_enabled)
                return;
 
        nvkm_debug(&therm->subdev,
@@ -312,7 +312,7 @@ nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
 void
 nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend)
 {
-       if (!therm->func->clkgate_fini || !therm->clkgating_enabled)
+       if (!therm || !therm->func->clkgate_fini || !therm->clkgating_enabled)
                return;
 
        nvkm_debug(&therm->subdev,
@@ -395,7 +395,7 @@ void
 nvkm_therm_clkgate_init(struct nvkm_therm *therm,
                        const struct nvkm_therm_clkgate_pack *p)
 {
-       if (!therm->func->clkgate_init || !therm->clkgating_enabled)
+       if (!therm || !therm->func->clkgate_init || !therm->clkgating_enabled)
                return;
 
        therm->func->clkgate_init(therm, p);
index 4bdbf77f7197fc039e240021f9e15d5f4eae76e8..72c338eb5fae5a94c5558ebe5aae4530bb649763 100644 (file)
@@ -269,13 +269,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
        for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
                const struct tjmax_model *tm = &tjmax_model_table[i];
                if (c->x86_model == tm->model &&
-                   (tm->mask == ANY || c->x86_mask == tm->mask))
+                   (tm->mask == ANY || c->x86_stepping == tm->mask))
                        return tm->tjmax;
        }
 
        /* Early chips have no MSR for TjMax */
 
-       if (c->x86_model == 0xf && c->x86_mask < 4)
+       if (c->x86_model == 0xf && c->x86_stepping < 4)
                usemsr_ee = 0;
 
        if (c->x86_model > 0xe && usemsr_ee) {
@@ -426,7 +426,7 @@ static int chk_ucode_version(unsigned int cpu)
         * Readings might stop updating when the processor has entered too deep a sleep state,
         * fixed for stepping D0 (6EC).
         */
-       if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
+       if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
                pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
                return -ENODEV;
        }
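
This file, hwmon/vid, k10temp, k8temp, and gxfb below are all mechanical fallout of renaming cpuinfo_x86.x86_mask to x86_stepping: the field has always held the CPUID stepping, and the old name was misleading. For reference, a small userspace probe (x86 with GCC/Clang's cpuid.h) reading the same bits, ignoring the extended family/model encodings for brevity:

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 1;

        /* CPUID leaf 1, EAX: bits [3:0] stepping, [7:4] model, [11:8] family */
        printf("family %u model %u stepping %u\n",
               (eax >> 8) & 0xf, (eax >> 4) & 0xf, eax & 0xf);
        return 0;
    }
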
index ef91b8a6754924319c781b89eb83b2ec393fb3a9..84e91286fc4fde72e30502516b0205213efa9c45 100644 (file)
@@ -293,7 +293,7 @@ u8 vid_which_vrm(void)
        if (c->x86 < 6)         /* Any CPU with family lower than 6 */
                return 0;       /* doesn't have VID */
 
-       vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor);
+       vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor);
        if (vrm_ret == 134)
                vrm_ret = get_via_model_d_vrm();
        if (vrm_ret == 0)
index 06b4e1c78bd8f175e258041584d546ed8cf5538f..051a72eecb2455794d51becf66ea66348e6f17c9 100644 (file)
@@ -129,7 +129,10 @@ static ssize_t temp1_input_show(struct device *dev,
 
        data->read_tempreg(data->pdev, &regval);
        temp = (regval >> 21) * 125;
-       temp -= data->temp_offset;
+       if (temp > data->temp_offset)
+               temp -= data->temp_offset;
+       else
+               temp = 0;
 
        return sprintf(buf, "%u\n", temp);
 }
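
temp here is unsigned, so when temp_offset exceeds the raw reading the bare subtraction would wrap to a huge positive value instead of going negative; the added branch clamps to zero. The hazard in two lines:

    #include <stdio.h>

    int main(void)
    {
        unsigned int temp = 1000, offset = 1500;

        printf("%u\n", temp - offset);                     /* wraps: 4294966796 */
        printf("%u\n", temp > offset ? temp - offset : 0); /* clamped: 0 */
        return 0;
    }
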
@@ -227,7 +230,7 @@ static bool has_erratum_319(struct pci_dev *pdev)
         * and AM3 formats, but that's the best we can do.
         */
        return boot_cpu_data.x86_model < 4 ||
-              (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2);
+              (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
 }
 
 static int k10temp_probe(struct pci_dev *pdev,
index 5a632bcf869bbf78d3835474a066eaa76b84688d..e59f9113fb93b0834209dfd1d0abe4e03f64327d 100644 (file)
@@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev,
                return -ENOMEM;
 
        model = boot_cpu_data.x86_model;
-       stepping = boot_cpu_data.x86_mask;
+       stepping = boot_cpu_data.x86_stepping;
 
        /* feature available since SH-C0, exclude older revisions */
        if ((model == 4 && stepping == 0) ||
index 55cfb986225be79386b3d6487b953ff63ca4ab59..faf734ff4cf3bc69c2a27d8b0ef1555530584fd1 100644 (file)
@@ -339,9 +339,6 @@ int __init bcm7038_l1_of_init(struct device_node *dn,
                goto out_unmap;
        }
 
-       pr_info("registered BCM7038 L1 intc (mem: 0x%p, IRQs: %d)\n",
-               intc->cpus[0]->map_base, IRQS_PER_WORD * intc->n_words);
-
        return 0;
 
 out_unmap:
index 983640eba418ec4e2658e10ddc93bf1f1815fc07..8968e5e93fcb8e3bf478329b765c149a47153cd0 100644 (file)
@@ -318,9 +318,6 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
                }
        }
 
-       pr_info("registered %s intc (mem: 0x%p, parent IRQ(s): %d)\n",
-                       intc_name, data->map_base[0], data->num_parent_irqs);
-
        return 0;
 
 out_free_domain:
index 691d20eb0bec1137c403c6d91097c67a1d906fac..0e65f609352ecee0519e2b207391b8ccace57869 100644 (file)
@@ -262,9 +262,6 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
                ct->chip.irq_set_wake = irq_gc_set_wake;
        }
 
-       pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n",
-                       base, parent_irq);
-
        return 0;
 
 out_free_domain:
index 993a8426a45384a650ac1b68b19760d1a47ee684..1ff38aff9f29f32f895bc9a1975404a0f0e2ce3f 100644 (file)
@@ -94,7 +94,7 @@ static struct irq_chip gicv2m_msi_irq_chip = {
 
 static struct msi_domain_info gicv2m_msi_domain_info = {
        .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
-                  MSI_FLAG_PCI_MSIX),
+                  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
        .chip   = &gicv2m_msi_irq_chip,
 };
 
@@ -155,18 +155,12 @@ static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
        return 0;
 }
 
-static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq)
+static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
+                              int nr_irqs)
 {
-       int pos;
-
-       pos = hwirq - v2m->spi_start;
-       if (pos < 0 || pos >= v2m->nr_spis) {
-               pr_err("Failed to teardown msi. Invalid hwirq %d\n", hwirq);
-               return;
-       }
-
        spin_lock(&v2m_lock);
-       __clear_bit(pos, v2m->bm);
+       bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
+                             get_count_order(nr_irqs));
        spin_unlock(&v2m_lock);
 }
 
@@ -174,13 +168,13 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                   unsigned int nr_irqs, void *args)
 {
        struct v2m_data *v2m = NULL, *tmp;
-       int hwirq, offset, err = 0;
+       int hwirq, offset, i, err = 0;
 
        spin_lock(&v2m_lock);
        list_for_each_entry(tmp, &v2m_nodes, entry) {
-               offset = find_first_zero_bit(tmp->bm, tmp->nr_spis);
-               if (offset < tmp->nr_spis) {
-                       __set_bit(offset, tmp->bm);
+               offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
+                                                get_count_order(nr_irqs));
+               if (offset >= 0) {
                        v2m = tmp;
                        break;
                }
@@ -192,16 +186,21 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 
        hwirq = v2m->spi_start + offset;
 
-       err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq);
-       if (err) {
-               gicv2m_unalloc_msi(v2m, hwirq);
-               return err;
-       }
+       for (i = 0; i < nr_irqs; i++) {
+               err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
+               if (err)
+                       goto fail;
 
-       irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
-                                     &gicv2m_irq_chip, v2m);
+               irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+                                             &gicv2m_irq_chip, v2m);
+       }
 
        return 0;
+
+fail:
+       irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+       gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs));
+       return err;
 }
 
 static void gicv2m_irq_domain_free(struct irq_domain *domain,
@@ -210,8 +209,7 @@ static void gicv2m_irq_domain_free(struct irq_domain *domain,
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
        struct v2m_data *v2m = irq_data_get_irq_chip_data(d);
 
-       BUG_ON(nr_irqs != 1);
-       gicv2m_unalloc_msi(v2m, d->hwirq);
+       gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
 }
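
The switch from find_first_zero_bit()/__set_bit() to bitmap_find_free_region() is what makes Multi-MSI possible here: PCI Multi-MSI hands a device a contiguous, naturally aligned power-of-two block of vectors, which is exactly what the region allocator provides. A userspace model over a single 64-bit word (the kernel helpers handle arbitrary-length bitmaps; this sketch assumes nr_irqs <= 64):

    #include <stdio.h>
    #include <stdint.h>

    /* Smallest order with (1 << order) >= count, like get_count_order(). */
    static int count_order(unsigned int count)
    {
        int order = 0;

        while ((1u << order) < count)
            order++;
        return order;
    }

    /* Claim a free, naturally aligned block of 1 << order bits; returns the
     * starting bit or -1, mimicking bitmap_find_free_region(). */
    static int find_free_region(uint64_t *bm, int nbits, int order)
    {
        int size = 1 << order;

        for (int pos = 0; pos + size <= nbits; pos += size) {
            uint64_t mask = (size == 64 ? ~0ull : (1ull << size) - 1) << pos;

            if (!(*bm & mask)) {
                *bm |= mask;
                return pos;
            }
        }
        return -1;
    }

    int main(void)
    {
        uint64_t bm = 0x1;  /* SPI 0 already taken */
        int off = find_free_region(&bm, 64, count_order(3)); /* 3 MSIs -> order 2 */

        printf("offset %d, bitmap %#llx\n", off,
               (unsigned long long)bm);  /* offset 4, bitmap 0xf1 */
        return 0;
    }

Release must round with the same get_count_order() the allocation used, so that setup and teardown always agree on the block size.
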
 
index 14a8c0a7e095eb32c383bccbb163174ce1ccec28..25a98de5cfb2831fa61d33fd3be3ef30d3f4e534 100644 (file)
@@ -132,6 +132,8 @@ static int __init its_pci_of_msi_init(void)
 
        for (np = of_find_matching_node(NULL, its_device_id); np;
             np = of_find_matching_node(np, its_device_id)) {
+               if (!of_device_is_available(np))
+                       continue;
                if (!of_property_read_bool(np, "msi-controller"))
                        continue;
 
index 833a90fe33aed839a81b781831027e677eef5581..8881a053c173edfdb11ad322b9b60f9b61ef57aa 100644 (file)
@@ -154,6 +154,8 @@ static void __init its_pmsi_of_init(void)
 
        for (np = of_find_matching_node(NULL, its_device_id); np;
             np = of_find_matching_node(np, its_device_id)) {
+               if (!of_device_is_available(np))
+                       continue;
                if (!of_property_read_bool(np, "msi-controller"))
                        continue;
 
index 06f025fd5726f6b230d51c880e7b8accf9e8c738..1d3056f5374721f794b5794baa678cb381cccc04 100644 (file)
@@ -3314,6 +3314,8 @@ static int __init its_of_probe(struct device_node *node)
 
        for (np = of_find_matching_node(node, its_device_id); np;
             np = of_find_matching_node(np, its_device_id)) {
+               if (!of_device_is_available(np))
+                       continue;
                if (!of_property_read_bool(np, "msi-controller")) {
                        pr_warn("%pOF: no msi-controller property, ITS ignored\n",
                                np);
index a57c0fbbd34a4af5651284666402be9fe5fea033..d99cc07903ec497279e3baf563743d9146e77f09 100644 (file)
@@ -673,7 +673,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
               MPIDR_TO_SGI_RS(cluster_id)              |
               tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
 
-       pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
+       pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
        gic_write_sgi1r(val);
 }
 
@@ -688,7 +688,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
         * Ensure that stores to Normal memory are visible to the
         * other CPUs before issuing the IPI.
         */
-       smp_wmb();
+       wmb();
 
        for_each_cpu(cpu, mask) {
                u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
index ef92a4d2038eef7f2c09ad9eaabf91adc3957435..d32268cc1174c75b8cc0942c15637916a5a333ff 100644 (file)
@@ -424,8 +424,6 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
        spin_lock_irqsave(&gic_lock, flags);
        write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
        write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
-       gic_clear_pcpu_masks(intr);
-       set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
        irq_data_update_effective_affinity(data, cpumask_of(cpu));
        spin_unlock_irqrestore(&gic_lock, flags);
 
index 62f541f968f6f8e1161967b82aa1979d471fba90..07074820a16746ec74351ea2273cfecee27e6353 100644 (file)
@@ -375,6 +375,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
        dev->ofdev.dev.of_node = np;
        dev->ofdev.archdata.dma_mask = 0xffffffffUL;
        dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask;
+       dev->ofdev.dev.coherent_dma_mask = dev->ofdev.archdata.dma_mask;
        dev->ofdev.dev.parent = parent;
        dev->ofdev.dev.bus = &macio_bus_type;
        dev->ofdev.dev.release = macio_release_dev;
index d6de00f367efdde4137055dfa4c82fefcbcd1f9a..68136806d365821f63ace7675ce21ba2bf10ed8d 100644 (file)
@@ -903,7 +903,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
                        queue_io(md, bio);
                } else {
                        /* done with normal IO or empty flush */
-                       bio->bi_status = io_error;
+                       if (io_error)
+                               bio->bi_status = io_error;
                        bio_endio(bio);
                }
        }
index d9aa407db06a11acd4c369b993c242c6a5bac3bd..2dd2db9bc1c90f771ab19a4feabb167a6c367071 100644 (file)
@@ -277,7 +277,7 @@ static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
        struct ocxl_context *ctx = file->private_data;
        struct ocxl_kernel_event_header header;
        ssize_t rc;
-       size_t used = 0;
+       ssize_t used = 0;
        DEFINE_WAIT(event_wait);
 
        memset(&header, 0, sizeof(header));
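
A one-character fix with real consequences: afu_read() returns used as its ssize_t result, and if an error path ever stores a negative errno in it, a size_t would turn that into a huge positive byte count and make any < 0 test dead code. A minimal demonstration:

    #include <stdio.h>
    #include <errno.h>
    #include <sys/types.h>

    int main(void)
    {
        size_t  bad  = -EFAULT;  /* wraps to a huge positive value */
        ssize_t good = -EFAULT;

        /* Compilers rightly warn that the first test is always false. */
        printf("bad < 0: %d\n", bad < 0 ? 1 : 0);   /* 0: the error is lost */
        printf("good < 0: %d\n", good < 0 ? 1 : 0); /* 1 */
        return 0;
    }
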
index 229dc18f0581beb26733222be97f981b2038fb60..768972af8b853cf882b1b3311ed8fa1b0a64a49e 100644 (file)
@@ -1265,7 +1265,8 @@ static int bcm2835_add_host(struct bcm2835_host *host)
        char pio_limit_string[20];
        int ret;
 
-       mmc->f_max = host->max_clk;
+       if (!mmc->f_max || mmc->f_max > host->max_clk)
+               mmc->f_max = host->max_clk;
        mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV;
 
        mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000);
index 22438ebfe4e627a9edf789ba5b146158b3a6622e..4f972b879fe6f36ef0018d82a1b71f57c2de3cce 100644 (file)
@@ -717,22 +717,6 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
 static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
        struct meson_host *host = mmc_priv(mmc);
-       int ret;
-
-       /*
-        * If this is the initial tuning, try to get a sane Rx starting
-        * phase before doing the actual tuning.
-        */
-       if (!mmc->doing_retune) {
-               ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
-
-               if (ret)
-                       return ret;
-       }
-
-       ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
-       if (ret)
-               return ret;
 
        return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
 }
@@ -763,9 +747,8 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 
-               /* Reset phases */
+               /* Reset rx phase */
                clk_set_phase(host->rx_clk, 0);
-               clk_set_phase(host->tx_clk, 270);
 
                break;
 
index e6b8c59f2c0da7c0f5f8be9fc609e8f96b8b2140..736ac887303c88baa86ab4857653c322d1d2f60b 100644 (file)
@@ -328,7 +328,7 @@ config MTD_NAND_MARVELL
        tristate "NAND controller support on Marvell boards"
        depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
                   COMPILE_TEST
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && HAS_DMA
        help
          This enables the NAND flash controller driver for Marvell boards,
          including:
index 80d31a58e558cc14582c0b0c97ed77a4615cf454..f367144f3c6f39849dfcd35422a117dcedcb9b0e 100644 (file)
@@ -752,10 +752,8 @@ static int vf610_nfc_probe(struct platform_device *pdev)
                if (mtd->oobsize > 64)
                        mtd->oobsize = 64;
 
-               /*
-                * mtd->ecclayout is not specified here because we're using the
-                * default large page ECC layout defined in NAND core.
-                */
+               /* Use default large page ECC layout defined in NAND core */
+               mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
                if (chip->ecc.strength == 32) {
                        nfc->ecc_mode = ECC_60_BYTE;
                        chip->ecc.bytes = 60;
index f431c32774f3612f5903ff88cb7541b246626ff2..0fe7ea35c2217406af6f8c071a417e08e98dab6a 100644 (file)
@@ -120,8 +120,12 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
        int ret;
 
        ret = nvme_reset_ctrl(ctrl);
-       if (!ret)
+       if (!ret) {
                flush_work(&ctrl->reset_work);
+               if (ctrl->state != NVME_CTRL_LIVE)
+                       ret = -ENETRESET;
+       }
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
@@ -265,7 +269,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
        switch (new_state) {
        case NVME_CTRL_ADMIN_ONLY:
                switch (old_state) {
-               case NVME_CTRL_RECONNECTING:
+               case NVME_CTRL_CONNECTING:
                        changed = true;
                        /* FALLTHRU */
                default:
@@ -276,7 +280,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                switch (old_state) {
                case NVME_CTRL_NEW:
                case NVME_CTRL_RESETTING:
-               case NVME_CTRL_RECONNECTING:
+               case NVME_CTRL_CONNECTING:
                        changed = true;
                        /* FALLTHRU */
                default:
@@ -294,9 +298,9 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                        break;
                }
                break;
-       case NVME_CTRL_RECONNECTING:
+       case NVME_CTRL_CONNECTING:
                switch (old_state) {
-               case NVME_CTRL_LIVE:
+               case NVME_CTRL_NEW:
                case NVME_CTRL_RESETTING:
                        changed = true;
                        /* FALLTHRU */
@@ -309,7 +313,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                case NVME_CTRL_LIVE:
                case NVME_CTRL_ADMIN_ONLY:
                case NVME_CTRL_RESETTING:
-               case NVME_CTRL_RECONNECTING:
+               case NVME_CTRL_CONNECTING:
                        changed = true;
                        /* FALLTHRU */
                default:
@@ -518,9 +522,11 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
                u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
                u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
 
-               range[n].cattr = cpu_to_le32(0);
-               range[n].nlb = cpu_to_le32(nlb);
-               range[n].slba = cpu_to_le64(slba);
+               if (n < segments) {
+                       range[n].cattr = cpu_to_le32(0);
+                       range[n].nlb = cpu_to_le32(nlb);
+                       range[n].slba = cpu_to_le64(slba);
+               }
                n++;
        }
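
The added bounds check keeps nvme_setup_discard() from writing past range[] if the request turns out to contain more bios than the segment count the buffer was sized for; n keeps counting regardless, which evidently lets the caller detect the mismatch and fail the request rather than corrupt memory. The shape as a generic sketch:

    #include <stdio.h>

    /* Write only while in bounds, but count everything, so the caller can
     * compare the produced count against the capacity afterwards. */
    static int fill_bounded(int *out, int capacity, int produced)
    {
        int n = 0;

        for (int item = 0; item < produced; item++) {
            if (n < capacity)
                out[n] = item;
            n++;
        }
        return n;
    }

    int main(void)
    {
        int buf[4];
        int n = fill_bounded(buf, 4, 6);  /* producer yields more than expected */

        if (n != 4)
            printf("mismatch: produced %d, sized for 4 -> error out\n", n);
        return 0;
    }
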
 
@@ -794,13 +800,9 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 
 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
 {
-       struct nvme_command c;
        struct request *rq;
 
-       memset(&c, 0, sizeof(c));
-       c.common.opcode = nvme_admin_keep_alive;
-
-       rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
+       rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
                        NVME_QID_ANY);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
@@ -832,6 +834,8 @@ void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
                return;
 
        INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+       memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
+       ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
        schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
 }
 EXPORT_SYMBOL_GPL(nvme_start_keep_alive);
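
The keep-alive change fixes a lifetime bug: the command is executed asynchronously (note the nvme_keep_alive_end_io completion above), so the struct nvme_command it points at must outlive nvme_keep_alive(); a stack-allocated command does not, hence the move into ctrl->ka_cmd, initialized once when keep-alive starts. A schematic of the hazard, with a hypothetical async_submit() standing in for the block-layer plumbing:

    #include <stdio.h>

    struct cmd { int opcode; };

    static struct cmd *pending;                 /* the "request" keeps a pointer */

    static void async_submit(struct cmd *c) { pending = c; }

    static void bad_submit(void)
    {
        struct cmd c = { .opcode = 0x18 };      /* dies when the function returns */
        async_submit(&c);                       /* pending now dangles */
    }

    struct ctrl { struct cmd ka_cmd; };         /* lives as long as the controller */

    static void good_submit(struct ctrl *ctrl)
    {
        ctrl->ka_cmd.opcode = 0x18;             /* 0x18 = nvme_admin_keep_alive */
        async_submit(&ctrl->ka_cmd);            /* pointer stays valid */
    }

    int main(void)
    {
        struct ctrl ctrl;

        bad_submit();   /* reading *pending here would be undefined behaviour */
        good_submit(&ctrl);
        printf("opcode %#x\n", pending->opcode);
        return 0;
    }
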
@@ -1117,14 +1121,19 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-       struct nvme_ns *ns;
+       struct nvme_ns *ns, *next;
+       LIST_HEAD(rm_list);
 
        mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
-               if (ns->disk && nvme_revalidate_disk(ns->disk))
-                       nvme_ns_remove(ns);
+               if (ns->disk && nvme_revalidate_disk(ns->disk)) {
+                       list_move_tail(&ns->list, &rm_list);
+               }
        }
        mutex_unlock(&ctrl->namespaces_mutex);
+
+       list_for_each_entry_safe(ns, next, &rm_list, list)
+               nvme_ns_remove(ns);
 }
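
nvme_update_formats() now unlinks stale namespaces onto a private rm_list under namespaces_mutex and only calls nvme_ns_remove() after dropping the lock, necessary because removal takes the same mutex itself and can sleep, so removing inline would deadlock. The move-then-process split in a userspace sketch (hand-rolled singly linked list; the kernel uses list_move_tail/list_for_each_entry_safe):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { int id; int stale; struct node *next; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *head;

    /* In the real code this re-takes the list lock and may sleep, so it
     * must run with list_lock dropped. */
    static void remove_node(struct node *n)
    {
        printf("removing %d\n", n->id);
        free(n);
    }

    static void prune_stale(void)
    {
        struct node **pp, *n, *rm = NULL;

        pthread_mutex_lock(&list_lock);
        for (pp = &head; (n = *pp) != NULL; ) {
            if (n->stale) {
                *pp = n->next;   /* unlink under the lock */
                n->next = rm;
                rm = n;          /* park on a private list */
            } else {
                pp = &n->next;
            }
        }
        pthread_mutex_unlock(&list_lock);

        while (rm) {             /* heavy work, lock dropped */
            n = rm;
            rm = rm->next;
            remove_node(n);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));

            if (!n)
                return 1;
            n->id = i;
            n->stale = (i == 1);
            n->next = head;
            head = n;
        }
        prune_stale();           /* prints "removing 1" */
        return 0;
    }
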
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -2687,7 +2696,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
                [NVME_CTRL_LIVE]        = "live",
                [NVME_CTRL_ADMIN_ONLY]  = "only-admin",
                [NVME_CTRL_RESETTING]   = "resetting",
-               [NVME_CTRL_RECONNECTING]= "reconnecting",
+               [NVME_CTRL_CONNECTING]  = "connecting",
                [NVME_CTRL_DELETING]    = "deleting",
                [NVME_CTRL_DEAD]        = "dead",
        };
index 25b19f722f5b20508ed35d408305dde734498432..a3145d90c1d2c20f8c555067a989cf4a428a1790 100644 (file)
@@ -171,13 +171,14 @@ static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
            cmd->common.opcode != nvme_fabrics_command ||
            cmd->fabrics.fctype != nvme_fabrics_type_connect) {
                /*
-                * Reconnecting state means transport disruption, which can take
-                * a long time and even might fail permanently, fail fast to
-                * give upper layers a chance to failover.
+                * Connecting state means transport disruption or initial
+                * establishment, which can take a long time and might even
+                * fail permanently; fail fast to give upper layers a chance
+                * to failover.
                 * Deleting state means that the ctrl will never accept commands
                 * again, fail it permanently.
                 */
-               if (ctrl->state == NVME_CTRL_RECONNECTING ||
+               if (ctrl->state == NVME_CTRL_CONNECTING ||
                    ctrl->state == NVME_CTRL_DELETING) {
                        nvme_req(rq)->status = NVME_SC_ABORT_REQ;
                        return BLK_STS_IOERR;
index b856d7c919d298062e2e55d8495ca18891d4f0f2..7f51f8414b97238e647ef37f13942755e4b83a16 100644 (file)
@@ -55,9 +55,7 @@ struct nvme_fc_queue {
 
 enum nvme_fcop_flags {
        FCOP_FLAGS_TERMIO       = (1 << 0),
-       FCOP_FLAGS_RELEASED     = (1 << 1),
-       FCOP_FLAGS_COMPLETE     = (1 << 2),
-       FCOP_FLAGS_AEN          = (1 << 3),
+       FCOP_FLAGS_AEN          = (1 << 1),
 };
 
 struct nvmefc_ls_req_op {
@@ -532,7 +530,7 @@ nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
 {
        switch (ctrl->ctrl.state) {
        case NVME_CTRL_NEW:
-       case NVME_CTRL_RECONNECTING:
+       case NVME_CTRL_CONNECTING:
                /*
                 * As all reconnects were suppressed, schedule a
                 * connect.
@@ -777,7 +775,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
                }
                break;
 
-       case NVME_CTRL_RECONNECTING:
+       case NVME_CTRL_CONNECTING:
                /*
                 * The association has already been terminated and the
                 * controller is attempting reconnects.  No need to do anything
@@ -1470,7 +1468,6 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
 
 /* *********************** NVME Ctrl Routines **************************** */
 
-static void __nvme_fc_final_op_cleanup(struct request *rq);
 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
 
 static int
@@ -1512,13 +1509,19 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
 static int
 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
 {
-       int state;
+       unsigned long flags;
+       int opstate;
+
+       spin_lock_irqsave(&ctrl->lock, flags);
+       opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
+       if (opstate != FCPOP_STATE_ACTIVE)
+               atomic_set(&op->state, opstate);
+       else if (ctrl->flags & FCCTRL_TERMIO)
+               ctrl->iocnt++;
+       spin_unlock_irqrestore(&ctrl->lock, flags);
 
-       state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
-       if (state != FCPOP_STATE_ACTIVE) {
-               atomic_set(&op->state, state);
+       if (opstate != FCPOP_STATE_ACTIVE)
                return -ECANCELED;
-       }
 
        ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
                                        &ctrl->rport->remoteport,
@@ -1532,60 +1535,26 @@ static void
 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
 {
        struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
-       unsigned long flags;
-       int i, ret;
-
-       for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
-               if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
-                       continue;
-
-               spin_lock_irqsave(&ctrl->lock, flags);
-               if (ctrl->flags & FCCTRL_TERMIO) {
-                       ctrl->iocnt++;
-                       aen_op->flags |= FCOP_FLAGS_TERMIO;
-               }
-               spin_unlock_irqrestore(&ctrl->lock, flags);
-
-               ret = __nvme_fc_abort_op(ctrl, aen_op);
-               if (ret) {
-                       /*
-                        * if __nvme_fc_abort_op failed the io wasn't
-                        * active. Thus this call path is running in
-                        * parallel to the io complete. Treat as non-error.
-                        */
+       int i;
 
-                       /* back out the flags/counters */
-                       spin_lock_irqsave(&ctrl->lock, flags);
-                       if (ctrl->flags & FCCTRL_TERMIO)
-                               ctrl->iocnt--;
-                       aen_op->flags &= ~FCOP_FLAGS_TERMIO;
-                       spin_unlock_irqrestore(&ctrl->lock, flags);
-                       return;
-               }
-       }
+       for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
+               __nvme_fc_abort_op(ctrl, aen_op);
 }
 
-static inline int
+static inline void
 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
-               struct nvme_fc_fcp_op *op)
+               struct nvme_fc_fcp_op *op, int opstate)
 {
        unsigned long flags;
-       bool complete_rq = false;
 
-       spin_lock_irqsave(&ctrl->lock, flags);
-       if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
+       if (opstate == FCPOP_STATE_ABORTED) {
+               spin_lock_irqsave(&ctrl->lock, flags);
                if (ctrl->flags & FCCTRL_TERMIO) {
                        if (!--ctrl->iocnt)
                                wake_up(&ctrl->ioabort_wait);
                }
+               spin_unlock_irqrestore(&ctrl->lock, flags);
        }
-       if (op->flags & FCOP_FLAGS_RELEASED)
-               complete_rq = true;
-       else
-               op->flags |= FCOP_FLAGS_COMPLETE;
-       spin_unlock_irqrestore(&ctrl->lock, flags);
-
-       return complete_rq;
 }
 
 static void
@@ -1601,6 +1570,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
        __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
        union nvme_result result;
        bool terminate_assoc = true;
+       int opstate;
 
        /*
         * WARNING:
@@ -1639,11 +1609,12 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
         * association to be terminated.
         */
 
+       opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
+
        fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
                                sizeof(op->rsp_iu), DMA_FROM_DEVICE);
 
-       if (atomic_read(&op->state) == FCPOP_STATE_ABORTED ||
-                       op->flags & FCOP_FLAGS_TERMIO)
+       if (opstate == FCPOP_STATE_ABORTED)
                status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
        else if (freq->status)
                status = cpu_to_le16(NVME_SC_INTERNAL << 1);
@@ -1708,7 +1679,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 done:
        if (op->flags & FCOP_FLAGS_AEN) {
                nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
-               __nvme_fc_fcpop_chk_teardowns(ctrl, op);
+               __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
                atomic_set(&op->state, FCPOP_STATE_IDLE);
                op->flags = FCOP_FLAGS_AEN;     /* clear other flags */
                nvme_fc_ctrl_put(ctrl);
@@ -1722,13 +1693,11 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
        if (status &&
            (blk_queue_dying(rq->q) ||
             ctrl->ctrl.state == NVME_CTRL_NEW ||
-            ctrl->ctrl.state == NVME_CTRL_RECONNECTING))
+            ctrl->ctrl.state == NVME_CTRL_CONNECTING))
                status |= cpu_to_le16(NVME_SC_DNR << 1);
 
-       if (__nvme_fc_fcpop_chk_teardowns(ctrl, op))
-               __nvme_fc_final_op_cleanup(rq);
-       else
-               nvme_end_request(rq, status, result);
+       __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
+       nvme_end_request(rq, status, result);
 
 check_error:
        if (terminate_assoc)
@@ -2415,46 +2384,16 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg)
 }
 
 static void
-__nvme_fc_final_op_cleanup(struct request *rq)
+nvme_fc_complete_rq(struct request *rq)
 {
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
        struct nvme_fc_ctrl *ctrl = op->ctrl;
 
        atomic_set(&op->state, FCPOP_STATE_IDLE);
-       op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
-                       FCOP_FLAGS_COMPLETE);
 
        nvme_fc_unmap_data(ctrl, rq, op);
        nvme_complete_rq(rq);
        nvme_fc_ctrl_put(ctrl);
-
-}
-
-static void
-nvme_fc_complete_rq(struct request *rq)
-{
-       struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-       struct nvme_fc_ctrl *ctrl = op->ctrl;
-       unsigned long flags;
-       bool completed = false;
-
-       /*
-        * the core layer, on controller resets after calling
-        * nvme_shutdown_ctrl(), calls complete_rq without our
-        * calling blk_mq_complete_request(), thus there may still
-        * be live i/o outstanding with the LLDD. Means transport has
-        * to track complete calls vs fcpio_done calls to know what
-        * path to take on completes and dones.
-        */
-       spin_lock_irqsave(&ctrl->lock, flags);
-       if (op->flags & FCOP_FLAGS_COMPLETE)
-               completed = true;
-       else
-               op->flags |= FCOP_FLAGS_RELEASED;
-       spin_unlock_irqrestore(&ctrl->lock, flags);
-
-       if (completed)
-               __nvme_fc_final_op_cleanup(rq);
 }
 
 /*
@@ -2476,35 +2415,11 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
        struct nvme_ctrl *nctrl = data;
        struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
-       unsigned long flags;
-       int status;
 
        if (!blk_mq_request_started(req))
                return;
 
-       spin_lock_irqsave(&ctrl->lock, flags);
-       if (ctrl->flags & FCCTRL_TERMIO) {
-               ctrl->iocnt++;
-               op->flags |= FCOP_FLAGS_TERMIO;
-       }
-       spin_unlock_irqrestore(&ctrl->lock, flags);
-
-       status = __nvme_fc_abort_op(ctrl, op);
-       if (status) {
-               /*
-                * if __nvme_fc_abort_op failed the io wasn't
-                * active. Thus this call path is running in
-                * parallel to the io complete. Treat as non-error.
-                */
-
-               /* back out the flags/counters */
-               spin_lock_irqsave(&ctrl->lock, flags);
-               if (ctrl->flags & FCCTRL_TERMIO)
-                       ctrl->iocnt--;
-               op->flags &= ~FCOP_FLAGS_TERMIO;
-               spin_unlock_irqrestore(&ctrl->lock, flags);
-               return;
-       }
+       __nvme_fc_abort_op(ctrl, op);
 }
 
 
@@ -2943,7 +2858,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
        unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
        bool recon = true;
 
-       if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING)
+       if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
                return;
 
        if (portptr->port_state == FC_OBJSTATE_ONLINE)
@@ -2991,10 +2906,10 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
        /* will block while waiting for io to terminate */
        nvme_fc_delete_association(ctrl);
 
-       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                dev_err(ctrl->ctrl.device,
                        "NVME-FC{%d}: error_recovery: Couldn't change state "
-                       "to RECONNECTING\n", ctrl->cnum);
+                       "to CONNECTING\n", ctrl->cnum);
                return;
        }
 
@@ -3195,7 +3110,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
         * transport errors (frame drop, LS failure) inherently must kill
         * the association. The transport is coded so that any command used
         * to create the association (prior to a LIVE state transition
-        * while NEW or RECONNECTING) will fail if it completes in error or
+        * while NEW or CONNECTING) will fail if it completes in error or
         * times out.
         *
         * As such: as the connect request was most likely due to a
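
The recurring idiom in this FC rework is atomic_xchg(&op->state, new): whichever path swaps the state out of ACTIVE first — abort or completion — owns the teardown, and the loser sees the old value and backs off, which is what let the FCOP_FLAGS_RELEASED/FCOP_FLAGS_COMPLETE bookkeeping and __nvme_fc_final_op_cleanup() disappear. The claim in C11 atomics (a minimal sketch, not the driver's types):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { ST_ACTIVE, ST_ABORTED, ST_COMPLETE };

    struct op { _Atomic int state; };

    /* Returns 1 if this caller won the transition out of ACTIVE and now owns
     * teardown; 0 if another path already claimed the op. */
    static int claim(struct op *op, int new_state)
    {
        int old = atomic_exchange(&op->state, new_state);

        if (old != ST_ACTIVE) {
            atomic_store(&op->state, old);  /* lost: restore the winner's state */
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        struct op op = { ST_ACTIVE };

        printf("abort claims: %d\n", claim(&op, ST_ABORTED));     /* 1 */
        printf("complete claims: %d\n", claim(&op, ST_COMPLETE)); /* 0 */
        return 0;
    }

In the driver the exchange-and-restore in __nvme_fc_abort_op() additionally runs under ctrl->lock, which keeps the losing path's restore from racing yet another observer.
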
index 8e4550fa08f8bd775e7e5e8e0c169e287845509a..0521e4707d1cfe193a2193d48e1332e174087e3a 100644 (file)
@@ -123,7 +123,7 @@ enum nvme_ctrl_state {
        NVME_CTRL_LIVE,
        NVME_CTRL_ADMIN_ONLY,    /* Only admin queue live */
        NVME_CTRL_RESETTING,
-       NVME_CTRL_RECONNECTING,
+       NVME_CTRL_CONNECTING,
        NVME_CTRL_DELETING,
        NVME_CTRL_DEAD,
 };
@@ -183,6 +183,7 @@ struct nvme_ctrl {
        struct work_struct scan_work;
        struct work_struct async_event_work;
        struct delayed_work ka_work;
+       struct nvme_command ka_cmd;
        struct work_struct fw_act_work;
 
        /* Power saving configuration */
index 6fe7af00a1f42a7dcb3354ac49db499cef6f9c88..73036d2fbbd58da19fbcd880accafdcb3f8165e7 100644 (file)
@@ -1141,7 +1141,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
        /* If there is a reset/reinit ongoing, we shouldn't reset again. */
        switch (dev->ctrl.state) {
        case NVME_CTRL_RESETTING:
-       case NVME_CTRL_RECONNECTING:
+       case NVME_CTRL_CONNECTING:
                return false;
        default:
                break;
@@ -1215,13 +1215,17 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
         * cancellation error. All outstanding requests are completed on
         * shutdown, so we return BLK_EH_HANDLED.
         */
-       if (dev->ctrl.state == NVME_CTRL_RESETTING) {
+       switch (dev->ctrl.state) {
+       case NVME_CTRL_CONNECTING:
+       case NVME_CTRL_RESETTING:
                dev_warn(dev->ctrl.device,
                         "I/O %d QID %d timeout, disable controller\n",
                         req->tag, nvmeq->qid);
                nvme_dev_disable(dev, false);
                nvme_req(req)->flags |= NVME_REQ_CANCELLED;
                return BLK_EH_HANDLED;
+       default:
+               break;
        }
 
        /*
@@ -1364,18 +1368,14 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
                                int qid, int depth)
 {
-       if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
-               unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
-                                                     dev->ctrl.page_size);
-               nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
-               nvmeq->sq_cmds_io = dev->cmb + offset;
-       } else {
-               nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
-                                       &nvmeq->sq_dma_addr, GFP_KERNEL);
-               if (!nvmeq->sq_cmds)
-                       return -ENOMEM;
-       }
+       /* CMB SQEs will be mapped before creation */
+       if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
+               return 0;
 
+       nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+                                           &nvmeq->sq_dma_addr, GFP_KERNEL);
+       if (!nvmeq->sq_cmds)
+               return -ENOMEM;
        return 0;
 }
 
@@ -1449,6 +1449,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
        struct nvme_dev *dev = nvmeq->dev;
        int result;
 
+       if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+               unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
+                                                     dev->ctrl.page_size);
+               nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
+               nvmeq->sq_cmds_io = dev->cmb + offset;
+       }
+
        nvmeq->cq_vector = qid - 1;
        result = adapter_alloc_cq(dev, qid, nvmeq);
        if (result < 0)
@@ -2288,12 +2295,12 @@ static void nvme_reset_work(struct work_struct *work)
                nvme_dev_disable(dev, false);
 
        /*
-        * Introduce RECONNECTING state from nvme-fc/rdma transports to mark the
+        * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
         * initializing procedure here.
         */
-       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) {
+       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
                dev_warn(dev->ctrl.device,
-                       "failed to mark controller RECONNECTING\n");
+                       "failed to mark controller CONNECTING\n");
                goto out;
        }
 
index 2bc059f7d73c7da7ea13273aa9a0b92d1cbf2b63..3a51ed50eff24a4c2541b6051a2918f41920467d 100644 (file)
@@ -887,7 +887,7 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
 static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
 {
        /* If we are resetting/deleting then do nothing */
-       if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
+       if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
                WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
                        ctrl->ctrl.state == NVME_CTRL_LIVE);
                return;
@@ -973,7 +973,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
        nvme_start_queues(&ctrl->ctrl);
 
-       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure should never happen */
                WARN_ON_ONCE(1);
                return;
@@ -1756,7 +1756,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
        nvme_stop_ctrl(&ctrl->ctrl);
        nvme_rdma_shutdown_ctrl(ctrl, false);
 
-       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure should never happen */
                WARN_ON_ONCE(1);
                return;
@@ -1784,11 +1784,8 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
        return;
 
 out_fail:
-       dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
-       nvme_remove_namespaces(&ctrl->ctrl);
-       nvme_rdma_shutdown_ctrl(ctrl, true);
-       nvme_uninit_ctrl(&ctrl->ctrl);
-       nvme_put_ctrl(&ctrl->ctrl);
+       ++ctrl->ctrl.nr_reconnects;
+       nvme_rdma_reconnect_or_remove(ctrl);
 }
 
 static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
@@ -1942,6 +1939,9 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
        if (!ctrl->queues)
                goto out_uninit_ctrl;
 
+       changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
+       WARN_ON_ONCE(!changed);
+
        ret = nvme_rdma_configure_admin_queue(ctrl, true);
        if (ret)
                goto out_kfree_queues;
index 0a4372a016f21fdb9e9e29ed293dd8fe0524d26b..28bbdff4a88baa27cb2398597cc02529c49e19f4 100644 (file)
@@ -105,10 +105,13 @@ static void nvmet_execute_flush(struct nvmet_req *req)
 static u16 nvmet_discard_range(struct nvmet_ns *ns,
                struct nvme_dsm_range *range, struct bio **bio)
 {
-       if (__blkdev_issue_discard(ns->bdev,
+       int ret;
+
+       ret = __blkdev_issue_discard(ns->bdev,
                        le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
                        le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
-                       GFP_KERNEL, 0, bio))
+                       GFP_KERNEL, 0, bio);
+       if (ret && ret != -EOPNOTSUPP)
                return NVME_SC_INTERNAL | NVME_SC_DNR;
        return 0;
 }
index 36ed84e26d9c262c32c6c600938f6ec53573c683..f46828e3b082b87966d5c5be3d7df1637f0391a9 100644 (file)
@@ -977,11 +977,11 @@ static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
        return 0;
 }
 
-static void *
+static const void *
 of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
                                const struct device *dev)
 {
-       return (void *)of_device_get_match_data(dev);
+       return of_device_get_match_data(dev);
 }
 
 const struct fwnode_operations of_fwnode_ops = {
index 2d87bc1adf38b682d5416a72987115eb68737000..0c09107094350456162cd9573c37fa7f9f134df0 100644 (file)
@@ -55,7 +55,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
        if (max_opps <= 0)
                return max_opps ? max_opps : -ENODATA;
 
-       freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
+       freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);
        if (!freq_table)
                return -ENOMEM;
 
index 2a68f59d2228c921e8609ccba97832e07cbaa582..c52c6723374b50b26041d7681c3d9ae3564d50ce 100644 (file)
@@ -126,24 +126,6 @@ static const struct dmi_system_id dell_device_table[] __initconst = {
                        DMI_MATCH(DMI_CHASSIS_TYPE, "32"), /*Detachable*/
                },
        },
-       {
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_CHASSIS_TYPE, "30"), /*Tablet*/
-               },
-       },
-       {
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /*Convertible*/
-               },
-       },
-       {
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_CHASSIS_TYPE, "32"), /*Detachable*/
-               },
-       },
        {
                .ident = "Dell Computer Corporation",
                .matches = {
@@ -1279,7 +1261,7 @@ static int kbd_get_state(struct kbd_state *state)
        struct calling_interface_buffer buffer;
        int ret;
 
-       dell_fill_request(&buffer, 0, 0, 0, 0);
+       dell_fill_request(&buffer, 0x1, 0, 0, 0);
        ret = dell_send_request(&buffer,
                                CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
        if (ret)
index 5b6f18b188012dcc94b19c9156ee54b42650473b..535199c9e6bc6fa182ea9612fcabf85a7ba02ba8 100644 (file)
@@ -113,7 +113,7 @@ MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
 /*
  * ACPI Helpers
  */
-#define IDEAPAD_EC_TIMEOUT (100) /* in ms */
+#define IDEAPAD_EC_TIMEOUT (200) /* in ms */
 
 static int read_method_int(acpi_handle handle, const char *method, int *val)
 {
index daa68acbc9003b34615fc43b810c5fb77dd87ecf..c0c8945603cbbdd6284ff60b5df4d118502305d5 100644 (file)
@@ -933,7 +933,7 @@ static int wmi_dev_probe(struct device *dev)
                        goto probe_failure;
                }
 
-               buf = kmalloc(strlen(wdriver->driver.name) + 4, GFP_KERNEL);
+               buf = kmalloc(strlen(wdriver->driver.name) + 5, GFP_KERNEL);
                if (!buf) {
                        ret = -ENOMEM;
                        goto probe_string_failure;
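
The +4 -> +5 bump is an off-by-one fix for a buffer that later receives the driver name via sprintf(); the arithmetic implies a four-character prefix plus the terminating NUL that the old size forgot (the sprintf itself is outside the hunk, so the "wmi/" prefix below is an assumption). The general sizing rule:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        const char *prefix = "wmi/";        /* assumed 4-char prefix */
        const char *name = "dell-smbios";

        /* strlen() counts no NUL, so add exactly 1 for the terminator. */
        size_t len = strlen(prefix) + strlen(name) + 1;
        char *buf = malloc(len);

        if (!buf)
            return 1;
        snprintf(buf, len, "%s%s", prefix, name);  /* bounded, NUL-terminated */
        puts(buf);
        free(buf);
        return 0;
    }
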
index ba2e0856d22cdfb5396457366276e01bc9ac7851..8f5c1d7f751aee594b763100c2bf6265df9f3a7d 100644 (file)
@@ -1297,6 +1297,9 @@ static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
                vcdev->device_lost = true;
                rc = NOTIFY_DONE;
                break;
+       case CIO_OPER:
+               rc = NOTIFY_OK;
+               break;
        default:
                rc = NOTIFY_DONE;
                break;
@@ -1309,6 +1312,27 @@ static struct ccw_device_id virtio_ids[] = {
        {},
 };
 
+#ifdef CONFIG_PM_SLEEP
+static int virtio_ccw_freeze(struct ccw_device *cdev)
+{
+       struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+
+       return virtio_device_freeze(&vcdev->vdev);
+}
+
+static int virtio_ccw_restore(struct ccw_device *cdev)
+{
+       struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+       int ret;
+
+       ret = virtio_ccw_set_transport_rev(vcdev);
+       if (ret)
+               return ret;
+
+       return virtio_device_restore(&vcdev->vdev);
+}
+#endif
+
 static struct ccw_driver virtio_ccw_driver = {
        .driver = {
                .owner = THIS_MODULE,
@@ -1321,6 +1345,11 @@ static struct ccw_driver virtio_ccw_driver = {
        .set_online = virtio_ccw_online,
        .notify = virtio_ccw_cio_notify,
        .int_class = IRQIO_VIR,
+#ifdef CONFIG_PM_SLEEP
+       .freeze = virtio_ccw_freeze,
+       .thaw = virtio_ccw_restore,
+       .restore = virtio_ccw_restore,
+#endif
 };
 
 static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
index 5064d5ddf581c2282eba49ab87a668da83a5aeee..fc2013aade51b6be6d281a1e14525cc24fa4f0ca 100644 (file)
@@ -73,6 +73,8 @@ static int __init its_fsl_mc_msi_init(void)
 
        for (np = of_find_matching_node(NULL, its_device_id); np;
             np = of_find_matching_node(np, its_device_id)) {
+               if (!of_device_is_available(np))
+                       continue;
                if (!of_property_read_bool(np, "msi-controller"))
                        continue;
 
index f699abab17875ed26c6d56353c8eb87785664a54..148f3ee7028680b573bef22adebd9c908d75bd0b 100644 (file)
@@ -19,6 +19,12 @@ config USB_EHCI_BIG_ENDIAN_MMIO
 config USB_EHCI_BIG_ENDIAN_DESC
        bool
 
+config USB_UHCI_BIG_ENDIAN_MMIO
+       bool
+
+config USB_UHCI_BIG_ENDIAN_DESC
+       bool
+
 menuconfig USB_SUPPORT
        bool "USB support"
        depends on HAS_IOMEM
index 6150bed7cfa80d19b7f04dc6ae89cef14ce1fb5b..4fcfb3084b3689102657641cfd674656fd1ca5c3 100644 (file)
@@ -633,14 +633,6 @@ config USB_UHCI_ASPEED
        bool
        default y if ARCH_ASPEED
 
-config USB_UHCI_BIG_ENDIAN_MMIO
-       bool
-       default y if SPARC_LEON
-
-config USB_UHCI_BIG_ENDIAN_DESC
-       bool
-       default y if SPARC_LEON
-
 config USB_FHCI_HCD
        tristate "Freescale QE USB Host Controller support"
        depends on OF_GPIO && QE_GPIO && QUICC_ENGINE
index 6082f653c68a448e3e32ea08571f04af97f6acf5..67773e8bbb954b36bdfc7f005e026661c3ba79c9 100644 (file)
@@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info)
        int timeout = 1000;
 
        /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */
-       if (cpu_data(0).x86_mask == 1) {
+       if (cpu_data(0).x86_stepping == 1) {
                pll_table = gx_pll_table_14MHz;
                pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);
        } else {
index 753d9cb437d02597aa07452dabaac64263d35691..aedbee3b28386a1cc1625334fe431875bd787277 100644 (file)
@@ -60,6 +60,7 @@ struct sock_mapping {
        bool active_socket;
        struct list_head list;
        struct socket *sock;
+       atomic_t refcount;
        union {
                struct {
                        int irq;
@@ -93,6 +94,32 @@ struct sock_mapping {
        };
 };
 
+static inline struct sock_mapping *pvcalls_enter_sock(struct socket *sock)
+{
+       struct sock_mapping *map;
+
+       if (!pvcalls_front_dev ||
+               dev_get_drvdata(&pvcalls_front_dev->dev) == NULL)
+               return ERR_PTR(-ENOTCONN);
+
+       map = (struct sock_mapping *)sock->sk->sk_send_head;
+       if (map == NULL)
+               return ERR_PTR(-ENOTSOCK);
+
+       pvcalls_enter();
+       atomic_inc(&map->refcount);
+       return map;
+}
+
+static inline void pvcalls_exit_sock(struct socket *sock)
+{
+       struct sock_mapping *map;
+
+       map = (struct sock_mapping *)sock->sk->sk_send_head;
+       atomic_dec(&map->refcount);
+       pvcalls_exit();
+}
+
 static inline int get_request(struct pvcalls_bedata *bedata, int *req_id)
 {
        *req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1);
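
pvcalls_enter_sock() and pvcalls_exit_sock() above pair the existing
global enter/exit with a per-socket refcount, so teardown can later wait
for exactly the operations touching one socket rather than for every
pvcall in flight. A rough portable sketch of the idea, using C11 atomics
and hypothetical names in place of the kernel types:

    #include <stdatomic.h>
    #include <stddef.h>

    struct sock_map {
            atomic_int refcount;          /* in-flight ops on this socket */
            /* ... per-socket state ... */
    };

    /* Pin the mapping for one operation; pair with exit_sock(). */
    static struct sock_map *enter_sock(struct sock_map *map)
    {
            if (map == NULL)
                    return NULL;          /* caller turns this into -ENOTSOCK */
            atomic_fetch_add(&map->refcount, 1);
            return map;
    }

    static void exit_sock(struct sock_map *map)
    {
            atomic_fetch_sub(&map->refcount, 1);
    }
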
@@ -369,31 +396,23 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
        if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
                return -EOPNOTSUPP;
 
-       pvcalls_enter();
-       if (!pvcalls_front_dev) {
-               pvcalls_exit();
-               return -ENOTCONN;
-       }
+       map = pvcalls_enter_sock(sock);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
 
        bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
 
-       map = (struct sock_mapping *)sock->sk->sk_send_head;
-       if (!map) {
-               pvcalls_exit();
-               return -ENOTSOCK;
-       }
-
        spin_lock(&bedata->socket_lock);
        ret = get_request(bedata, &req_id);
        if (ret < 0) {
                spin_unlock(&bedata->socket_lock);
-               pvcalls_exit();
+               pvcalls_exit_sock(sock);
                return ret;
        }
        ret = create_active(map, &evtchn);
        if (ret < 0) {
                spin_unlock(&bedata->socket_lock);
-               pvcalls_exit();
+               pvcalls_exit_sock(sock);
                return ret;
        }
 
@@ -423,7 +442,7 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
        smp_rmb();
        ret = bedata->rsp[req_id].ret;
        bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
-       pvcalls_exit();
+       pvcalls_exit_sock(sock);
        return ret;
 }
 
@@ -488,23 +507,15 @@ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
        if (flags & (MSG_CONFIRM|MSG_DONTROUTE|MSG_EOR|MSG_OOB))
                return -EOPNOTSUPP;
 
-       pvcalls_enter();
-       if (!pvcalls_front_dev) {
-               pvcalls_exit();
-               return -ENOTCONN;
-       }
+       map = pvcalls_enter_sock(sock);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
        bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
 
-       map = (struct sock_mapping *) sock->sk->sk_send_head;
-       if (!map) {
-               pvcalls_exit();
-               return -ENOTSOCK;
-       }
-
        mutex_lock(&map->active.out_mutex);
        if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {
                mutex_unlock(&map->active.out_mutex);
-               pvcalls_exit();
+               pvcalls_exit_sock(sock);
                return -EAGAIN;
        }
        if (len > INT_MAX)
@@ -526,7 +537,7 @@ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
                tot_sent = sent;
 
        mutex_unlock(&map->active.out_mutex);
-       pvcalls_exit();
+       pvcalls_exit_sock(sock);
        return tot_sent;
 }
 
@@ -591,19 +602,11 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
        if (flags & (MSG_CMSG_CLOEXEC|MSG_ERRQUEUE|MSG_OOB|MSG_TRUNC))
                return -EOPNOTSUPP;
 
-       pvcalls_enter();
-       if (!pvcalls_front_dev) {
-               pvcalls_exit();
-               return -ENOTCONN;
-       }
+       map = pvcalls_enter_sock(sock);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
        bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
 
-       map = (struct sock_mapping *) sock->sk->sk_send_head;
-       if (!map) {
-               pvcalls_exit();
-               return -ENOTSOCK;
-       }
-
        mutex_lock(&map->active.in_mutex);
        if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER))
                len = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
@@ -623,7 +626,7 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                ret = 0;
 
        mutex_unlock(&map->active.in_mutex);
-       pvcalls_exit();
+       pvcalls_exit_sock(sock);
        return ret;
 }
 
@@ -637,24 +640,16 @@ int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
        if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
                return -EOPNOTSUPP;
 
-       pvcalls_enter();
-       if (!pvcalls_front_dev) {
-               pvcalls_exit();
-               return -ENOTCONN;
-       }
+       map = pvcalls_enter_sock(sock);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
        bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
 
-       map = (struct sock_mapping *) sock->sk->sk_send_head;
-       if (map == NULL) {
-               pvcalls_exit();
-               return -ENOTSOCK;
-       }
-
        spin_lock(&bedata->socket_lock);
        ret = get_request(bedata, &req_id);
        if (ret < 0) {
                spin_unlock(&bedata->socket_lock);
-               pvcalls_exit();
+               pvcalls_exit_sock(sock);
                return ret;
        }
        req = RING_GET_REQUEST(&bedata->ring, req_id);
@@ -684,7 +679,7 @@ int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
        bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
 
        map->passive.status = PVCALLS_STATUS_BIND;
-       pvcalls_exit();
+       pvcalls_exit_sock(sock);
        return 0;
 }
 
@@ -695,21 +690,13 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
        struct xen_pvcalls_request *req;
        int notify, req_id, ret;
 
-       pvcalls_enter();
-       if (!pvcalls_front_dev) {
-               pvcalls_exit();
-               return -ENOTCONN;
-       }
+       map = pvcalls_enter_sock(sock);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
        bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
 
-       map = (struct sock_mapping *) sock->sk->sk_send_head;
-       if (!map) {
-               pvcalls_exit();
-               return -ENOTSOCK;
-       }
-
        if (map->passive.status != PVCALLS_STATUS_BIND) {
-               pvcalls_exit();
+               pvcalls_exit_sock(sock);
                return -EOPNOTSUPP;
        }
 
@@ -717,7 +704,7 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
        ret = get_request(bedata, &req_id);
        if (ret < 0) {
                spin_unlock(&bedata->socket_lock);
-               pvcalls_exit();
+               pvcalls_exit_sock(sock);
                return ret;
        }
        req = RING_GET_REQUEST(&bedata->ring, req_id);
@@ -741,7 +728,7 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
        bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
 
        map->passive.status = PVCALLS_STATUS_LISTEN;
-       pvcalls_exit();
+       pvcalls_exit_sock(sock);
        return ret;
 }
 
@@ -753,21 +740,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
        struct xen_pvcalls_request *req;
        int notify, req_id, ret, evtchn, nonblock;
 
-       pvcalls_enter();
-       if (!pvcalls_front_dev) {
-               pvcalls_exit();
-               return -ENOTCONN;
-       }
+       map = pvcalls_enter_sock(sock);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
        bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
 
-       map = (struct sock_mapping *) sock->sk->sk_send_head;
-       if (!map) {
-               pvcalls_exit();
-               return -ENOTSOCK;
-       }
-
        if (map->passive.status != PVCALLS_STATUS_LISTEN) {
-               pvcalls_exit();
+               pvcalls_exit_sock(sock);
                return -EINVAL;
        }
 
@@ -785,13 +764,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
                        goto received;
                }
                if (nonblock) {
-                       pvcalls_exit();
+                       pvcalls_exit_sock(sock);
                        return -EAGAIN;
                }
                if (wait_event_interruptible(map->passive.inflight_accept_req,
                        !test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
                                          (void *)&map->passive.flags))) {
-                       pvcalls_exit();
+                       pvcalls_exit_sock(sock);
                        return -EINTR;
                }
        }
@@ -802,7 +781,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
                clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
                          (void *)&map->passive.flags);
                spin_unlock(&bedata->socket_lock);
-               pvcalls_exit();
+               pvcalls_exit_sock(sock);
                return ret;
        }
        map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
@@ -810,7 +789,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
                clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
                          (void *)&map->passive.flags);
                spin_unlock(&bedata->socket_lock);
-               pvcalls_exit();
+               pvcalls_exit_sock(sock);
                return -ENOMEM;
        }
        ret = create_active(map2, &evtchn);
@@ -819,7 +798,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
                clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
                          (void *)&map->passive.flags);
                spin_unlock(&bedata->socket_lock);
-               pvcalls_exit();
+               pvcalls_exit_sock(sock);
                return ret;
        }
        list_add_tail(&map2->list, &bedata->socket_mappings);
@@ -841,13 +820,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
        /* We could check if we have received a response before returning. */
        if (nonblock) {
                WRITE_ONCE(map->passive.inflight_req_id, req_id);
-               pvcalls_exit();
+               pvcalls_exit_sock(sock);
                return -EAGAIN;
        }
 
        if (wait_event_interruptible(bedata->inflight_req,
                READ_ONCE(bedata->rsp[req_id].req_id) == req_id)) {
-               pvcalls_exit();
+               pvcalls_exit_sock(sock);
                return -EINTR;
        }
        /* read req_id, then the content */
@@ -862,7 +841,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
                clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
                          (void *)&map->passive.flags);
                pvcalls_front_free_map(bedata, map2);
-               pvcalls_exit();
+               pvcalls_exit_sock(sock);
                return -ENOMEM;
        }
        newsock->sk->sk_send_head = (void *)map2;
@@ -874,7 +853,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
        clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags);
        wake_up(&map->passive.inflight_accept_req);
 
-       pvcalls_exit();
+       pvcalls_exit_sock(sock);
        return ret;
 }
 
@@ -965,23 +944,16 @@ __poll_t pvcalls_front_poll(struct file *file, struct socket *sock,
        struct sock_mapping *map;
        __poll_t ret;
 
-       pvcalls_enter();
-       if (!pvcalls_front_dev) {
-               pvcalls_exit();
+       map = pvcalls_enter_sock(sock);
+       if (IS_ERR(map))
                return EPOLLNVAL;
-       }
        bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
 
-       map = (struct sock_mapping *) sock->sk->sk_send_head;
-       if (!map) {
-               pvcalls_exit();
-               return EPOLLNVAL;
-       }
        if (map->active_socket)
                ret = pvcalls_front_poll_active(file, bedata, map, wait);
        else
                ret = pvcalls_front_poll_passive(file, bedata, map, wait);
-       pvcalls_exit();
+       pvcalls_exit_sock(sock);
        return ret;
 }
 
@@ -995,25 +967,20 @@ int pvcalls_front_release(struct socket *sock)
        if (sock->sk == NULL)
                return 0;
 
-       pvcalls_enter();
-       if (!pvcalls_front_dev) {
-               pvcalls_exit();
-               return -EIO;
+       map = pvcalls_enter_sock(sock);
+       if (IS_ERR(map)) {
+               if (PTR_ERR(map) == -ENOTCONN)
+                       return -EIO;
+               else
+                       return 0;
        }
-
        bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
 
-       map = (struct sock_mapping *) sock->sk->sk_send_head;
-       if (map == NULL) {
-               pvcalls_exit();
-               return 0;
-       }
-
        spin_lock(&bedata->socket_lock);
        ret = get_request(bedata, &req_id);
        if (ret < 0) {
                spin_unlock(&bedata->socket_lock);
-               pvcalls_exit();
+               pvcalls_exit_sock(sock);
                return ret;
        }
        sock->sk->sk_send_head = NULL;
@@ -1043,14 +1010,20 @@ int pvcalls_front_release(struct socket *sock)
                /*
                 * We need to make sure that sendmsg/recvmsg on this socket have
                 * not started before we've cleared sk_send_head here. The
-                * easiest (though not optimal) way to guarantee this is to see
-                * that no pvcall (other than us) is in progress.
+                * easiest way to guarantee this is to see that no pvcall
+                * (other than us) is in progress on this socket.
                 */
-               while (atomic_read(&pvcalls_refcount) > 1)
+               while (atomic_read(&map->refcount) > 1)
                        cpu_relax();
 
                pvcalls_front_free_map(bedata, map);
        } else {
+               wake_up(&bedata->inflight_req);
+               wake_up(&map->passive.inflight_accept_req);
+
+               while (atomic_read(&map->refcount) > 1)
+                       cpu_relax();
+
                spin_lock(&bedata->socket_lock);
                list_del(&map->list);
                spin_unlock(&bedata->socket_lock);
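
With the per-socket count in place, release can unhook the mapping, wake
any sleepers so their references drain, and spin until it is the last
holder before freeing. A compilable sketch of that ordering (the helper
names are hypothetical; the real code also clears sk_send_head and uses
cpu_relax() rather than yielding):

    #include <sched.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct sock_map { atomic_int refcount; };

    static void release_map(struct sock_map **slot)
    {
            struct sock_map *map = *slot;

            *slot = NULL;                 /* no new enter_sock() finds it */
            /* wake any blocked waiters here so their refs can drain */
            while (atomic_load(&map->refcount) > 1)
                    sched_yield();        /* wait: we still hold one ref */
            free(map);
    }
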
index 149c5e7efc89e75f0fd56a01b40f0f15b462af04..092981171df17ffe076dadd0582566902c03feda 100644 (file)
@@ -76,6 +76,7 @@ struct xb_req_data {
        struct list_head list;
        wait_queue_head_t wq;
        struct xsd_sockmsg msg;
+       uint32_t caller_req_id;
        enum xsd_sockmsg_type type;
        char *body;
        const struct kvec *vec;
index 5b081a01779de8001e48c1c0e2e0f9dea6697332..d239fc3c5e3ded66ad28194fcdc7d09a6df801da 100644 (file)
@@ -309,6 +309,7 @@ static int process_msg(void)
                        goto out;
 
                if (req->state == xb_req_state_wait_reply) {
+                       req->msg.req_id = req->caller_req_id;
                        req->msg.type = state.msg.type;
                        req->msg.len = state.msg.len;
                        req->body = state.body;
index 3e59590c7254ddc8f1a08f4232262a74a29e3711..3f3b29398ab8e2b711ce724cf756ea8c241433cb 100644 (file)
@@ -227,6 +227,8 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg)
        req->state = xb_req_state_queued;
        init_waitqueue_head(&req->wq);
 
+       /* Save the caller req_id and restore it later in the reply */
+       req->caller_req_id = req->msg.req_id;
        req->msg.req_id = xs_request_enter(req);
 
        mutex_lock(&xb_write_mutex);
@@ -310,6 +312,7 @@ static void *xs_talkv(struct xenbus_transaction t,
        req->num_vecs = num_vecs;
        req->cb = xs_wake_up;
 
+       msg.req_id = 0;
        msg.tx_id = t.id;
        msg.type = type;
        msg.len = 0;
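
The three xenbus hunks belong together: xs_send() overwrites msg.req_id
with the id of the ring slot it claims, so the caller's original id is
saved in the request and written back into the reply before the caller
sees it, and xs_talkv() now starts from a known req_id of 0. A small
sketch of that save/translate/restore pattern, with hypothetical names:

    #include <stdint.h>

    struct msg     { uint32_t req_id; /* ... header fields ... */ };
    struct request { struct msg msg; uint32_t caller_req_id; };

    static uint32_t ring_id;              /* stand-in for xs_request_enter() */

    static void send_request(struct request *req)
    {
            req->caller_req_id = req->msg.req_id; /* save the caller's id */
            req->msg.req_id = ring_id++;          /* id used on the ring  */
            /* ... queue the request ... */
    }

    static void deliver_reply(struct request *req)
    {
            req->msg.req_id = req->caller_req_id; /* restore before handoff */
            /* ... complete the request ... */
    }
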
index e4054e533f6d4d26b783540657d2ea9b78bb97cc..f94b2d8c744a1bfee2dee175ee8f8453aa1e0e8f 100644 (file)
@@ -1264,7 +1264,16 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
        while (node) {
                ref = rb_entry(node, struct prelim_ref, rbnode);
                node = rb_next(&ref->rbnode);
-               WARN_ON(ref->count < 0);
+               /*
+                * ref->count < 0 can happen here if there are delayed
+                * refs with a node->action of BTRFS_DROP_DELAYED_REF.
+                * prelim_ref_insert() relies on this when merging
+                * identical refs to keep the overall count correct.
+                * prelim_ref_insert() will merge only those refs
+                * which compare identically.  Any refs having
+                * e.g. different offsets would not be merged,
+                * and would retain their original ref->count < 0.
+                */
                if (roots && ref->count && ref->root_id && ref->parent == 0) {
                        if (sc && sc->root_objectid &&
                            ref->root_id != sc->root_objectid) {
index a1a40cf382e39c1da97dcd3d8fd3a0a9eda537e2..7ab5e0128f0ce823101f5623a0664c01208f89a7 100644 (file)
@@ -821,7 +821,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
        spin_unlock(&delayed_refs->lock);
 
        if (qrecord_inserted)
-               return btrfs_qgroup_trace_extent_post(fs_info, record);
+               btrfs_qgroup_trace_extent_post(fs_info, record);
+
        return 0;
 
 free_head_ref:
index 05751a677da4fb2fd9a2eee3f3d90ad0071c1ebd..c1618ab9fecfb06a50861f26e4d445fe9d63190e 100644 (file)
@@ -2147,6 +2147,10 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
                        u64 bytes;
                        struct request_queue *req_q;
 
+                       if (!stripe->dev->bdev) {
+                               ASSERT(btrfs_test_opt(fs_info, DEGRADED));
+                               continue;
+                       }
                        req_q = bdev_get_queue(stripe->dev->bdev);
                        if (!blk_queue_discard(req_q))
                                continue;
index 53ca025655fca690b5f92ef93eb0d11047a1a0cc..a79299a89b7d0285328e29cecaaff51ff82c5be7 100644 (file)
@@ -1335,8 +1335,11 @@ static noinline int run_delalloc_nocow(struct inode *inode,
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
-                       if (ret < 0)
+                       if (ret < 0) {
+                               if (cow_start != (u64)-1)
+                                       cur_offset = cow_start;
                                goto error;
+                       }
                        if (ret > 0)
                                break;
                        leaf = path->nodes[0];
@@ -3385,6 +3388,11 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans,
                ret = btrfs_orphan_reserve_metadata(trans, inode);
                ASSERT(!ret);
                if (ret) {
+                       /*
+                        * The decrement doesn't need orphan_lock, as
+                        * ->orphan_block_rsv is only released once
+                        * ->orphan_inodes reaches zero.
+                        */
                        atomic_dec(&root->orphan_inodes);
                        clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
                                  &inode->runtime_flags);
@@ -3399,12 +3407,17 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans,
        if (insert >= 1) {
                ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
                if (ret) {
-                       atomic_dec(&root->orphan_inodes);
                        if (reserve) {
                                clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
                                          &inode->runtime_flags);
                                btrfs_orphan_release_metadata(inode);
                        }
+                       /*
+                        * btrfs_orphan_commit_root may race with us and set
+                        * ->orphan_block_rsv to zero; to avoid that, decrease
+                        * ->orphan_inodes only after everything is done.
+                        */
+                       atomic_dec(&root->orphan_inodes);
                        if (ret != -EEXIST) {
                                clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
                                          &inode->runtime_flags);
@@ -3436,28 +3449,26 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
 {
        struct btrfs_root *root = inode->root;
        int delete_item = 0;
-       int release_rsv = 0;
        int ret = 0;
 
-       spin_lock(&root->orphan_lock);
        if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
                               &inode->runtime_flags))
                delete_item = 1;
 
+       if (delete_item && trans)
+               ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
+
        if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
                               &inode->runtime_flags))
-               release_rsv = 1;
-       spin_unlock(&root->orphan_lock);
+               btrfs_orphan_release_metadata(inode);
 
-       if (delete_item) {
+       /*
+        * btrfs_orphan_commit_root may race with us and set ->orphan_block_rsv
+        * to zero; to avoid that, decrease ->orphan_inodes only after
+        * everything is done.
+        */
+       if (delete_item)
                atomic_dec(&root->orphan_inodes);
-               if (trans)
-                       ret = btrfs_del_orphan_item(trans, root,
-                                                   btrfs_ino(inode));
-       }
-
-       if (release_rsv)
-               btrfs_orphan_release_metadata(inode);
 
        return ret;
 }
@@ -5281,7 +5292,7 @@ void btrfs_evict_inode(struct inode *inode)
        trace_btrfs_inode_evict(inode);
 
        if (!root) {
-               kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
+               clear_inode(inode);
                return;
        }
 
index 9e61dd624f7b5b4e8dde13c47d6e24a8eb8138d5..aa259d6986e1c24049c8f500184b2393d99b0549 100644 (file)
@@ -1442,8 +1442,13 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
        int ret;
 
        ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
-       if (ret < 0)
-               return ret;
+       if (ret < 0) {
+               fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+               btrfs_warn(fs_info,
+"error accounting new delayed refs extent (err code: %d), quota inconsistent",
+                       ret);
+               return 0;
+       }
 
        /*
         * Here we don't need to get the lock of
index afadaadab18e45f5901600be18405d7a74e1a1f6..4fd19b4d667557f8b45b1ec61ef48f79ebb5f1cf 100644 (file)
@@ -29,6 +29,7 @@
 #include "hash.h"
 #include "compression.h"
 #include "qgroup.h"
+#include "inode-map.h"
 
 /* magic values for the inode_only field in btrfs_log_inode:
  *
@@ -2472,6 +2473,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
                                        clean_tree_block(fs_info, next);
                                        btrfs_wait_tree_block_writeback(next);
                                        btrfs_tree_unlock(next);
+                               } else {
+                                       if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
+                                               clear_extent_buffer_dirty(next);
                                }
 
                                WARN_ON(root_owner !=
@@ -2552,6 +2556,9 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
                                        clean_tree_block(fs_info, next);
                                        btrfs_wait_tree_block_writeback(next);
                                        btrfs_tree_unlock(next);
+                               } else {
+                                       if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
+                                               clear_extent_buffer_dirty(next);
                                }
 
                                WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
@@ -2630,6 +2637,9 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
                                clean_tree_block(fs_info, next);
                                btrfs_wait_tree_block_writeback(next);
                                btrfs_tree_unlock(next);
+                       } else {
+                               if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
+                                       clear_extent_buffer_dirty(next);
                        }
 
                        WARN_ON(log->root_key.objectid !=
@@ -3018,13 +3028,14 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
 
        while (1) {
                ret = find_first_extent_bit(&log->dirty_log_pages,
-                               0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
+                               0, &start, &end,
+                               EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT,
                                NULL);
                if (ret)
                        break;
 
                clear_extent_bits(&log->dirty_log_pages, start, end,
-                                 EXTENT_DIRTY | EXTENT_NEW);
+                                 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
        }
 
        /*
@@ -5677,6 +5688,23 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
                                                      path);
                }
 
+               if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
+                       struct btrfs_root *root = wc.replay_dest;
+
+                       btrfs_release_path(path);
+
+                       /*
+                        * We have just replayed everything, so the highest
+                        * objectid of the fs roots may have changed in case
+                        * some inode_items got replayed.
+                        *
+                        * root->objectid_mutex is not acquired as log replay
+                        * could only happen during mount.
+                        */
+                       ret = btrfs_find_highest_objectid(root,
+                                                 &root->highest_objectid);
+               }
+
                key.offset = found_key.offset - 1;
                wc.replay_dest->log_root = NULL;
                free_extent_buffer(log->node);
index b5036bd69e6a6432bf014b5fb8fb9e0c5baf495e..2ceb924ca0d630334ab81c33b24eef97194b5874 100644 (file)
@@ -645,6 +645,7 @@ static void btrfs_free_stale_devices(const char *path,
                                btrfs_sysfs_remove_fsid(fs_devs);
                                list_del(&fs_devs->list);
                                free_fs_devices(fs_devs);
+                               break;
                        } else {
                                fs_devs->num_devices--;
                                list_del(&dev->dev_list);
index 86863792f36ae1cc60fea8221195824926712e13..86d6a4435c87c31fa27b1ed5b194c2aa626cc801 100644 (file)
@@ -716,7 +716,7 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
        __be64 *ptr;
        sector_t lblock;
        sector_t lend;
-       int ret;
+       int ret = 0;
        int eob;
        unsigned int len;
        struct buffer_head *bh;
@@ -728,12 +728,14 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
                goto out;
        }
 
-       if ((flags & IOMAP_REPORT) && gfs2_is_stuffed(ip)) {
-               gfs2_stuffed_iomap(inode, iomap);
-               if (pos >= iomap->length)
-                       return -ENOENT;
-               ret = 0;
-               goto out;
+       if (gfs2_is_stuffed(ip)) {
+               if (flags & IOMAP_REPORT) {
+                       gfs2_stuffed_iomap(inode, iomap);
+                       if (pos >= iomap->length)
+                               ret = -ENOENT;
+                       goto out;
+               }
+               BUG_ON(!(flags & IOMAP_WRITE));
        }
 
        lblock = pos >> inode->i_blkbits;
@@ -744,7 +746,7 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
        iomap->type = IOMAP_HOLE;
        iomap->length = (u64)(lend - lblock) << inode->i_blkbits;
        iomap->flags = IOMAP_F_MERGED;
-       bmap_lock(ip, 0);
+       bmap_lock(ip, flags & IOMAP_WRITE);
 
        /*
         * Directory data blocks have a struct gfs2_meta_header header, so the
@@ -787,27 +789,28 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
                iomap->flags |= IOMAP_F_BOUNDARY;
        iomap->length = (u64)len << inode->i_blkbits;
 
-       ret = 0;
-
 out_release:
        release_metapath(&mp);
-       bmap_unlock(ip, 0);
+       bmap_unlock(ip, flags & IOMAP_WRITE);
 out:
        trace_gfs2_iomap_end(ip, iomap, ret);
        return ret;
 
 do_alloc:
-       if (!(flags & IOMAP_WRITE)) {
-               if (pos >= i_size_read(inode)) {
+       if (flags & IOMAP_WRITE) {
+               ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
+       } else if (flags & IOMAP_REPORT) {
+               loff_t size = i_size_read(inode);
+               if (pos >= size)
                        ret = -ENOENT;
-                       goto out_release;
-               }
-               ret = 0;
-               iomap->length = hole_size(inode, lblock, &mp);
-               goto out_release;
+               else if (height <= ip->i_height)
+                       iomap->length = hole_size(inode, lblock, &mp);
+               else
+                       iomap->length = size - pos;
+       } else {
+               if (height <= ip->i_height)
+                       iomap->length = hole_size(inode, lblock, &mp);
        }
-
-       ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
        goto out_release;
 }
 
index e8a93bc8285d85403db124f9acb3c80f2a122495..d1e82761de813abb95af0faac99194dba5821538 100644 (file)
@@ -510,6 +510,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
                        /* we have to zero-fill user buffer even if no read */
                        if (copy_to_user(buffer, buf, tsz))
                                return -EFAULT;
+               } else if (m->type == KCORE_USER) {
+                       /* User page is handled prior to normal kernel page: */
+                       if (copy_to_user(buffer, (char *)start, tsz))
+                               return -EFAULT;
                } else {
                        if (kern_addr_valid(start)) {
                                /*
index bc397573c43ad4f5d4a670f050818ff16ec313c0..67ab280ad13401088a2fb3426e9867f03bd51d26 100644 (file)
@@ -7,7 +7,8 @@
  * @nr: Bit to set
  * @addr: Address to count from
  *
- * This operation is atomic and provides acquire barrier semantics.
+ * This operation is atomic and provides acquire barrier semantics if
+ * the returned value is 0.
  * It can be used to implement bit locks.
  */
 #define test_and_set_bit_lock(nr, addr)        test_and_set_bit(nr, addr)
index 64e10746f28288107131c2bef7e879c0aa5722f4..968173ec2726d64367d18d88558975833366bc5b 100644 (file)
@@ -587,7 +587,7 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
 const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
                                               const struct device *dev);
 
-void *acpi_get_match_data(const struct device *dev);
+const void *acpi_device_get_match_data(const struct device *dev);
 extern bool acpi_driver_match_device(struct device *dev,
                                     const struct device_driver *drv);
 int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
@@ -766,7 +766,7 @@ static inline const struct acpi_device_id *acpi_match_device(
        return NULL;
 }
 
-static inline void *acpi_get_match_data(const struct device *dev)
+static inline const void *acpi_device_get_match_data(const struct device *dev)
 {
        return NULL;
 }
index 4f3df807cf8f73076ca6e735b901b14360528aa6..ed63f3b69c12b6378feeb52dc91503753ec74284 100644 (file)
@@ -49,7 +49,7 @@ struct blk_stat_callback;
 #define BLKDEV_MIN_RQ  4
 #define BLKDEV_MAX_RQ  128     /* Default maximum */
 
-/* Must be consisitent with blk_mq_poll_stats_bkt() */
+/* Must be consistent with blk_mq_poll_stats_bkt() */
 #define BLK_MQ_POLL_STATS_BKTS 16
 
 /*
index 631354acfa7204757a4ec29e74581dc2ee723a13..73bc63e0a1c4b664f233f176c7694fa8e54e34aa 100644 (file)
 
 #if GCC_VERSION >= 40100
 # define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-
-#define __nostackprotector     __attribute__((__optimize__("no-stack-protector")))
 #endif
 
 #if GCC_VERSION >= 40300
 #endif /* __CHECKER__ */
 #endif /* GCC_VERSION >= 40300 */
 
+#if GCC_VERSION >= 40400
+#define __optimize(level)      __attribute__((__optimize__(level)))
+#define __nostackprotector     __optimize("no-stack-protector")
+#endif /* GCC_VERSION >= 40400 */
+
 #if GCC_VERSION >= 40500
 
 #ifndef __CHECKER__
index c2cc57a2f508f46dd815d79a9626119436486e51..e835fc0423eccf7f66cca38c9dcbfa4f6d065ce3 100644 (file)
@@ -277,6 +277,10 @@ unsigned long read_word_at_a_time(const void *addr)
 
 #endif /* __ASSEMBLY__ */
 
+#ifndef __optimize
+# define __optimize(level)
+#endif
+
 /* Compile time object size, -1 for unknown */
 #ifndef __compiletime_object_size
 # define __compiletime_object_size(obj) -1
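
Usage-wise, __optimize() lets a single function override one optimisation
flag, __nostackprotector becomes a named instance of it, and the generic
fallback makes both expand to nothing on compilers without the attribute.
A hypothetical example of how such a function might be annotated (not
taken from this patch):

    /* Runs before the stack canary is set up, so opt this one
     * function out of the stack protector. */
    static void __nostackprotector early_setup(void)
    {
            /* ... early boot work ... */
    }
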
index 871f9e21810c8ebd22c3b1686777bc7c258cf8cf..0b3fc229086ca6cb98d24645c487ab097d6566e5 100644 (file)
@@ -225,7 +225,7 @@ static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev,
 }
 #endif
 
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX)
 void cpuidle_poll_state_init(struct cpuidle_driver *drv);
 #else
 static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
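
The fix uses the standard header idiom for optional kernel APIs: declare
the real function only when every config option it depends on is enabled,
and otherwise provide an empty static inline stub so callers build
unchanged. With placeholder names:

    struct foo_driver;

    #if defined(CONFIG_FOO) && defined(CONFIG_BAR)
    void foo_poll_init(struct foo_driver *drv);
    #else
    static inline void foo_poll_init(struct foo_driver *drv) {} /* no-op */
    #endif
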
index d4a2a7dcd72d91f0b0a15f8561a27d5838bf041a..bf53d893ad02bbe460dd64ce03d8cfe10d709931 100644 (file)
@@ -170,6 +170,8 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
 #define for_each_cpu_not(cpu, mask)            \
        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#define for_each_cpu_wrap(cpu, mask, start)    \
+       for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
 #define for_each_cpu_and(cpu, mask, and)       \
        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
 #else
index 34fe8463d10ea3be5df5fb27d80c51d663b94136..eb9eab4ecd6d7a050115b54ac81cde94f15e6591 100644 (file)
@@ -578,7 +578,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
 /*
  * This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please
- * don't use this is new code.
+ * don't use this in new code.
  */
 #ifndef arch_dma_supported
 #define arch_dma_supported(dev, mask)  (1)
index 4fa1a489efe4cd6e15d88a269044fbd37dfe054d..4fe8f289b3f6f01d3b5ad94676e2239d208e5229 100644 (file)
@@ -73,8 +73,8 @@ struct fwnode_operations {
        struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
        void (*put)(struct fwnode_handle *fwnode);
        bool (*device_is_available)(const struct fwnode_handle *fwnode);
-       void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
-                                      const struct device *dev);
+       const void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
+                                            const struct device *dev);
        bool (*property_present)(const struct fwnode_handle *fwnode,
                                 const char *propname);
        int (*property_read_int_array)(const struct fwnode_handle *fwnode,
index 7ff25a808feffd115e5af4faab56c2542c227c63..80db19d3a5054d5c064727644af49fdf31310111 100644 (file)
@@ -10,6 +10,7 @@ enum kcore_type {
        KCORE_VMALLOC,
        KCORE_RAM,
        KCORE_VMEMMAP,
+       KCORE_USER,
        KCORE_OTHER,
 };
 
index c30b32e3c86248c2f39fa1b926124184fc45b205..10191c28fc04ce22c605d54a87257ee5ff427651 100644 (file)
@@ -127,10 +127,4 @@ static __always_inline enum lru_list page_lru(struct page *page)
 
 #define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
 
-#ifdef arch_unmap_kpfn
-extern void arch_unmap_kpfn(unsigned long pfn);
-#else
-static __always_inline void arch_unmap_kpfn(unsigned long pfn) { }
-#endif
-
 #endif
index b99bced39ac2fa1b91cea50a8301336c3cbff133..fbc98e2c8228d0b8ca17e4af9be1cb1cddaccd27 100644 (file)
 static inline unsigned long array_index_mask_nospec(unsigned long index,
                                                    unsigned long size)
 {
-       /*
-        * Warn developers about inappropriate array_index_nospec() usage.
-        *
-        * Even if the CPU speculates past the WARN_ONCE branch, the
-        * sign bit of @index is taken into account when generating the
-        * mask.
-        *
-        * This warning is compiled out when the compiler can infer that
-        * @index and @size are less than LONG_MAX.
-        */
-       if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,
-                       "array_index_nospec() limited to range of [0, LONG_MAX]\n"))
-               return 0;
-
        /*
         * Always calculate and emit the mask even if the compiler
         * thinks the mask is not needed. The compiler does not take
@@ -43,6 +29,26 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 }
 #endif
 
+/*
+ * Warn developers about inappropriate array_index_nospec() usage.
+ *
+ * Even if the CPU speculates past the WARN_ONCE branch, the
+ * sign bit of @index is taken into account when generating the
+ * mask.
+ *
+ * This warning is compiled out when the compiler can infer that
+ * @index and @size are less than LONG_MAX.
+ */
+#define array_index_mask_nospec_check(index, size)                             \
+({                                                                             \
+       if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,                      \
+           "array_index_nospec() limited to range of [0, LONG_MAX]\n"))        \
+               _mask = 0;                                                      \
+       else                                                                    \
+               _mask = array_index_mask_nospec(index, size);                   \
+       _mask;                                                                  \
+})
+
 /*
  * array_index_nospec - sanitize an array index after a bounds check
  *
@@ -61,7 +67,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 ({                                                                     \
        typeof(index) _i = (index);                                     \
        typeof(size) _s = (size);                                       \
-       unsigned long _mask = array_index_mask_nospec(_i, _s);          \
+       unsigned long _mask = array_index_mask_nospec_check(_i, _s);    \
                                                                        \
        BUILD_BUG_ON(sizeof(_i) > sizeof(long));                        \
        BUILD_BUG_ON(sizeof(_s) > sizeof(long));                        \
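
For reference, the generic array_index_mask_nospec() that the new check
macro wraps computes ~0UL when index < size and 0 otherwise, without a
conditional branch the CPU could speculate past. A compilable sketch of
that fallback and its use, from a reading of the generic helper (arch
code may override it; it relies on an arithmetic right shift and on both
values being at most LONG_MAX):

    #include <stdio.h>

    #define BITS_PER_LONG (sizeof(long) * 8)

    /* ~0UL if idx < sz, 0UL otherwise. */
    static unsigned long mask_nospec(unsigned long idx, unsigned long sz)
    {
            return ~(long)(idx | (sz - 1 - idx)) >> (BITS_PER_LONG - 1);
    }

    int main(void)
    {
            unsigned long size = 8;

            /* in range: index passes through; out of range: forced to 0 */
            printf("%lu\n", 5 & mask_nospec(5, size));   /* prints 5 */
            printf("%lu\n", 9 & mask_nospec(9, size));   /* prints 0 */
            return 0;
    }
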
index 769d372c1edf648bd98700fcfdb03f094c1a4620..2eea4b310fc2850e137c0c636b8bcafb24f04f0c 100644 (file)
@@ -283,7 +283,7 @@ bool device_dma_supported(struct device *dev);
 
 enum dev_dma_attr device_get_dma_attr(struct device *dev);
 
-void *device_get_match_data(struct device *dev);
+const void *device_get_match_data(struct device *dev);
 
 int device_get_phy_mode(struct device *dev);
 
index dc368b8ce215ccc0f6a2b44a16dd2aaa603dbcd1..11c86fbfeb985429e11df0b2f9f7fc9767b5ba3e 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Distributed under the terms of the GNU GPL, version 2
  *
- * Please see kernel/semaphore.c for documentation of these functions
+ * Please see kernel/locking/semaphore.c for documentation of these functions
  */
 #ifndef __LINUX_SEMAPHORE_H
 #define __LINUX_SEMAPHORE_H
index 4bb86d379bd581566e1a371655e380b531bb1353..9a4fa0c3264aac22ca794751e7144052e1fe0c35 100644 (file)
@@ -31,7 +31,7 @@
 #define AC97_HEADPHONE         0x04    /* Headphone Volume (optional) */
 #define AC97_MASTER_MONO       0x06    /* Master Volume Mono (optional) */
 #define AC97_MASTER_TONE       0x08    /* Master Tone (Bass & Treble) (optional) */
-#define AC97_PC_BEEP           0x0a    /* PC Beep Volume (optinal) */
+#define AC97_PC_BEEP           0x0a    /* PC Beep Volume (optional) */
 #define AC97_PHONE             0x0c    /* Phone Volume (optional) */
 #define AC97_MIC               0x0e    /* MIC Volume */
 #define AC97_LINE              0x10    /* Line In Volume */
index b8adf05c534e725d1e0e3b614181b008e2488f65..7dd8f34c37dfea26f8ec460d3937a46c8f109f4d 100644 (file)
@@ -368,7 +368,7 @@ TRACE_EVENT(xen_mmu_flush_tlb,
            TP_printk("%s", "")
        );
 
-TRACE_EVENT(xen_mmu_flush_tlb_single,
+TRACE_EVENT(xen_mmu_flush_tlb_one_user,
            TP_PROTO(unsigned long addr),
            TP_ARGS(addr),
            TP_STRUCT__entry(
index e6a9c36470ee93ff524fb93e24fdfced8b057582..82b8b18ee1ebcdf19e873b5bb3d4ab9f2748f539 100644 (file)
@@ -1726,25 +1726,14 @@ static int irq_domain_debug_show(struct seq_file *m, void *p)
        irq_domain_debug_show_one(m, d, 0);
        return 0;
 }
-
-static int irq_domain_debug_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, irq_domain_debug_show, inode->i_private);
-}
-
-static const struct file_operations dfs_domain_ops = {
-       .open           = irq_domain_debug_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(irq_domain_debug);
 
 static void debugfs_add_domain_dir(struct irq_domain *d)
 {
        if (!d->name || !domain_dir || d->debugfs_file)
                return;
        d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
-                                             &dfs_domain_ops);
+                                             &irq_domain_debug_fops);
 }
 
 static void debugfs_remove_domain_dir(struct irq_domain *d)
@@ -1760,7 +1749,8 @@ void __init irq_domain_debugfs_init(struct dentry *root)
        if (!domain_dir)
                return;
 
-       debugfs_create_file("default", 0444, domain_dir, NULL, &dfs_domain_ops);
+       debugfs_create_file("default", 0444, domain_dir, NULL,
+                           &irq_domain_debug_fops);
        mutex_lock(&irq_domain_mutex);
        list_for_each_entry(d, &irq_domain_list, link)
                debugfs_add_domain_dir(d);
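
DEFINE_SHOW_ATTRIBUTE(irq_domain_debug) is what makes the
irq_domain_debug_fops name above exist: given irq_domain_debug_show(),
the macro generates the single_open() wrapper and the file_operations
that were previously spelled out by hand. From a reading of
include/linux/seq_file.h it expands to approximately this (a sketch, not
the verbatim macro):

    static int irq_domain_debug_open(struct inode *inode, struct file *file)
    {
            return single_open(file, irq_domain_debug_show, inode->i_private);
    }

    static const struct file_operations irq_domain_debug_fops = {
            .owner   = THIS_MODULE,
            .open    = irq_domain_debug_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };
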
index da2ccf14235814df4dc4b21509500d0d777cacb4..102160ff5c661e475e773888bfc3267d832bd4d6 100644 (file)
@@ -978,67 +978,90 @@ static int prepare_kprobe(struct kprobe *p)
 }
 
 /* Caller must lock kprobe_mutex */
-static void arm_kprobe_ftrace(struct kprobe *p)
+static int arm_kprobe_ftrace(struct kprobe *p)
 {
-       int ret;
+       int ret = 0;
 
        ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
                                   (unsigned long)p->addr, 0, 0);
-       WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
-       kprobe_ftrace_enabled++;
-       if (kprobe_ftrace_enabled == 1) {
+       if (ret) {
+               pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+               return ret;
+       }
+
+       if (kprobe_ftrace_enabled == 0) {
                ret = register_ftrace_function(&kprobe_ftrace_ops);
-               WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+               if (ret) {
+                       pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
+                       goto err_ftrace;
+               }
        }
+
+       kprobe_ftrace_enabled++;
+       return ret;
+
+err_ftrace:
+       /*
+        * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
+        * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
+        * empty filter_hash which would undesirably trace all functions.
+        */
+       ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
+       return ret;
 }
 
 /* Caller must lock kprobe_mutex */
-static void disarm_kprobe_ftrace(struct kprobe *p)
+static int disarm_kprobe_ftrace(struct kprobe *p)
 {
-       int ret;
+       int ret = 0;
 
-       kprobe_ftrace_enabled--;
-       if (kprobe_ftrace_enabled == 0) {
+       if (kprobe_ftrace_enabled == 1) {
                ret = unregister_ftrace_function(&kprobe_ftrace_ops);
-               WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+               if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
+                       return ret;
        }
+
+       kprobe_ftrace_enabled--;
+
        ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
                           (unsigned long)p->addr, 1, 0);
        WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+       return ret;
 }
 #else  /* !CONFIG_KPROBES_ON_FTRACE */
 #define prepare_kprobe(p)      arch_prepare_kprobe(p)
-#define arm_kprobe_ftrace(p)   do {} while (0)
-#define disarm_kprobe_ftrace(p)        do {} while (0)
+#define arm_kprobe_ftrace(p)   (-ENODEV)
+#define disarm_kprobe_ftrace(p)        (-ENODEV)
 #endif
 
 /* Arm a kprobe with text_mutex */
-static void arm_kprobe(struct kprobe *kp)
+static int arm_kprobe(struct kprobe *kp)
 {
-       if (unlikely(kprobe_ftrace(kp))) {
-               arm_kprobe_ftrace(kp);
-               return;
-       }
+       if (unlikely(kprobe_ftrace(kp)))
+               return arm_kprobe_ftrace(kp);
+
        cpus_read_lock();
        mutex_lock(&text_mutex);
        __arm_kprobe(kp);
        mutex_unlock(&text_mutex);
        cpus_read_unlock();
+
+       return 0;
 }
 
 /* Disarm a kprobe with text_mutex */
-static void disarm_kprobe(struct kprobe *kp, bool reopt)
+static int disarm_kprobe(struct kprobe *kp, bool reopt)
 {
-       if (unlikely(kprobe_ftrace(kp))) {
-               disarm_kprobe_ftrace(kp);
-               return;
-       }
+       if (unlikely(kprobe_ftrace(kp)))
+               return disarm_kprobe_ftrace(kp);
 
        cpus_read_lock();
        mutex_lock(&text_mutex);
        __disarm_kprobe(kp, reopt);
        mutex_unlock(&text_mutex);
        cpus_read_unlock();
+
+       return 0;
 }
 
 /*
@@ -1362,9 +1385,15 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
 
        if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
                ap->flags &= ~KPROBE_FLAG_DISABLED;
-               if (!kprobes_all_disarmed)
+               if (!kprobes_all_disarmed) {
                        /* Arm the breakpoint again. */
-                       arm_kprobe(ap);
+                       ret = arm_kprobe(ap);
+                       if (ret) {
+                               ap->flags |= KPROBE_FLAG_DISABLED;
+                               list_del_rcu(&p->list);
+                               synchronize_sched();
+                       }
+               }
        }
        return ret;
 }
@@ -1573,8 +1602,14 @@ int register_kprobe(struct kprobe *p)
        hlist_add_head_rcu(&p->hlist,
                       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-       if (!kprobes_all_disarmed && !kprobe_disabled(p))
-               arm_kprobe(p);
+       if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
+               ret = arm_kprobe(p);
+               if (ret) {
+                       hlist_del_rcu(&p->hlist);
+                       synchronize_sched();
+                       goto out;
+               }
+       }
 
        /* Try to optimize kprobe */
        try_to_optimize_kprobe(p);
@@ -1608,11 +1643,12 @@ static int aggr_kprobe_disabled(struct kprobe *ap)
 static struct kprobe *__disable_kprobe(struct kprobe *p)
 {
        struct kprobe *orig_p;
+       int ret;
 
        /* Get an original kprobe for return */
        orig_p = __get_valid_kprobe(p);
        if (unlikely(orig_p == NULL))
-               return NULL;
+               return ERR_PTR(-EINVAL);
 
        if (!kprobe_disabled(p)) {
                /* Disable probe if it is a child probe */
@@ -1626,8 +1662,13 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
                         * should have already been disarmed, so
                         * skip the unneeded disarming process.
                         */
-                       if (!kprobes_all_disarmed)
-                               disarm_kprobe(orig_p, true);
+                       if (!kprobes_all_disarmed) {
+                               ret = disarm_kprobe(orig_p, true);
+                               if (ret) {
+                                       p->flags &= ~KPROBE_FLAG_DISABLED;
+                                       return ERR_PTR(ret);
+                               }
+                       }
                        orig_p->flags |= KPROBE_FLAG_DISABLED;
                }
        }
@@ -1644,8 +1685,8 @@ static int __unregister_kprobe_top(struct kprobe *p)
 
        /* Disable kprobe. This will disarm it if needed. */
        ap = __disable_kprobe(p);
-       if (ap == NULL)
-               return -EINVAL;
+       if (IS_ERR(ap))
+               return PTR_ERR(ap);
 
        if (ap == p)
                /*
@@ -2078,12 +2119,14 @@ static void kill_kprobe(struct kprobe *p)
 int disable_kprobe(struct kprobe *kp)
 {
        int ret = 0;
+       struct kprobe *p;
 
        mutex_lock(&kprobe_mutex);
 
        /* Disable this kprobe */
-       if (__disable_kprobe(kp) == NULL)
-               ret = -EINVAL;
+       p = __disable_kprobe(kp);
+       if (IS_ERR(p))
+               ret = PTR_ERR(p);
 
        mutex_unlock(&kprobe_mutex);
        return ret;
@@ -2116,7 +2159,9 @@ int enable_kprobe(struct kprobe *kp)
 
        if (!kprobes_all_disarmed && kprobe_disabled(p)) {
                p->flags &= ~KPROBE_FLAG_DISABLED;
-               arm_kprobe(p);
+               ret = arm_kprobe(p);
+               if (ret)
+                       p->flags |= KPROBE_FLAG_DISABLED;
        }
 out:
        mutex_unlock(&kprobe_mutex);
@@ -2407,11 +2452,12 @@ static const struct file_operations debugfs_kprobe_blacklist_ops = {
        .release        = seq_release,
 };
 
-static void arm_all_kprobes(void)
+static int arm_all_kprobes(void)
 {
        struct hlist_head *head;
        struct kprobe *p;
-       unsigned int i;
+       unsigned int i, total = 0, errors = 0;
+       int err, ret = 0;
 
        mutex_lock(&kprobe_mutex);
 
@@ -2428,46 +2474,74 @@ static void arm_all_kprobes(void)
        /* Arming kprobes doesn't optimize kprobe itself */
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
-               hlist_for_each_entry_rcu(p, head, hlist)
-                       if (!kprobe_disabled(p))
-                               arm_kprobe(p);
+               /* Arm all kprobes on a best-effort basis */
+               hlist_for_each_entry_rcu(p, head, hlist) {
+                       if (!kprobe_disabled(p)) {
+                               err = arm_kprobe(p);
+                               if (err) {
+                                       errors++;
+                                       ret = err;
+                               }
+                               total++;
+                       }
+               }
        }
 
-       printk(KERN_INFO "Kprobes globally enabled\n");
+       if (errors)
+               pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
+                       errors, total);
+       else
+               pr_info("Kprobes globally enabled\n");
 
 already_enabled:
        mutex_unlock(&kprobe_mutex);
-       return;
+       return ret;
 }
 
-static void disarm_all_kprobes(void)
+static int disarm_all_kprobes(void)
 {
        struct hlist_head *head;
        struct kprobe *p;
-       unsigned int i;
+       unsigned int i, total = 0, errors = 0;
+       int err, ret = 0;
 
        mutex_lock(&kprobe_mutex);
 
        /* If kprobes are already disarmed, just return */
        if (kprobes_all_disarmed) {
                mutex_unlock(&kprobe_mutex);
-               return;
+               return 0;
        }
 
        kprobes_all_disarmed = true;
-       printk(KERN_INFO "Kprobes globally disabled\n");
 
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
+               /* Disarm all kprobes on a best-effort basis */
                hlist_for_each_entry_rcu(p, head, hlist) {
-                       if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
-                               disarm_kprobe(p, false);
+                       if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
+                               err = disarm_kprobe(p, false);
+                               if (err) {
+                                       errors++;
+                                       ret = err;
+                               }
+                               total++;
+                       }
                }
        }
+
+       if (errors)
+               pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
+                       errors, total);
+       else
+               pr_info("Kprobes globally disabled\n");
+
        mutex_unlock(&kprobe_mutex);
 
        /* Wait for disarming all kprobes by optimizer */
        wait_for_kprobe_optimizer();
+
+       return ret;
 }
 
 /*
@@ -2494,6 +2568,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
 {
        char buf[32];
        size_t buf_size;
+       int ret = 0;
 
        buf_size = min(count, (sizeof(buf)-1));
        if (copy_from_user(buf, user_buf, buf_size))
@@ -2504,17 +2579,20 @@ static ssize_t write_enabled_file_bool(struct file *file,
        case 'y':
        case 'Y':
        case '1':
-               arm_all_kprobes();
+               ret = arm_all_kprobes();
                break;
        case 'n':
        case 'N':
        case '0':
-               disarm_all_kprobes();
+               ret = disarm_all_kprobes();
                break;
        default:
                return -EINVAL;
        }
 
+       if (ret)
+               return ret;
+
        return count;
 }
 
index 38ece035039e35bf997e4161ec8e84cceaf4508f..d880296245c5954c432c539e2f14f3ade1c4719b 100644 (file)
@@ -379,6 +379,14 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
        tail = encode_tail(smp_processor_id(), idx);
 
        node += idx;
+
+       /*
+        * Ensure that we increment the head node->count before initialising
+        * the actual node. If the compiler is kind enough to reorder these
+        * stores, then an IRQ could overwrite our assignments.
+        */
+       barrier();
+
        node->locked = 0;
        node->next = NULL;
        pv_init_node(node);
@@ -408,14 +416,15 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
         */
        if (old & _Q_TAIL_MASK) {
                prev = decode_tail(old);
+
                /*
-                * The above xchg_tail() is also a load of @lock which
-                * generates, through decode_tail(), a pointer.  The address
-                * dependency matches the RELEASE of xchg_tail() such that
-                * the subsequent access to @prev happens after.
+                * We must ensure that the stores to @node are observed before
+                * the write to prev->next. The address dependency from
+                * xchg_tail is not sufficient to ensure this because the read
+                * component of xchg_tail is unordered with respect to the
+                * initialisation of @node.
                 */
-
-               WRITE_ONCE(prev->next, node);
+               smp_store_release(&prev->next, node);
 
                pv_wait_node(node, prev);
                arch_mcs_spin_lock_contended(&node->locked);
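
Both qspinlock hunks enforce the same rule: the MCS node must be fully
initialised before anyone can observe it, first against the local
interrupt path (the compiler barrier after bumping node->count) and then
against other CPUs (a release store when linking the node into the
queue). In C11-atomics terms the publish step looks roughly like this
sketch (hypothetical types; the kernel uses smp_store_release()):

    #include <stdatomic.h>
    #include <stddef.h>

    struct mcs_node {
            _Atomic(struct mcs_node *) next;
            atomic_int locked;
    };

    static void publish_node(struct mcs_node *prev, struct mcs_node *node)
    {
            /* initialise the node first ... */
            atomic_store_explicit(&node->locked, 0, memory_order_relaxed);
            atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
            /* ... then publish it; release orders the stores above */
            atomic_store_explicit(&prev->next, node, memory_order_release);
    }
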
index bf724c1952eaca3b1eb7cb6bf83b2fa436068f4d..e7c535eee0a6d493a2a43eba210c08c6858b63d1 100644 (file)
@@ -2601,19 +2601,31 @@ static inline void finish_task(struct task_struct *prev)
 #endif
 }
 
-static inline void finish_lock_switch(struct rq *rq)
+static inline void
+prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
 {
+       /*
+        * The runqueue lock will be released by the next task
+        * (which is an invalid locking op, but in the case of the
+        * scheduler it's an obvious special case), so we
+        * do an early lockdep release here:
+        */
+       rq_unpin_lock(rq, rf);
+       spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 #ifdef CONFIG_DEBUG_SPINLOCK
        /* this is a valid case when another task releases the spinlock */
-       rq->lock.owner = current;
+       rq->lock.owner = next;
 #endif
+}
+
+static inline void finish_lock_switch(struct rq *rq)
+{
        /*
         * If we are tracking spinlock dependencies then we have to
         * fix up the runqueue lock - which gets 'carried over' from
         * prev into current:
         */
        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-
        raw_spin_unlock_irq(&rq->lock);
 }
 
@@ -2844,14 +2856,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 
        rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
 
-       /*
-        * Since the runqueue lock will be released by the next
-        * task (which is an invalid locking op but in the case
-        * of the scheduler it's an obvious special-case), so we
-        * do an early lockdep release here:
-        */
-       rq_unpin_lock(rq, rf);
-       spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
+       prepare_lock_switch(rq, next, rf);
 
        /* Here we just switch the register state and the stack. */
        switch_to(prev, next, prev);
index dd062a1c8cf043a26882e3b694d7c15588a2718c..7936f548e071e201a2125981dedce3e162711a24 100644 (file)
@@ -19,8 +19,6 @@
 
 #include "sched.h"
 
-#define SUGOV_KTHREAD_PRIORITY 50
-
 struct sugov_tunables {
        struct gov_attr_set attr_set;
        unsigned int rate_limit_us;
index 9bb0e0c412ec6617c5ef6cfa0b4f703a6a536a23..9df09782025cb54d3d381ed696624211c6e23769 100644 (file)
@@ -1153,6 +1153,7 @@ static void update_curr_dl(struct rq *rq)
        struct sched_dl_entity *dl_se = &curr->dl;
        u64 delta_exec, scaled_delta_exec;
        int cpu = cpu_of(rq);
+       u64 now;
 
        if (!dl_task(curr) || !on_dl_rq(dl_se))
                return;
@@ -1165,7 +1166,8 @@ static void update_curr_dl(struct rq *rq)
         * natural solution, but the full ramifications of this
         * approach need further study.
         */
-       delta_exec = rq_clock_task(rq) - curr->se.exec_start;
+       now = rq_clock_task(rq);
+       delta_exec = now - curr->se.exec_start;
        if (unlikely((s64)delta_exec <= 0)) {
                if (unlikely(dl_se->dl_yielded))
                        goto throttle;
@@ -1178,7 +1180,7 @@ static void update_curr_dl(struct rq *rq)
        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);
 
-       curr->se.exec_start = rq_clock_task(rq);
+       curr->se.exec_start = now;
        cgroup_account_cputime(curr, delta_exec);
 
        sched_rt_avg_update(rq, delta_exec);
index 663b2355a3aa772d8bcc8c90b55a3e0e0e3a6e17..aad49451584e6766d1b9a2f657397da345146c23 100644 (file)
@@ -950,12 +950,13 @@ static void update_curr_rt(struct rq *rq)
 {
        struct task_struct *curr = rq->curr;
        struct sched_rt_entity *rt_se = &curr->rt;
-       u64 now = rq_clock_task(rq);
        u64 delta_exec;
+       u64 now;
 
        if (curr->sched_class != &rt_sched_class)
                return;
 
+       now = rq_clock_task(rq);
        delta_exec = now - curr->se.exec_start;
        if (unlikely((s64)delta_exec <= 0))
                return;
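
update_curr_dl() and update_curr_rt() receive the same fix: sample rq_clock_task() once and reuse the value, so exec_start advances by exactly the delta that was just accounted; calling the clock twice would silently drop the time elapsed between the two reads. A sketch of the pattern over a generic sched_entity:

    static void update_curr_sketch(struct rq *rq, struct sched_entity *se)
    {
            u64 now = rq_clock_task(rq);            /* single clock sample */
            s64 delta_exec = now - se->exec_start;

            if (unlikely(delta_exec <= 0))
                    return;

            se->sum_exec_runtime += delta_exec;
            se->exec_start = now;   /* reuse the sample; a second
                                     * rq_clock_task(rq) call here would
                                     * lose the time between the reads */
    }
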
index 40b1f92f2214e8de08109b70ef4f999346b20b99..c9e8e21cb33406f33b539cfd25d82b5749071422 100644 (file)
@@ -84,6 +84,10 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
        return page_address(page);
 }
 
+/*
+ * NOTE: this function must never look at the dma_addr argument, because we want
+ * to be able to use it as a helper for iommu implementations as well.
+ */
 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
 {
@@ -152,5 +156,6 @@ const struct dma_map_ops dma_direct_ops = {
        .map_sg                 = dma_direct_map_sg,
        .dma_supported          = dma_direct_supported,
        .mapping_error          = dma_direct_mapping_error,
+       .is_phys                = 1,
 };
 EXPORT_SYMBOL(dma_direct_ops);
index 4b80ccee4535f103552735fe56d4362302692c96..8291b75f42c8494c80b35a5b5667ded79a126a96 100644 (file)
@@ -1139,8 +1139,6 @@ int memory_failure(unsigned long pfn, int flags)
                return 0;
        }
 
-       arch_unmap_kpfn(pfn);
-
        orig_head = hpage = compound_head(p);
        num_poisoned_pages_inc();
 
index dd8de96f55475c8de7edae699dd10d0adb8c4fb6..5fcfc24904d199dc1869205a8251a8e18bc115bf 100644 (file)
@@ -80,7 +80,7 @@
 
 #include "internal.h"
 
-#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
 #endif
 
index f3a4efcf1456422a6c6f036e8b7364f89b407bfd..3aa5a93ad107c1490146240a112e3a9bf3a89b62 100644 (file)
@@ -160,7 +160,8 @@ static void req_done(struct virtqueue *vq)
                spin_unlock_irqrestore(&chan->lock, flags);
                /* Wakeup if anyone waiting for VirtIO ring space. */
                wake_up(chan->vc_wq);
-               p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
+               if (len)
+                       p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
        }
 }
 
index f8a64e15e5bfb8c94f8d376d261f14ea610f99ec..baa5f8ef89d259d372bb5f15dcdeb9515af88c6e 100644 (file)
@@ -5,7 +5,6 @@
 
 config AC97_BUS_NEW
        tristate
-       select AC97
        help
          This is the new AC97 bus type, successor of AC97_BUS. The ported
          drivers which benefit from the AC97 automatic probing should "select"
index 60db32785f6229773fdf752a93e01f7bf42398eb..04d4db44fae5c9199754aa80066ae0c1220a41d5 100644 (file)
@@ -1003,7 +1003,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
 {
        struct snd_seq_client *client = file->private_data;
        int written = 0, len;
-       int err = -EINVAL;
+       int err;
        struct snd_seq_event event;
 
        if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
@@ -1018,11 +1018,15 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
 
        /* allocate the pool now if the pool is not allocated yet */ 
        if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
-               if (snd_seq_pool_init(client->pool) < 0)
+               mutex_lock(&client->ioctl_mutex);
+               err = snd_seq_pool_init(client->pool);
+               mutex_unlock(&client->ioctl_mutex);
+               if (err < 0)
                        return -ENOMEM;
        }
 
        /* only process whole events */
+       err = -EINVAL;
        while (count >= sizeof(struct snd_seq_event)) {
                /* Read in the event header from the user */
                len = sizeof(event);
index 23475888192b5b5b0b1657ad1def3c38c57c5e9a..ce28f7ce64e63774655a421bf1102b7c354786dc 100644 (file)
@@ -3465,6 +3465,19 @@ static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
                spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
 }
 
+static void alc269_fixup_pincfg_U7x7_headset_mic(struct hda_codec *codec,
+                                                const struct hda_fixup *fix,
+                                                int action)
+{
+       unsigned int cfg_headphone = snd_hda_codec_get_pincfg(codec, 0x21);
+       unsigned int cfg_headset_mic = snd_hda_codec_get_pincfg(codec, 0x19);
+
+       if (cfg_headphone && cfg_headset_mic == 0x411111f0)
+               snd_hda_codec_set_pincfg(codec, 0x19,
+                       (cfg_headphone & ~AC_DEFCFG_DEVICE) |
+                       (AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT));
+}
+
 static void alc269_fixup_hweq(struct hda_codec *codec,
                               const struct hda_fixup *fix, int action)
 {
@@ -4972,6 +4985,28 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
        }
 }
 
+static void alc_fixup_tpt470_dock(struct hda_codec *codec,
+                                 const struct hda_fixup *fix, int action)
+{
+       static const struct hda_pintbl pincfgs[] = {
+               { 0x17, 0x21211010 }, /* dock headphone */
+               { 0x19, 0x21a11010 }, /* dock mic */
+               { }
+       };
+       struct alc_spec *spec = codec->spec;
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+               /* Enable DOCK device */
+               snd_hda_codec_write(codec, 0x17, 0,
+                           AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
+               /* Enable DOCK device */
+               snd_hda_codec_write(codec, 0x19, 0,
+                           AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
+               snd_hda_apply_pincfgs(codec, pincfgs);
+       }
+}
+
 static void alc_shutup_dell_xps13(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
@@ -5351,6 +5386,7 @@ enum {
        ALC269_FIXUP_LIFEBOOK_EXTMIC,
        ALC269_FIXUP_LIFEBOOK_HP_PIN,
        ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT,
+       ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC,
        ALC269_FIXUP_AMIC,
        ALC269_FIXUP_DMIC,
        ALC269VB_FIXUP_AMIC,
@@ -5446,6 +5482,7 @@ enum {
        ALC700_FIXUP_INTEL_REFERENCE,
        ALC274_FIXUP_DELL_BIND_DACS,
        ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
+       ALC298_FIXUP_TPT470_DOCK,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -5556,6 +5593,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
        },
+       [ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_pincfg_U7x7_headset_mic,
+       },
        [ALC269_FIXUP_AMIC] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -6271,6 +6312,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC274_FIXUP_DELL_BIND_DACS
        },
+       [ALC298_FIXUP_TPT470_DOCK] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_tpt470_dock,
+               .chained = true,
+               .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6321,6 +6368,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
        SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
        SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+       SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -6422,6 +6471,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
        SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
        SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+       SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
        SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
        SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
        SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
@@ -6450,8 +6500,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x222d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x222e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
+       SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
@@ -6472,7 +6530,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
+       SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
        SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
@@ -6734,6 +6797,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0xb7a60130},
                {0x14, 0x90170110},
                {0x21, 0x02211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60130},
+               {0x14, 0x90170110},
+               {0x14, 0x01011020},
+               {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC,
@@ -6803,6 +6871,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0x90a60120},
                {0x14, 0x90170110},
                {0x21, 0x0321101f}),
+       SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0xb7a60130},
+               {0x14, 0x90170110},
+               {0x21, 0x04211020}),
        SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
                ALC290_STANDARD_PINS,
                {0x15, 0x04211040},
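
For alc269_fixup_pincfg_U7x7_headset_mic() above: a pin's default config encodes the jack type in the device field, bits 20-23 (AC_DEFCFG_DEVICE, with AC_DEFCFG_DEVICE_SHIFT = 20). The fixup copies the headphone pin's config and rewrites only that field to mic-in. A worked example with a hypothetical headphone config value:

    /* Assume cfg_headphone == 0x0221101f (device field 0x2, HP Out):
     *
     *   cfg_headphone & ~AC_DEFCFG_DEVICE        = 0x0221101f & ~0x00f00000
     *                                            = 0x0201101f
     *   AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT = 0xa << 20 = 0x00a00000
     *
     *   result                                   = 0x02a1101f
     *
     * i.e. the same location/connection bits, now typed as a mic. */
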
index 9afb8ab524c7ebfc7979794b169105368a202fce..06b22624ab7a0d9b832980a1d58a4c2a21df3844 100644 (file)
@@ -347,17 +347,20 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
                            int validx, int *value_ret)
 {
        struct snd_usb_audio *chip = cval->head.mixer->chip;
-       unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */
+       /* enough space for one range */
+       unsigned char buf[sizeof(__u16) + 3 * sizeof(__u32)];
        unsigned char *val;
-       int idx = 0, ret, size;
+       int idx = 0, ret, val_size, size;
        __u8 bRequest;
 
+       val_size = uac2_ctl_value_size(cval->val_type);
+
        if (request == UAC_GET_CUR) {
                bRequest = UAC2_CS_CUR;
-               size = uac2_ctl_value_size(cval->val_type);
+               size = val_size;
        } else {
                bRequest = UAC2_CS_RANGE;
-               size = sizeof(buf);
+               size = sizeof(__u16) + 3 * val_size;
        }
 
        memset(buf, 0, sizeof(buf));
@@ -390,16 +393,17 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
                val = buf + sizeof(__u16);
                break;
        case UAC_GET_MAX:
-               val = buf + sizeof(__u16) * 2;
+               val = buf + sizeof(__u16) + val_size;
                break;
        case UAC_GET_RES:
-               val = buf + sizeof(__u16) * 3;
+               val = buf + sizeof(__u16) + val_size * 2;
                break;
        default:
                return -EINVAL;
        }
 
-       *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16)));
+       *value_ret = convert_signed_value(cval,
+                                         snd_usb_combine_bytes(val, val_size));
 
        return 0;
 }
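
For context on the get_ctl_value_v2() change: a UAC2 RANGE reply is a 16-bit wNumSubRanges header followed by (MIN, MAX, RES) values whose width depends on the control's value type, so the old fixed __u16 strides were only correct for 16-bit controls. The offsets the fixed code computes, sketched for one sub-range:

    /* val_size = uac2_ctl_value_size(cval->val_type), e.g. 4 for a
     * 32-bit control.  Layout of the reply buffer:
     *
     *   [0 .. 1]                        wNumSubRanges (__u16)
     *   [2 .. 2+val_size-1]             MIN
     *   [2+val_size .. 2+2*val_size-1]  MAX
     *   [2+2*val_size .. ]              RES
     */
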
index b9c9a19f9588a8c2eab67e6bd2ad8e5b531c7655..3cbfae6604f98b77f5cc6fe12c32ce4f7ce477c7 100644 (file)
@@ -352,6 +352,15 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
                ep = 0x86;
                iface = usb_ifnum_to_if(dev, 2);
 
+               if (!iface || iface->num_altsetting == 0)
+                       return -EINVAL;
+
+               alts = &iface->altsetting[1];
+               goto add_sync_ep;
+       case USB_ID(0x1397, 0x0002):
+               ep = 0x81;
+               iface = usb_ifnum_to_if(dev, 1);
+
                if (!iface || iface->num_altsetting == 0)
                        return -EINVAL;
 
index a66ef5777887a78d7416e64c049c73b26477c7f7..ea8f3de92fa4bedf6914b91e7f6dbea3841ac8de 100644 (file)
@@ -1363,8 +1363,11 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
 
-       /* Amanero Combo384 USB interface with native DSD support */
-       case USB_ID(0x16d0, 0x071a):
+       /* Amanero Combo384 USB based DACs with native DSD support */
+       case USB_ID(0x16d0, 0x071a):  /* Amanero - Combo384 */
+       case USB_ID(0x2ab6, 0x0004):  /* T+A DAC8DSD-V2.0, MP1000E-V2.0, MP2000R-V2.0, MP2500R-V2.0, MP3100HV-V2.0 */
+       case USB_ID(0x2ab6, 0x0005):  /* T+A USB HD Audio 1 */
+       case USB_ID(0x2ab6, 0x0006):  /* T+A USB HD Audio 2 */
                if (fp->altsetting == 2) {
                        switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) {
                        case 0x199:
index 637b7263cb867f09618cc2a5e7b525686a0ea267..833ed9a16adfd03e0b6cb70adc19fe03055f7344 100644 (file)
@@ -632,6 +632,8 @@ struct kvm_ppc_cpu_char {
 #define KVM_REG_PPC_TIDR       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc)
 #define KVM_REG_PPC_PSSCR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
 
+#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
+
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
  */
diff --git a/tools/arch/s390/include/uapi/asm/unistd.h b/tools/arch/s390/include/uapi/asm/unistd.h
deleted file mode 100644 (file)
index 7251209..0000000
+++ /dev/null
@@ -1,412 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- *  S390 version
- *
- *  Derived from "include/asm-i386/unistd.h"
- */
-
-#ifndef _UAPI_ASM_S390_UNISTD_H_
-#define _UAPI_ASM_S390_UNISTD_H_
-
-/*
- * This file contains the system call numbers.
- */
-
-#define __NR_exit                 1
-#define __NR_fork                 2
-#define __NR_read                 3
-#define __NR_write                4
-#define __NR_open                 5
-#define __NR_close                6
-#define __NR_restart_syscall     7
-#define __NR_creat                8
-#define __NR_link                 9
-#define __NR_unlink              10
-#define __NR_execve              11
-#define __NR_chdir               12
-#define __NR_mknod               14
-#define __NR_chmod               15
-#define __NR_lseek               19
-#define __NR_getpid              20
-#define __NR_mount               21
-#define __NR_umount              22
-#define __NR_ptrace              26
-#define __NR_alarm               27
-#define __NR_pause               29
-#define __NR_utime               30
-#define __NR_access              33
-#define __NR_nice                34
-#define __NR_sync                36
-#define __NR_kill                37
-#define __NR_rename              38
-#define __NR_mkdir               39
-#define __NR_rmdir               40
-#define __NR_dup                 41
-#define __NR_pipe                42
-#define __NR_times               43
-#define __NR_brk                 45
-#define __NR_signal              48
-#define __NR_acct                51
-#define __NR_umount2             52
-#define __NR_ioctl               54
-#define __NR_fcntl               55
-#define __NR_setpgid             57
-#define __NR_umask               60
-#define __NR_chroot              61
-#define __NR_ustat               62
-#define __NR_dup2                63
-#define __NR_getppid             64
-#define __NR_getpgrp             65
-#define __NR_setsid              66
-#define __NR_sigaction           67
-#define __NR_sigsuspend          72
-#define __NR_sigpending          73
-#define __NR_sethostname         74
-#define __NR_setrlimit           75
-#define __NR_getrusage           77
-#define __NR_gettimeofday        78
-#define __NR_settimeofday        79
-#define __NR_symlink             83
-#define __NR_readlink            85
-#define __NR_uselib              86
-#define __NR_swapon              87
-#define __NR_reboot              88
-#define __NR_readdir             89
-#define __NR_mmap                90
-#define __NR_munmap              91
-#define __NR_truncate            92
-#define __NR_ftruncate           93
-#define __NR_fchmod              94
-#define __NR_getpriority         96
-#define __NR_setpriority         97
-#define __NR_statfs              99
-#define __NR_fstatfs            100
-#define __NR_socketcall         102
-#define __NR_syslog             103
-#define __NR_setitimer          104
-#define __NR_getitimer          105
-#define __NR_stat               106
-#define __NR_lstat              107
-#define __NR_fstat              108
-#define __NR_lookup_dcookie     110
-#define __NR_vhangup            111
-#define __NR_idle               112
-#define __NR_wait4              114
-#define __NR_swapoff            115
-#define __NR_sysinfo            116
-#define __NR_ipc                117
-#define __NR_fsync              118
-#define __NR_sigreturn          119
-#define __NR_clone              120
-#define __NR_setdomainname      121
-#define __NR_uname              122
-#define __NR_adjtimex           124
-#define __NR_mprotect           125
-#define __NR_sigprocmask        126
-#define __NR_create_module      127
-#define __NR_init_module        128
-#define __NR_delete_module      129
-#define __NR_get_kernel_syms    130
-#define __NR_quotactl           131
-#define __NR_getpgid            132
-#define __NR_fchdir             133
-#define __NR_bdflush            134
-#define __NR_sysfs              135
-#define __NR_personality        136
-#define __NR_afs_syscall        137 /* Syscall for Andrew File System */
-#define __NR_getdents           141
-#define __NR_flock              143
-#define __NR_msync              144
-#define __NR_readv              145
-#define __NR_writev             146
-#define __NR_getsid             147
-#define __NR_fdatasync          148
-#define __NR__sysctl            149
-#define __NR_mlock              150
-#define __NR_munlock            151
-#define __NR_mlockall           152
-#define __NR_munlockall         153
-#define __NR_sched_setparam             154
-#define __NR_sched_getparam             155
-#define __NR_sched_setscheduler         156
-#define __NR_sched_getscheduler         157
-#define __NR_sched_yield                158
-#define __NR_sched_get_priority_max     159
-#define __NR_sched_get_priority_min     160
-#define __NR_sched_rr_get_interval      161
-#define __NR_nanosleep          162
-#define __NR_mremap             163
-#define __NR_query_module       167
-#define __NR_poll               168
-#define __NR_nfsservctl         169
-#define __NR_prctl              172
-#define __NR_rt_sigreturn       173
-#define __NR_rt_sigaction       174
-#define __NR_rt_sigprocmask     175
-#define __NR_rt_sigpending      176
-#define __NR_rt_sigtimedwait    177
-#define __NR_rt_sigqueueinfo    178
-#define __NR_rt_sigsuspend      179
-#define __NR_pread64            180
-#define __NR_pwrite64           181
-#define __NR_getcwd             183
-#define __NR_capget             184
-#define __NR_capset             185
-#define __NR_sigaltstack        186
-#define __NR_sendfile           187
-#define __NR_getpmsg           188
-#define __NR_putpmsg           189
-#define __NR_vfork             190
-#define __NR_pivot_root         217
-#define __NR_mincore            218
-#define __NR_madvise            219
-#define __NR_getdents64                220
-#define __NR_readahead         222
-#define __NR_setxattr          224
-#define __NR_lsetxattr         225
-#define __NR_fsetxattr         226
-#define __NR_getxattr          227
-#define __NR_lgetxattr         228
-#define __NR_fgetxattr         229
-#define __NR_listxattr         230
-#define __NR_llistxattr                231
-#define __NR_flistxattr                232
-#define __NR_removexattr       233
-#define __NR_lremovexattr      234
-#define __NR_fremovexattr      235
-#define __NR_gettid            236
-#define __NR_tkill             237
-#define __NR_futex             238
-#define __NR_sched_setaffinity 239
-#define __NR_sched_getaffinity 240
-#define __NR_tgkill            241
-/* Number 242 is reserved for tux */
-#define __NR_io_setup          243
-#define __NR_io_destroy                244
-#define __NR_io_getevents      245
-#define __NR_io_submit         246
-#define __NR_io_cancel         247
-#define __NR_exit_group                248
-#define __NR_epoll_create      249
-#define __NR_epoll_ctl         250
-#define __NR_epoll_wait                251
-#define __NR_set_tid_address   252
-#define __NR_fadvise64         253
-#define __NR_timer_create      254
-#define __NR_timer_settime     255
-#define __NR_timer_gettime     256
-#define __NR_timer_getoverrun  257
-#define __NR_timer_delete      258
-#define __NR_clock_settime     259
-#define __NR_clock_gettime     260
-#define __NR_clock_getres      261
-#define __NR_clock_nanosleep   262
-/* Number 263 is reserved for vserver */
-#define __NR_statfs64          265
-#define __NR_fstatfs64         266
-#define __NR_remap_file_pages  267
-#define __NR_mbind             268
-#define __NR_get_mempolicy     269
-#define __NR_set_mempolicy     270
-#define __NR_mq_open           271
-#define __NR_mq_unlink         272
-#define __NR_mq_timedsend      273
-#define __NR_mq_timedreceive   274
-#define __NR_mq_notify         275
-#define __NR_mq_getsetattr     276
-#define __NR_kexec_load                277
-#define __NR_add_key           278
-#define __NR_request_key       279
-#define __NR_keyctl            280
-#define __NR_waitid            281
-#define __NR_ioprio_set                282
-#define __NR_ioprio_get                283
-#define __NR_inotify_init      284
-#define __NR_inotify_add_watch 285
-#define __NR_inotify_rm_watch  286
-#define __NR_migrate_pages     287
-#define __NR_openat            288
-#define __NR_mkdirat           289
-#define __NR_mknodat           290
-#define __NR_fchownat          291
-#define __NR_futimesat         292
-#define __NR_unlinkat          294
-#define __NR_renameat          295
-#define __NR_linkat            296
-#define __NR_symlinkat         297
-#define __NR_readlinkat                298
-#define __NR_fchmodat          299
-#define __NR_faccessat         300
-#define __NR_pselect6          301
-#define __NR_ppoll             302
-#define __NR_unshare           303
-#define __NR_set_robust_list   304
-#define __NR_get_robust_list   305
-#define __NR_splice            306
-#define __NR_sync_file_range   307
-#define __NR_tee               308
-#define __NR_vmsplice          309
-#define __NR_move_pages                310
-#define __NR_getcpu            311
-#define __NR_epoll_pwait       312
-#define __NR_utimes            313
-#define __NR_fallocate         314
-#define __NR_utimensat         315
-#define __NR_signalfd          316
-#define __NR_timerfd           317
-#define __NR_eventfd           318
-#define __NR_timerfd_create    319
-#define __NR_timerfd_settime   320
-#define __NR_timerfd_gettime   321
-#define __NR_signalfd4         322
-#define __NR_eventfd2          323
-#define __NR_inotify_init1     324
-#define __NR_pipe2             325
-#define __NR_dup3              326
-#define __NR_epoll_create1     327
-#define        __NR_preadv             328
-#define        __NR_pwritev            329
-#define __NR_rt_tgsigqueueinfo 330
-#define __NR_perf_event_open   331
-#define __NR_fanotify_init     332
-#define __NR_fanotify_mark     333
-#define __NR_prlimit64         334
-#define __NR_name_to_handle_at 335
-#define __NR_open_by_handle_at 336
-#define __NR_clock_adjtime     337
-#define __NR_syncfs            338
-#define __NR_setns             339
-#define __NR_process_vm_readv  340
-#define __NR_process_vm_writev 341
-#define __NR_s390_runtime_instr 342
-#define __NR_kcmp              343
-#define __NR_finit_module      344
-#define __NR_sched_setattr     345
-#define __NR_sched_getattr     346
-#define __NR_renameat2         347
-#define __NR_seccomp           348
-#define __NR_getrandom         349
-#define __NR_memfd_create      350
-#define __NR_bpf               351
-#define __NR_s390_pci_mmio_write       352
-#define __NR_s390_pci_mmio_read                353
-#define __NR_execveat          354
-#define __NR_userfaultfd       355
-#define __NR_membarrier                356
-#define __NR_recvmmsg          357
-#define __NR_sendmmsg          358
-#define __NR_socket            359
-#define __NR_socketpair                360
-#define __NR_bind              361
-#define __NR_connect           362
-#define __NR_listen            363
-#define __NR_accept4           364
-#define __NR_getsockopt                365
-#define __NR_setsockopt                366
-#define __NR_getsockname       367
-#define __NR_getpeername       368
-#define __NR_sendto            369
-#define __NR_sendmsg           370
-#define __NR_recvfrom          371
-#define __NR_recvmsg           372
-#define __NR_shutdown          373
-#define __NR_mlock2            374
-#define __NR_copy_file_range   375
-#define __NR_preadv2           376
-#define __NR_pwritev2          377
-#define __NR_s390_guarded_storage      378
-#define __NR_statx             379
-#define __NR_s390_sthyi                380
-#define NR_syscalls 381
-
-/* 
- * There are some system calls that are not present on 64 bit, some
- * have a different name although they do the same (e.g. __NR_chown32
- * is __NR_chown on 64 bit).
- */
-#ifndef __s390x__
-
-#define __NR_time               13
-#define __NR_lchown             16
-#define __NR_setuid             23
-#define __NR_getuid             24
-#define __NR_stime              25
-#define __NR_setgid             46
-#define __NR_getgid             47
-#define __NR_geteuid            49
-#define __NR_getegid            50
-#define __NR_setreuid           70
-#define __NR_setregid           71
-#define __NR_getrlimit          76
-#define __NR_getgroups          80
-#define __NR_setgroups          81
-#define __NR_fchown             95
-#define __NR_ioperm            101
-#define __NR_setfsuid          138
-#define __NR_setfsgid          139
-#define __NR__llseek           140
-#define __NR__newselect        142
-#define __NR_setresuid         164
-#define __NR_getresuid         165
-#define __NR_setresgid         170
-#define __NR_getresgid         171
-#define __NR_chown             182
-#define __NR_ugetrlimit                191     /* SuS compliant getrlimit */
-#define __NR_mmap2             192
-#define __NR_truncate64                193
-#define __NR_ftruncate64       194
-#define __NR_stat64            195
-#define __NR_lstat64           196
-#define __NR_fstat64           197
-#define __NR_lchown32          198
-#define __NR_getuid32          199
-#define __NR_getgid32          200
-#define __NR_geteuid32         201
-#define __NR_getegid32         202
-#define __NR_setreuid32                203
-#define __NR_setregid32                204
-#define __NR_getgroups32       205
-#define __NR_setgroups32       206
-#define __NR_fchown32          207
-#define __NR_setresuid32       208
-#define __NR_getresuid32       209
-#define __NR_setresgid32       210
-#define __NR_getresgid32       211
-#define __NR_chown32           212
-#define __NR_setuid32          213
-#define __NR_setgid32          214
-#define __NR_setfsuid32                215
-#define __NR_setfsgid32                216
-#define __NR_fcntl64           221
-#define __NR_sendfile64                223
-#define __NR_fadvise64_64      264
-#define __NR_fstatat64         293
-
-#else
-
-#define __NR_select            142
-#define __NR_getrlimit         191     /* SuS compliant getrlimit */
-#define __NR_lchown            198
-#define __NR_getuid            199
-#define __NR_getgid            200
-#define __NR_geteuid           201
-#define __NR_getegid           202
-#define __NR_setreuid                  203
-#define __NR_setregid                  204
-#define __NR_getgroups         205
-#define __NR_setgroups         206
-#define __NR_fchown            207
-#define __NR_setresuid         208
-#define __NR_getresuid         209
-#define __NR_setresgid         210
-#define __NR_getresgid         211
-#define __NR_chown             212
-#define __NR_setuid            213
-#define __NR_setgid            214
-#define __NR_setfsuid                  215
-#define __NR_setfsgid                  216
-#define __NR_newfstatat                293
-
-#endif
-
-#endif /* _UAPI_ASM_S390_UNISTD_H_ */
index 1d9199e1c2ad45531b21f5300439b5ef18943193..0dfe4d3f74e24d6655fc40f0460b9e489fb9ef69 100644 (file)
 
 #define X86_FEATURE_MBA                        ( 7*32+18) /* Memory Bandwidth Allocation */
 #define X86_FEATURE_RSB_CTXSW          ( 7*32+19) /* "" Fill RSB on context switches */
+#define X86_FEATURE_SEV                        ( 7*32+20) /* AMD Secure Encrypted Virtualization */
 
 #define X86_FEATURE_USE_IBPB           ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
 
index ac3c6503ca27f156ddbc3dd304bbf91671a9ac2a..536ee4febd746b7d93b4066919d971452df560cc 100644 (file)
@@ -86,6 +86,62 @@ enum i915_mocs_table_index {
        I915_MOCS_CACHED,
 };
 
+/*
+ * Different engines serve different roles, and there may be more than one
+ * engine serving each role. enum drm_i915_gem_engine_class provides a
+ * classification of the role of the engine, which may be used when requesting
+ * operations to be performed on a certain subset of engines, or for providing
+ * information about that group.
+ */
+enum drm_i915_gem_engine_class {
+       I915_ENGINE_CLASS_RENDER        = 0,
+       I915_ENGINE_CLASS_COPY          = 1,
+       I915_ENGINE_CLASS_VIDEO         = 2,
+       I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
+
+       I915_ENGINE_CLASS_INVALID       = -1
+};
+
+/**
+ * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
+ *
+ */
+
+enum drm_i915_pmu_engine_sample {
+       I915_SAMPLE_BUSY = 0,
+       I915_SAMPLE_WAIT = 1,
+       I915_SAMPLE_SEMA = 2
+};
+
+#define I915_PMU_SAMPLE_BITS (4)
+#define I915_PMU_SAMPLE_MASK (0xf)
+#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
+#define I915_PMU_CLASS_SHIFT \
+       (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
+
+#define __I915_PMU_ENGINE(class, instance, sample) \
+       ((class) << I915_PMU_CLASS_SHIFT | \
+       (instance) << I915_PMU_SAMPLE_BITS | \
+       (sample))
+
+#define I915_PMU_ENGINE_BUSY(class, instance) \
+       __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
+
+#define I915_PMU_ENGINE_WAIT(class, instance) \
+       __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
+
+#define I915_PMU_ENGINE_SEMA(class, instance) \
+       __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
+
+#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
+
+#define I915_PMU_ACTUAL_FREQUENCY      __I915_PMU_OTHER(0)
+#define I915_PMU_REQUESTED_FREQUENCY   __I915_PMU_OTHER(1)
+#define I915_PMU_INTERRUPTS            __I915_PMU_OTHER(2)
+#define I915_PMU_RC6_RESIDENCY         __I915_PMU_OTHER(3)
+
+#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
+
 /* Each region is a minimum of 16k, and there are at most 255 of them.
  */
 #define I915_NR_TEX_REGIONS 255        /* table size 2k - maximum due to use
@@ -450,6 +506,27 @@ typedef struct drm_i915_irq_wait {
  */
 #define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49
 
+/*
+ * Query whether every context (both per-file default and user created) is
+ * isolated (insofar as HW supports). If this parameter is not true, then
+ * freshly created contexts may inherit values from an existing context,
+ * rather than default HW values. If true, it also ensures (insofar as HW
+ * supports) that all state set by this context will not leak to any other
+ * context.
+ *
+ * As not every engine across every gen supports contexts, the returned
+ * value reports context-isolation support per engine as a bitmask, with
+ * the bit for each engine class set if that class supports
+ * isolation.
+ */
+#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
+
+/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
+ * registers. This used to be fixed per platform, but from CNL onwards it
+ * may vary depending on the part.
+ */
+#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
+
 typedef struct drm_i915_getparam {
        __s32 param;
        /*
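
The PMU encoding above packs (class, instance, sample) into a single event config: sample in bits 0-3, instance in bits 4-11, class from bit 12 upwards. A worked expansion of the macros as defined:

    /* I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_VIDEO, 1)
     *   = __I915_PMU_ENGINE(2, 1, I915_SAMPLE_BUSY)
     *   = (2 << 12) | (1 << 4) | 0
     *   = 0x2010
     *
     * Non-engine events start just past the largest possible engine
     * encoding, e.g. I915_PMU_INTERRUPTS = __I915_PMU_OTHER(2). */
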
index 8616131e2c61d9ff30a204144a02d2450f710461..6d9447700e18c983804e1fecc4a6854e138d10f6 100644 (file)
@@ -163,6 +163,7 @@ enum {
        IFLA_IF_NETNSID,
        IFLA_CARRIER_UP_COUNT,
        IFLA_CARRIER_DOWN_COUNT,
+       IFLA_NEW_IFINDEX,
        __IFLA_MAX
 };
 
index 8fb90a0819c3939eb2a74c6053c9dc5d586d9eab..0fb5ef939732517293f222f2c85d88f2b4c1e973 100644 (file)
@@ -1362,6 +1362,96 @@ struct kvm_s390_ucas_mapping {
 /* Available with KVM_CAP_S390_CMMA_MIGRATION */
 #define KVM_S390_GET_CMMA_BITS      _IOWR(KVMIO, 0xb8, struct kvm_s390_cmma_log)
 #define KVM_S390_SET_CMMA_BITS      _IOW(KVMIO, 0xb9, struct kvm_s390_cmma_log)
+/* Memory Encryption Commands */
+#define KVM_MEMORY_ENCRYPT_OP      _IOWR(KVMIO, 0xba, unsigned long)
+
+struct kvm_enc_region {
+       __u64 addr;
+       __u64 size;
+};
+
+#define KVM_MEMORY_ENCRYPT_REG_REGION    _IOR(KVMIO, 0xbb, struct kvm_enc_region)
+#define KVM_MEMORY_ENCRYPT_UNREG_REGION  _IOR(KVMIO, 0xbc, struct kvm_enc_region)
+
+/* Secure Encrypted Virtualization command */
+enum sev_cmd_id {
+       /* Guest initialization commands */
+       KVM_SEV_INIT = 0,
+       KVM_SEV_ES_INIT,
+       /* Guest launch commands */
+       KVM_SEV_LAUNCH_START,
+       KVM_SEV_LAUNCH_UPDATE_DATA,
+       KVM_SEV_LAUNCH_UPDATE_VMSA,
+       KVM_SEV_LAUNCH_SECRET,
+       KVM_SEV_LAUNCH_MEASURE,
+       KVM_SEV_LAUNCH_FINISH,
+       /* Guest migration commands (outgoing) */
+       KVM_SEV_SEND_START,
+       KVM_SEV_SEND_UPDATE_DATA,
+       KVM_SEV_SEND_UPDATE_VMSA,
+       KVM_SEV_SEND_FINISH,
+       /* Guest migration commands (incoming) */
+       KVM_SEV_RECEIVE_START,
+       KVM_SEV_RECEIVE_UPDATE_DATA,
+       KVM_SEV_RECEIVE_UPDATE_VMSA,
+       KVM_SEV_RECEIVE_FINISH,
+       /* Guest status and debug commands */
+       KVM_SEV_GUEST_STATUS,
+       KVM_SEV_DBG_DECRYPT,
+       KVM_SEV_DBG_ENCRYPT,
+       /* Guest certificates commands */
+       KVM_SEV_CERT_EXPORT,
+
+       KVM_SEV_NR_MAX,
+};
+
+struct kvm_sev_cmd {
+       __u32 id;
+       __u64 data;
+       __u32 error;
+       __u32 sev_fd;
+};
+
+struct kvm_sev_launch_start {
+       __u32 handle;
+       __u32 policy;
+       __u64 dh_uaddr;
+       __u32 dh_len;
+       __u64 session_uaddr;
+       __u32 session_len;
+};
+
+struct kvm_sev_launch_update_data {
+       __u64 uaddr;
+       __u32 len;
+};
+
+
+struct kvm_sev_launch_secret {
+       __u64 hdr_uaddr;
+       __u32 hdr_len;
+       __u64 guest_uaddr;
+       __u32 guest_len;
+       __u64 trans_uaddr;
+       __u32 trans_len;
+};
+
+struct kvm_sev_launch_measure {
+       __u64 uaddr;
+       __u32 len;
+};
+
+struct kvm_sev_guest_status {
+       __u32 handle;
+       __u32 policy;
+       __u32 state;
+};
+
+struct kvm_sev_dbg {
+       __u64 src_uaddr;
+       __u64 dst_uaddr;
+       __u32 len;
+};
 
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU    (1 << 0)
 #define KVM_DEV_ASSIGN_PCI_2_3         (1 << 1)
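
All of the SEV commands above are multiplexed through the single KVM_MEMORY_ENCRYPT_OP ioctl on the VM file descriptor, with kvm_sev_cmd.data pointing at the per-command payload. A hedged userspace sketch, assuming vm_fd and sev_fd are already-open descriptors and that the kernel fills in the status fields on success:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int query_sev_status(int vm_fd, int sev_fd)
    {
            struct kvm_sev_guest_status status = {};
            struct kvm_sev_cmd cmd = {
                    .id     = KVM_SEV_GUEST_STATUS,
                    .data   = (__u64)(unsigned long)&status,
                    .sev_fd = sev_fd,
            };

            /* On success the kernel fills in status.handle, .policy and
             * .state; on failure cmd.error carries the firmware error. */
            return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
    }
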
index b00b1896547e41d12013eeeda60ecc91ee6231ea..a8cb69a2657658ec41c5877f1b4a47526c8079b0 100644 (file)
@@ -852,8 +852,14 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func,
  *    This is a fairly uncommon pattern which is new for GCC 6.  As of this
  *    writing, there are 11 occurrences of it in the allmodconfig kernel.
  *
+ *    As of GCC 7 there are quite a few more of these and the 'in between' code
+ *    is significant. Especially with KASAN enabled, some of the code between
+ *    the mov and jmpq uses .rodata itself, which can confuse the search.
+ *
  *    TODO: Once we have DWARF CFI and smarter instruction decoding logic,
  *    ensure the same register is used in the mov and jump instructions.
+ *
+ *    NOTE: RETPOLINE made it harder still to decode dynamic jumps.
  */
 static struct rela *find_switch_table(struct objtool_file *file,
                                      struct symbol *func,
@@ -875,12 +881,25 @@ static struct rela *find_switch_table(struct objtool_file *file,
                                                text_rela->addend + 4);
                if (!rodata_rela)
                        return NULL;
+
                file->ignore_unreachables = true;
                return rodata_rela;
        }
 
        /* case 3 */
-       func_for_each_insn_continue_reverse(file, func, insn) {
+       /*
+        * Backward search using the @first_jump_src links: these help avoid
+        * much of the 'in between' code, which keeps the search from
+        * getting confused by it.
+        */
+       for (insn = list_prev_entry(insn, list);
+
+            &insn->list != &file->insn_list &&
+            insn->sec == func->sec &&
+            insn->offset >= func->offset;
+
+            insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
+
                if (insn->type == INSN_JUMP_DYNAMIC)
                        break;
 
@@ -910,14 +929,32 @@ static struct rela *find_switch_table(struct objtool_file *file,
        return NULL;
 }
 
+
 static int add_func_switch_tables(struct objtool_file *file,
                                  struct symbol *func)
 {
-       struct instruction *insn, *prev_jump = NULL;
+       struct instruction *insn, *last = NULL, *prev_jump = NULL;
        struct rela *rela, *prev_rela = NULL;
        int ret;
 
        func_for_each_insn(file, func, insn) {
+               if (!last)
+                       last = insn;
+
+               /*
+                * Store back-pointers for unconditional forward jumps such
+                * that find_switch_table() can back-track using those and
+                * avoid some potentially confusing code.
+                */
+               if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
+                   insn->offset > last->offset &&
+                   insn->jump_dest->offset > insn->offset &&
+                   !insn->jump_dest->first_jump_src) {
+
+                       insn->jump_dest->first_jump_src = insn;
+                       last = insn->jump_dest;
+               }
+
                if (insn->type != INSN_JUMP_DYNAMIC)
                        continue;
 
@@ -1899,13 +1936,19 @@ static bool ignore_unreachable_insn(struct instruction *insn)
                if (is_kasan_insn(insn) || is_ubsan_insn(insn))
                        return true;
 
-               if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest) {
-                       insn = insn->jump_dest;
-                       continue;
+               if (insn->type == INSN_JUMP_UNCONDITIONAL) {
+                       if (insn->jump_dest &&
+                           insn->jump_dest->func == insn->func) {
+                               insn = insn->jump_dest;
+                               continue;
+                       }
+
+                       break;
                }
 
                if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
                        break;
+
                insn = list_next_entry(insn, list);
        }
 
index dbadb304a410af0343809752bc6d2869668a9db0..23a1d065cae190c11432bd2c768f38c7f91e470b 100644 (file)
@@ -47,6 +47,7 @@ struct instruction {
        bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
        struct symbol *call_dest;
        struct instruction *jump_dest;
+       struct instruction *first_jump_src;
        struct list_head alts;
        struct symbol *func;
        struct stack_op stack_op;
index f0796a47dfa30ee6523381e069340b3eeeeeb3ef..90bb4aabe4f8d48e928c1fd4ae82d45ae7baeb50 100644 (file)
@@ -30,6 +30,10 @@ OPTIONS for 'convert'
 -i::
        Specify input perf data file path.
 
+-f::
+--force::
+       Don't complain, do it.
+
 -v::
 --verbose::
         Be more verbose (show counter open errors, etc).
index 48228de415d00620d0017e1b215d3d09a951e676..dfa6e31034371c26666429678047c037c1aae5dc 100644 (file)
@@ -10,15 +10,19 @@ PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
 
 out    := $(OUTPUT)arch/s390/include/generated/asm
 header := $(out)/syscalls_64.c
-sysdef := $(srctree)/tools/arch/s390/include/uapi/asm/unistd.h
-sysprf := $(srctree)/tools/perf/arch/s390/entry/syscalls/
+syskrn := $(srctree)/arch/s390/kernel/syscalls/syscall.tbl
+sysprf := $(srctree)/tools/perf/arch/s390/entry/syscalls
+sysdef := $(sysprf)/syscall.tbl
 systbl := $(sysprf)/mksyscalltbl
 
 # Create output directory if not already present
 _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
 
 $(header): $(sysdef) $(systbl)
-       $(Q)$(SHELL) '$(systbl)' '$(CC)' $(sysdef) > $@
+       @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
+        (diff -B $(sysdef) $(syskrn) >/dev/null) \
+        || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true
+       $(Q)$(SHELL) '$(systbl)' $(sysdef) > $@
 
 clean::
        $(call QUIET_CLEAN, s390) $(RM) $(header)
index 7fa0d0abd4196c52b633ce5a06acb115231133ea..72ecbb67637079f709262298981162b6cbb12c9f 100755 (executable)
@@ -3,25 +3,23 @@
 #
 # Generate system call table for perf
 #
-#
-# Copyright IBM Corp. 2017
+# Copyright IBM Corp. 2017, 2018
 # Author(s):  Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 #
 
-gcc=$1
-input=$2
+SYSCALL_TBL=$1
 
-if ! test -r $input; then
+if ! test -r $SYSCALL_TBL; then
        echo "Could not read input file" >&2
        exit 1
 fi
 
 create_table()
 {
-       local max_nr
+       local max_nr nr abi sc discard
 
        echo 'static const char *syscalltbl_s390_64[] = {'
-       while read sc nr; do
+       while read nr abi sc discard; do
                printf '\t[%d] = "%s",\n' $nr $sc
                max_nr=$nr
        done
@@ -29,8 +27,6 @@ create_table()
        echo "#define SYSCALLTBL_S390_64_MAX_ID $max_nr"
 }
 
-
-$gcc -m64 -E -dM -x c  $input         \
-       |sed -ne 's/^#define __NR_//p' \
-       |sort -t' ' -k2 -nu            \
+grep -E "^[[:digit:]]+[[:space:]]+(common|64)" $SYSCALL_TBL    \
+       |sort -k1 -n                                    \
        |create_table
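
The rewritten script reads the checked-in syscall.tbl (added below) instead of preprocessing unistd.h through the compiler, but its output format is unchanged: a C array mapping syscall numbers to names. For the first "common" entries of the table it would emit roughly:

    static const char *syscalltbl_s390_64[] = {
            [1] = "exit",
            [2] = "fork",
            [3] = "read",
            [4] = "write",
            /* ... one entry per "common" or "64" line ... */
    };
    #define SYSCALLTBL_S390_64_MAX_ID 380 /* highest nr; matches the deleted header's __NR_s390_sthyi */
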
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
new file mode 100644 (file)
index 0000000..b38d484
--- /dev/null
@@ -0,0 +1,390 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# System call table for s390
+#
+# Format:
+#
+# <nr> <abi> <syscall> <entry-64bit> <compat-entry>
+#
+# where <abi> can be common, 64, or 32
+
+1    common    exit                    sys_exit                        sys_exit
+2    common    fork                    sys_fork                        sys_fork
+3    common    read                    sys_read                        compat_sys_s390_read
+4    common    write                   sys_write                       compat_sys_s390_write
+5    common    open                    sys_open                        compat_sys_open
+6    common    close                   sys_close                       sys_close
+7    common    restart_syscall         sys_restart_syscall             sys_restart_syscall
+8    common    creat                   sys_creat                       compat_sys_creat
+9    common    link                    sys_link                        compat_sys_link
+10   common    unlink                  sys_unlink                      compat_sys_unlink
+11   common    execve                  sys_execve                      compat_sys_execve
+12   common    chdir                   sys_chdir                       compat_sys_chdir
+13   32                time                    -                               compat_sys_time
+14   common    mknod                   sys_mknod                       compat_sys_mknod
+15   common    chmod                   sys_chmod                       compat_sys_chmod
+16   32                lchown                  -                               compat_sys_s390_lchown16
+19   common    lseek                   sys_lseek                       compat_sys_lseek
+20   common    getpid                  sys_getpid                      sys_getpid
+21   common    mount                   sys_mount                       compat_sys_mount
+22   common    umount                  sys_oldumount                   compat_sys_oldumount
+23   32                setuid                  -                               compat_sys_s390_setuid16
+24   32                getuid                  -                               compat_sys_s390_getuid16
+25   32                stime                   -                               compat_sys_stime
+26   common    ptrace                  sys_ptrace                      compat_sys_ptrace
+27   common    alarm                   sys_alarm                       sys_alarm
+29   common    pause                   sys_pause                       sys_pause
+30   common    utime                   sys_utime                       compat_sys_utime
+33   common    access                  sys_access                      compat_sys_access
+34   common    nice                    sys_nice                        sys_nice
+36   common    sync                    sys_sync                        sys_sync
+37   common    kill                    sys_kill                        sys_kill
+38   common    rename                  sys_rename                      compat_sys_rename
+39   common    mkdir                   sys_mkdir                       compat_sys_mkdir
+40   common    rmdir                   sys_rmdir                       compat_sys_rmdir
+41   common    dup                     sys_dup                         sys_dup
+42   common    pipe                    sys_pipe                        compat_sys_pipe
+43   common    times                   sys_times                       compat_sys_times
+45   common    brk                     sys_brk                         compat_sys_brk
+46   32                setgid                  -                               compat_sys_s390_setgid16
+47   32                getgid                  -                               compat_sys_s390_getgid16
+48   common    signal                  sys_signal                      compat_sys_signal
+49   32                geteuid                 -                               compat_sys_s390_geteuid16
+50   32                getegid                 -                               compat_sys_s390_getegid16
+51   common    acct                    sys_acct                        compat_sys_acct
+52   common    umount2                 sys_umount                      compat_sys_umount
+54   common    ioctl                   sys_ioctl                       compat_sys_ioctl
+55   common    fcntl                   sys_fcntl                       compat_sys_fcntl
+57   common    setpgid                 sys_setpgid                     sys_setpgid
+60   common    umask                   sys_umask                       sys_umask
+61   common    chroot                  sys_chroot                      compat_sys_chroot
+62   common    ustat                   sys_ustat                       compat_sys_ustat
+63   common    dup2                    sys_dup2                        sys_dup2
+64   common    getppid                 sys_getppid                     sys_getppid
+65   common    getpgrp                 sys_getpgrp                     sys_getpgrp
+66   common    setsid                  sys_setsid                      sys_setsid
+67   common    sigaction               sys_sigaction                   compat_sys_sigaction
+70   32                setreuid                -                               compat_sys_s390_setreuid16
+71   32                setregid                -                               compat_sys_s390_setregid16
+72   common    sigsuspend              sys_sigsuspend                  compat_sys_sigsuspend
+73   common    sigpending              sys_sigpending                  compat_sys_sigpending
+74   common    sethostname             sys_sethostname                 compat_sys_sethostname
+75   common    setrlimit               sys_setrlimit                   compat_sys_setrlimit
+76   32                getrlimit               -                               compat_sys_old_getrlimit
+77   common    getrusage               sys_getrusage                   compat_sys_getrusage
+78   common    gettimeofday            sys_gettimeofday                compat_sys_gettimeofday
+79   common    settimeofday            sys_settimeofday                compat_sys_settimeofday
+80   32                getgroups               -                               compat_sys_s390_getgroups16
+81   32                setgroups               -                               compat_sys_s390_setgroups16
+83   common    symlink                 sys_symlink                     compat_sys_symlink
+85   common    readlink                sys_readlink                    compat_sys_readlink
+86   common    uselib                  sys_uselib                      compat_sys_uselib
+87   common    swapon                  sys_swapon                      compat_sys_swapon
+88   common    reboot                  sys_reboot                      compat_sys_reboot
+89   common    readdir                 -                               compat_sys_old_readdir
+90   common    mmap                    sys_old_mmap                    compat_sys_s390_old_mmap
+91   common    munmap                  sys_munmap                      compat_sys_munmap
+92   common    truncate                sys_truncate                    compat_sys_truncate
+93   common    ftruncate               sys_ftruncate                   compat_sys_ftruncate
+94   common    fchmod                  sys_fchmod                      sys_fchmod
+95   32                fchown                  -                               compat_sys_s390_fchown16
+96   common    getpriority             sys_getpriority                 sys_getpriority
+97   common    setpriority             sys_setpriority                 sys_setpriority
+99   common    statfs                  sys_statfs                      compat_sys_statfs
+100  common    fstatfs                 sys_fstatfs                     compat_sys_fstatfs
+101  32                ioperm                  -                               -
+102  common    socketcall              sys_socketcall                  compat_sys_socketcall
+103  common    syslog                  sys_syslog                      compat_sys_syslog
+104  common    setitimer               sys_setitimer                   compat_sys_setitimer
+105  common    getitimer               sys_getitimer                   compat_sys_getitimer
+106  common    stat                    sys_newstat                     compat_sys_newstat
+107  common    lstat                   sys_newlstat                    compat_sys_newlstat
+108  common    fstat                   sys_newfstat                    compat_sys_newfstat
+110  common    lookup_dcookie          sys_lookup_dcookie              compat_sys_lookup_dcookie
+111  common    vhangup                 sys_vhangup                     sys_vhangup
+112  common    idle                    -                               -
+114  common    wait4                   sys_wait4                       compat_sys_wait4
+115  common    swapoff                 sys_swapoff                     compat_sys_swapoff
+116  common    sysinfo                 sys_sysinfo                     compat_sys_sysinfo
+117  common    ipc                     sys_s390_ipc                    compat_sys_s390_ipc
+118  common    fsync                   sys_fsync                       sys_fsync
+119  common    sigreturn               sys_sigreturn                   compat_sys_sigreturn
+120  common    clone                   sys_clone                       compat_sys_clone
+121  common    setdomainname           sys_setdomainname               compat_sys_setdomainname
+122  common    uname                   sys_newuname                    compat_sys_newuname
+124  common    adjtimex                sys_adjtimex                    compat_sys_adjtimex
+125  common    mprotect                sys_mprotect                    compat_sys_mprotect
+126  common    sigprocmask             sys_sigprocmask                 compat_sys_sigprocmask
+127  common    create_module           -                               -
+128  common    init_module             sys_init_module                 compat_sys_init_module
+129  common    delete_module           sys_delete_module               compat_sys_delete_module
+130  common    get_kernel_syms         -                               -
+131  common    quotactl                sys_quotactl                    compat_sys_quotactl
+132  common    getpgid                 sys_getpgid                     sys_getpgid
+133  common    fchdir                  sys_fchdir                      sys_fchdir
+134  common    bdflush                 sys_bdflush                     compat_sys_bdflush
+135  common    sysfs                   sys_sysfs                       compat_sys_sysfs
+136  common    personality             sys_s390_personality            sys_s390_personality
+137  common    afs_syscall             -                               -
+138  32                setfsuid                -                               compat_sys_s390_setfsuid16
+139  32                setfsgid                -                               compat_sys_s390_setfsgid16
+140  32                _llseek                 -                               compat_sys_llseek
+141  common    getdents                sys_getdents                    compat_sys_getdents
+142  32                _newselect              -                               compat_sys_select
+142  64                select                  sys_select                      -
+143  common    flock                   sys_flock                       sys_flock
+144  common    msync                   sys_msync                       compat_sys_msync
+145  common    readv                   sys_readv                       compat_sys_readv
+146  common    writev                  sys_writev                      compat_sys_writev
+147  common    getsid                  sys_getsid                      sys_getsid
+148  common    fdatasync               sys_fdatasync                   sys_fdatasync
+149  common    _sysctl                 sys_sysctl                      compat_sys_sysctl
+150  common    mlock                   sys_mlock                       compat_sys_mlock
+151  common    munlock                 sys_munlock                     compat_sys_munlock
+152  common    mlockall                sys_mlockall                    sys_mlockall
+153  common    munlockall              sys_munlockall                  sys_munlockall
+154  common    sched_setparam          sys_sched_setparam              compat_sys_sched_setparam
+155  common    sched_getparam          sys_sched_getparam              compat_sys_sched_getparam
+156  common    sched_setscheduler      sys_sched_setscheduler          compat_sys_sched_setscheduler
+157  common    sched_getscheduler      sys_sched_getscheduler          sys_sched_getscheduler
+158  common    sched_yield             sys_sched_yield                 sys_sched_yield
+159  common    sched_get_priority_max  sys_sched_get_priority_max      sys_sched_get_priority_max
+160  common    sched_get_priority_min  sys_sched_get_priority_min      sys_sched_get_priority_min
+161  common    sched_rr_get_interval   sys_sched_rr_get_interval       compat_sys_sched_rr_get_interval
+162  common    nanosleep               sys_nanosleep                   compat_sys_nanosleep
+163  common    mremap                  sys_mremap                      compat_sys_mremap
+164  32                setresuid               -                               compat_sys_s390_setresuid16
+165  32                getresuid               -                               compat_sys_s390_getresuid16
+167  common    query_module            -                               -
+168  common    poll                    sys_poll                        compat_sys_poll
+169  common    nfsservctl              -                               -
+170  32                setresgid               -                               compat_sys_s390_setresgid16
+171  32                getresgid               -                               compat_sys_s390_getresgid16
+172  common    prctl                   sys_prctl                       compat_sys_prctl
+173  common    rt_sigreturn            sys_rt_sigreturn                compat_sys_rt_sigreturn
+174  common    rt_sigaction            sys_rt_sigaction                compat_sys_rt_sigaction
+175  common    rt_sigprocmask          sys_rt_sigprocmask              compat_sys_rt_sigprocmask
+176  common    rt_sigpending           sys_rt_sigpending               compat_sys_rt_sigpending
+177  common    rt_sigtimedwait         sys_rt_sigtimedwait             compat_sys_rt_sigtimedwait
+178  common    rt_sigqueueinfo         sys_rt_sigqueueinfo             compat_sys_rt_sigqueueinfo
+179  common    rt_sigsuspend           sys_rt_sigsuspend               compat_sys_rt_sigsuspend
+180  common    pread64                 sys_pread64                     compat_sys_s390_pread64
+181  common    pwrite64                sys_pwrite64                    compat_sys_s390_pwrite64
+182  32                chown                   -                               compat_sys_s390_chown16
+183  common    getcwd                  sys_getcwd                      compat_sys_getcwd
+184  common    capget                  sys_capget                      compat_sys_capget
+185  common    capset                  sys_capset                      compat_sys_capset
+186  common    sigaltstack             sys_sigaltstack                 compat_sys_sigaltstack
+187  common    sendfile                sys_sendfile64                  compat_sys_sendfile
+188  common    getpmsg                 -                               -
+189  common    putpmsg                 -                               -
+190  common    vfork                   sys_vfork                       sys_vfork
+191  32                ugetrlimit              -                               compat_sys_getrlimit
+191  64                getrlimit               sys_getrlimit                   -
+192  32                mmap2                   -                               compat_sys_s390_mmap2
+193  32                truncate64              -                               compat_sys_s390_truncate64
+194  32                ftruncate64             -                               compat_sys_s390_ftruncate64
+195  32                stat64                  -                               compat_sys_s390_stat64
+196  32                lstat64                 -                               compat_sys_s390_lstat64
+197  32                fstat64                 -                               compat_sys_s390_fstat64
+198  32                lchown32                -                               compat_sys_lchown
+198  64                lchown                  sys_lchown                      -
+199  32                getuid32                -                               sys_getuid
+199  64                getuid                  sys_getuid                      -
+200  32                getgid32                -                               sys_getgid
+200  64                getgid                  sys_getgid                      -
+201  32                geteuid32               -                               sys_geteuid
+201  64                geteuid                 sys_geteuid                     -
+202  32                getegid32               -                               sys_getegid
+202  64                getegid                 sys_getegid                     -
+203  32                setreuid32              -                               sys_setreuid
+203  64                setreuid                sys_setreuid                    -
+204  32                setregid32              -                               sys_setregid
+204  64                setregid                sys_setregid                    -
+205  32                getgroups32             -                               compat_sys_getgroups
+205  64                getgroups               sys_getgroups                   -
+206  32                setgroups32             -                               compat_sys_setgroups
+206  64                setgroups               sys_setgroups                   -
+207  32                fchown32                -                               sys_fchown
+207  64                fchown                  sys_fchown                      -
+208  32                setresuid32             -                               sys_setresuid
+208  64                setresuid               sys_setresuid                   -
+209  32                getresuid32             -                               compat_sys_getresuid
+209  64                getresuid               sys_getresuid                   -
+210  32                setresgid32             -                               sys_setresgid
+210  64                setresgid               sys_setresgid                   -
+211  32                getresgid32             -                               compat_sys_getresgid
+211  64                getresgid               sys_getresgid                   -
+212  32                chown32                 -                               compat_sys_chown
+212  64                chown                   sys_chown                       -
+213  32                setuid32                -                               sys_setuid
+213  64                setuid                  sys_setuid                      -
+214  32                setgid32                -                               sys_setgid
+214  64                setgid                  sys_setgid                      -
+215  32                setfsuid32              -                               sys_setfsuid
+215  64                setfsuid                sys_setfsuid                    -
+216  32                setfsgid32              -                               sys_setfsgid
+216  64                setfsgid                sys_setfsgid                    -
+217  common    pivot_root              sys_pivot_root                  compat_sys_pivot_root
+218  common    mincore                 sys_mincore                     compat_sys_mincore
+219  common    madvise                 sys_madvise                     compat_sys_madvise
+220  common    getdents64              sys_getdents64                  compat_sys_getdents64
+221  32                fcntl64                 -                               compat_sys_fcntl64
+222  common    readahead               sys_readahead                   compat_sys_s390_readahead
+223  32                sendfile64              -                               compat_sys_sendfile64
+224  common    setxattr                sys_setxattr                    compat_sys_setxattr
+225  common    lsetxattr               sys_lsetxattr                   compat_sys_lsetxattr
+226  common    fsetxattr               sys_fsetxattr                   compat_sys_fsetxattr
+227  common    getxattr                sys_getxattr                    compat_sys_getxattr
+228  common    lgetxattr               sys_lgetxattr                   compat_sys_lgetxattr
+229  common    fgetxattr               sys_fgetxattr                   compat_sys_fgetxattr
+230  common    listxattr               sys_listxattr                   compat_sys_listxattr
+231  common    llistxattr              sys_llistxattr                  compat_sys_llistxattr
+232  common    flistxattr              sys_flistxattr                  compat_sys_flistxattr
+233  common    removexattr             sys_removexattr                 compat_sys_removexattr
+234  common    lremovexattr            sys_lremovexattr                compat_sys_lremovexattr
+235  common    fremovexattr            sys_fremovexattr                compat_sys_fremovexattr
+236  common    gettid                  sys_gettid                      sys_gettid
+237  common    tkill                   sys_tkill                       sys_tkill
+238  common    futex                   sys_futex                       compat_sys_futex
+239  common    sched_setaffinity       sys_sched_setaffinity           compat_sys_sched_setaffinity
+240  common    sched_getaffinity       sys_sched_getaffinity           compat_sys_sched_getaffinity
+241  common    tgkill                  sys_tgkill                      sys_tgkill
+243  common    io_setup                sys_io_setup                    compat_sys_io_setup
+244  common    io_destroy              sys_io_destroy                  compat_sys_io_destroy
+245  common    io_getevents            sys_io_getevents                compat_sys_io_getevents
+246  common    io_submit               sys_io_submit                   compat_sys_io_submit
+247  common    io_cancel               sys_io_cancel                   compat_sys_io_cancel
+248  common    exit_group              sys_exit_group                  sys_exit_group
+249  common    epoll_create            sys_epoll_create                sys_epoll_create
+250  common    epoll_ctl               sys_epoll_ctl                   compat_sys_epoll_ctl
+251  common    epoll_wait              sys_epoll_wait                  compat_sys_epoll_wait
+252  common    set_tid_address         sys_set_tid_address             compat_sys_set_tid_address
+253  common    fadvise64               sys_fadvise64_64                compat_sys_s390_fadvise64
+254  common    timer_create            sys_timer_create                compat_sys_timer_create
+255  common    timer_settime           sys_timer_settime               compat_sys_timer_settime
+256  common    timer_gettime           sys_timer_gettime               compat_sys_timer_gettime
+257  common    timer_getoverrun        sys_timer_getoverrun            sys_timer_getoverrun
+258  common    timer_delete            sys_timer_delete                sys_timer_delete
+259  common    clock_settime           sys_clock_settime               compat_sys_clock_settime
+260  common    clock_gettime           sys_clock_gettime               compat_sys_clock_gettime
+261  common    clock_getres            sys_clock_getres                compat_sys_clock_getres
+262  common    clock_nanosleep         sys_clock_nanosleep             compat_sys_clock_nanosleep
+264  32                fadvise64_64            -                               compat_sys_s390_fadvise64_64
+265  common    statfs64                sys_statfs64                    compat_sys_statfs64
+266  common    fstatfs64               sys_fstatfs64                   compat_sys_fstatfs64
+267  common    remap_file_pages        sys_remap_file_pages            compat_sys_remap_file_pages
+268  common    mbind                   sys_mbind                       compat_sys_mbind
+269  common    get_mempolicy           sys_get_mempolicy               compat_sys_get_mempolicy
+270  common    set_mempolicy           sys_set_mempolicy               compat_sys_set_mempolicy
+271  common    mq_open                 sys_mq_open                     compat_sys_mq_open
+272  common    mq_unlink               sys_mq_unlink                   compat_sys_mq_unlink
+273  common    mq_timedsend            sys_mq_timedsend                compat_sys_mq_timedsend
+274  common    mq_timedreceive         sys_mq_timedreceive             compat_sys_mq_timedreceive
+275  common    mq_notify               sys_mq_notify                   compat_sys_mq_notify
+276  common    mq_getsetattr           sys_mq_getsetattr               compat_sys_mq_getsetattr
+277  common    kexec_load              sys_kexec_load                  compat_sys_kexec_load
+278  common    add_key                 sys_add_key                     compat_sys_add_key
+279  common    request_key             sys_request_key                 compat_sys_request_key
+280  common    keyctl                  sys_keyctl                      compat_sys_keyctl
+281  common    waitid                  sys_waitid                      compat_sys_waitid
+282  common    ioprio_set              sys_ioprio_set                  sys_ioprio_set
+283  common    ioprio_get              sys_ioprio_get                  sys_ioprio_get
+284  common    inotify_init            sys_inotify_init                sys_inotify_init
+285  common    inotify_add_watch       sys_inotify_add_watch           compat_sys_inotify_add_watch
+286  common    inotify_rm_watch        sys_inotify_rm_watch            sys_inotify_rm_watch
+287  common    migrate_pages           sys_migrate_pages               compat_sys_migrate_pages
+288  common    openat                  sys_openat                      compat_sys_openat
+289  common    mkdirat                 sys_mkdirat                     compat_sys_mkdirat
+290  common    mknodat                 sys_mknodat                     compat_sys_mknodat
+291  common    fchownat                sys_fchownat                    compat_sys_fchownat
+292  common    futimesat               sys_futimesat                   compat_sys_futimesat
+293  32                fstatat64               -                               compat_sys_s390_fstatat64
+293  64                newfstatat              sys_newfstatat                  -
+294  common    unlinkat                sys_unlinkat                    compat_sys_unlinkat
+295  common    renameat                sys_renameat                    compat_sys_renameat
+296  common    linkat                  sys_linkat                      compat_sys_linkat
+297  common    symlinkat               sys_symlinkat                   compat_sys_symlinkat
+298  common    readlinkat              sys_readlinkat                  compat_sys_readlinkat
+299  common    fchmodat                sys_fchmodat                    compat_sys_fchmodat
+300  common    faccessat               sys_faccessat                   compat_sys_faccessat
+301  common    pselect6                sys_pselect6                    compat_sys_pselect6
+302  common    ppoll                   sys_ppoll                       compat_sys_ppoll
+303  common    unshare                 sys_unshare                     compat_sys_unshare
+304  common    set_robust_list         sys_set_robust_list             compat_sys_set_robust_list
+305  common    get_robust_list         sys_get_robust_list             compat_sys_get_robust_list
+306  common    splice                  sys_splice                      compat_sys_splice
+307  common    sync_file_range         sys_sync_file_range             compat_sys_s390_sync_file_range
+308  common    tee                     sys_tee                         compat_sys_tee
+309  common    vmsplice                sys_vmsplice                    compat_sys_vmsplice
+310  common    move_pages              sys_move_pages                  compat_sys_move_pages
+311  common    getcpu                  sys_getcpu                      compat_sys_getcpu
+312  common    epoll_pwait             sys_epoll_pwait                 compat_sys_epoll_pwait
+313  common    utimes                  sys_utimes                      compat_sys_utimes
+314  common    fallocate               sys_fallocate                   compat_sys_s390_fallocate
+315  common    utimensat               sys_utimensat                   compat_sys_utimensat
+316  common    signalfd                sys_signalfd                    compat_sys_signalfd
+317  common    timerfd                 -                               -
+318  common    eventfd                 sys_eventfd                     sys_eventfd
+319  common    timerfd_create          sys_timerfd_create              sys_timerfd_create
+320  common    timerfd_settime         sys_timerfd_settime             compat_sys_timerfd_settime
+321  common    timerfd_gettime         sys_timerfd_gettime             compat_sys_timerfd_gettime
+322  common    signalfd4               sys_signalfd4                   compat_sys_signalfd4
+323  common    eventfd2                sys_eventfd2                    sys_eventfd2
+324  common    inotify_init1           sys_inotify_init1               sys_inotify_init1
+325  common    pipe2                   sys_pipe2                       compat_sys_pipe2
+326  common    dup3                    sys_dup3                        sys_dup3
+327  common    epoll_create1           sys_epoll_create1               sys_epoll_create1
+328  common    preadv                  sys_preadv                      compat_sys_preadv
+329  common    pwritev                 sys_pwritev                     compat_sys_pwritev
+330  common    rt_tgsigqueueinfo       sys_rt_tgsigqueueinfo           compat_sys_rt_tgsigqueueinfo
+331  common    perf_event_open         sys_perf_event_open             compat_sys_perf_event_open
+332  common    fanotify_init           sys_fanotify_init               sys_fanotify_init
+333  common    fanotify_mark           sys_fanotify_mark               compat_sys_fanotify_mark
+334  common    prlimit64               sys_prlimit64                   compat_sys_prlimit64
+335  common    name_to_handle_at       sys_name_to_handle_at           compat_sys_name_to_handle_at
+336  common    open_by_handle_at       sys_open_by_handle_at           compat_sys_open_by_handle_at
+337  common    clock_adjtime           sys_clock_adjtime               compat_sys_clock_adjtime
+338  common    syncfs                  sys_syncfs                      sys_syncfs
+339  common    setns                   sys_setns                       sys_setns
+340  common    process_vm_readv        sys_process_vm_readv            compat_sys_process_vm_readv
+341  common    process_vm_writev       sys_process_vm_writev           compat_sys_process_vm_writev
+342  common    s390_runtime_instr      sys_s390_runtime_instr          sys_s390_runtime_instr
+343  common    kcmp                    sys_kcmp                        compat_sys_kcmp
+344  common    finit_module            sys_finit_module                compat_sys_finit_module
+345  common    sched_setattr           sys_sched_setattr               compat_sys_sched_setattr
+346  common    sched_getattr           sys_sched_getattr               compat_sys_sched_getattr
+347  common    renameat2               sys_renameat2                   compat_sys_renameat2
+348  common    seccomp                 sys_seccomp                     compat_sys_seccomp
+349  common    getrandom               sys_getrandom                   compat_sys_getrandom
+350  common    memfd_create            sys_memfd_create                compat_sys_memfd_create
+351  common    bpf                     sys_bpf                         compat_sys_bpf
+352  common    s390_pci_mmio_write     sys_s390_pci_mmio_write         compat_sys_s390_pci_mmio_write
+353  common    s390_pci_mmio_read      sys_s390_pci_mmio_read          compat_sys_s390_pci_mmio_read
+354  common    execveat                sys_execveat                    compat_sys_execveat
+355  common    userfaultfd             sys_userfaultfd                 sys_userfaultfd
+356  common    membarrier              sys_membarrier                  sys_membarrier
+357  common    recvmmsg                sys_recvmmsg                    compat_sys_recvmmsg
+358  common    sendmmsg                sys_sendmmsg                    compat_sys_sendmmsg
+359  common    socket                  sys_socket                      sys_socket
+360  common    socketpair              sys_socketpair                  compat_sys_socketpair
+361  common    bind                    sys_bind                        compat_sys_bind
+362  common    connect                 sys_connect                     compat_sys_connect
+363  common    listen                  sys_listen                      sys_listen
+364  common    accept4                 sys_accept4                     compat_sys_accept4
+365  common    getsockopt              sys_getsockopt                  compat_sys_getsockopt
+366  common    setsockopt              sys_setsockopt                  compat_sys_setsockopt
+367  common    getsockname             sys_getsockname                 compat_sys_getsockname
+368  common    getpeername             sys_getpeername                 compat_sys_getpeername
+369  common    sendto                  sys_sendto                      compat_sys_sendto
+370  common    sendmsg                 sys_sendmsg                     compat_sys_sendmsg
+371  common    recvfrom                sys_recvfrom                    compat_sys_recvfrom
+372  common    recvmsg                 sys_recvmsg                     compat_sys_recvmsg
+373  common    shutdown                sys_shutdown                    sys_shutdown
+374  common    mlock2                  sys_mlock2                      compat_sys_mlock2
+375  common    copy_file_range         sys_copy_file_range             compat_sys_copy_file_range
+376  common    preadv2                 sys_preadv2                     compat_sys_preadv2
+377  common    pwritev2                sys_pwritev2                    compat_sys_pwritev2
+378  common    s390_guarded_storage    sys_s390_guarded_storage        compat_sys_s390_guarded_storage
+379  common    statx                   sys_statx                       compat_sys_statx
+380  common    s390_sthyi              sys_s390_sthyi                  compat_sys_s390_sthyi
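
Each row above maps a syscall number and ABI column (common, 32, or 64) to its native entry point and its compat entry point; a "-" marks a slot that is not wired up for that ABI. As a rough sketch of how such rows are typically consumed (the macro and array names below are illustrative, not the actual s390 generator output):

typedef long (*syscall_fn_t)(void);

long sys_userfaultfd(void);
long sys_membarrier(void);
long sys_ni_syscall(void);              /* returns -ENOSYS for empty slots */

#define __SYSCALL(nr, entry)    [nr] = (syscall_fn_t)entry,

static const syscall_fn_t sys_call_table[] = {
        /* GNU range initializer; later entries override the default */
        [0 ... 380] = (syscall_fn_t)sys_ni_syscall,
        __SYSCALL(355, sys_userfaultfd)         /* 355 common userfaultfd */
        __SYSCALL(356, sys_membarrier)          /* 356 common membarrier  */
};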
index c0815a37fdb5a357615be3da2df2fffe62b25222..539c3d4601586ab925549e433ee0e5f0c954ae43 100644 (file)
@@ -2245,7 +2245,7 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
        c2c_browser__update_nr_entries(browser);
 
        while (1) {
-               key = hist_browser__run(browser, "? - help");
+               key = hist_browser__run(browser, "? - help", true);
 
                switch (key) {
                case 's':
@@ -2314,7 +2314,7 @@ static int perf_c2c__hists_browse(struct hists *hists)
        c2c_browser__update_nr_entries(browser);
 
        while (1) {
-               key = hist_browser__run(browser, "? - help");
+               key = hist_browser__run(browser, "? - help", true);
 
                switch (key) {
                case 'q':
index 42a52dcc41cd4f31e60910eed0adc0e0eda313a0..4ad5dc649716eb7a0a865a1e0af07175d87928c8 100644 (file)
@@ -530,7 +530,8 @@ static int report__browse_hists(struct report *rep)
        case 1:
                ret = perf_evlist__tui_browse_hists(evlist, help, NULL,
                                                    rep->min_percent,
-                                                   &session->header.env);
+                                                   &session->header.env,
+                                                   true);
                /*
                 * Usually "ret" is the last pressed key, and we only
                 * care if the key notifies us to switch data file.
index c6ccda52117d3076e9d46bc1101f090a48d32e65..b7c823ba8374fb702b4ae3d3033e79b852e4686b 100644 (file)
@@ -283,8 +283,9 @@ static void perf_top__print_sym_table(struct perf_top *top)
 
        printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
 
-       if (hists->stats.nr_lost_warned !=
-           hists->stats.nr_events[PERF_RECORD_LOST]) {
+       if (!top->record_opts.overwrite &&
+           (hists->stats.nr_lost_warned !=
+           hists->stats.nr_events[PERF_RECORD_LOST])) {
                hists->stats.nr_lost_warned =
                              hists->stats.nr_events[PERF_RECORD_LOST];
                color_fprintf(stdout, PERF_COLOR_RED,
@@ -611,7 +612,8 @@ static void *display_thread_tui(void *arg)
 
        perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
                                      top->min_percent,
-                                     &top->session->header.env);
+                                     &top->session->header.env,
+                                     !top->record_opts.overwrite);
 
        done = 1;
        return NULL;
@@ -807,15 +809,23 @@ static void perf_event__process_sample(struct perf_tool *tool,
 
 static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 {
+       struct record_opts *opts = &top->record_opts;
+       struct perf_evlist *evlist = top->evlist;
        struct perf_sample sample;
        struct perf_evsel *evsel;
+       struct perf_mmap *md;
        struct perf_session *session = top->session;
        union perf_event *event;
        struct machine *machine;
+       u64 end, start;
        int ret;
 
-       while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
-               ret = perf_evlist__parse_sample(top->evlist, event, &sample);
+       md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
+       if (perf_mmap__read_init(md, opts->overwrite, &start, &end) < 0)
+               return;
+
+       while ((event = perf_mmap__read_event(md, opts->overwrite, &start, end)) != NULL) {
+               ret = perf_evlist__parse_sample(evlist, event, &sample);
                if (ret) {
                        pr_err("Can't parse sample, err = %d\n", ret);
                        goto next_event;
@@ -869,16 +879,120 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
                } else
                        ++session->evlist->stats.nr_unknown_events;
 next_event:
-               perf_evlist__mmap_consume(top->evlist, idx);
+               perf_mmap__consume(md, opts->overwrite);
        }
+
+       perf_mmap__read_done(md);
 }
 
 static void perf_top__mmap_read(struct perf_top *top)
 {
+       bool overwrite = top->record_opts.overwrite;
+       struct perf_evlist *evlist = top->evlist;
+       unsigned long long start, end;
        int i;
 
+       start = rdclock();
+       if (overwrite)
+               perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
+
        for (i = 0; i < top->evlist->nr_mmaps; i++)
                perf_top__mmap_read_idx(top, i);
+
+       if (overwrite) {
+               perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
+               perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
+       }
+       end = rdclock();
+
+       if ((end - start) > (unsigned long long)top->delay_secs * NSEC_PER_SEC)
+       ui__warning("Reading the ring buffer is too slow.\n"
+                   "Please try increasing the period (-c),\n"
+                   "decreasing the frequency (-F), or\n"
+                   "limiting the number of CPUs (-C)\n");
+}
+
+/*
+ * Check the per-event overwrite term.
+ * perf top requires a consistent overwrite term across all events.
+ * - No event has a per-event term
+ *   E.g. "cpu/cpu-cycles/,cpu/instructions/"
+ *   Nothing changes; return 0.
+ * - All events have the same per-event term
+ *   E.g. "cpu/cpu-cycles,no-overwrite/,cpu/instructions,no-overwrite/"
+ *   Use the per-event setting to replace opts->overwrite if
+ *   they differ, then return 0.
+ * - Events have different per-event terms
+ *   E.g. "cpu/cpu-cycles,overwrite/,cpu/instructions,no-overwrite/"
+ *   Return -1.
+ * - Some events set a per-event term, but others do not.
+ *   E.g. "cpu/cpu-cycles/,cpu/instructions,no-overwrite/"
+ *   Return -1.
+ */
+static int perf_top__overwrite_check(struct perf_top *top)
+{
+       struct record_opts *opts = &top->record_opts;
+       struct perf_evlist *evlist = top->evlist;
+       struct perf_evsel_config_term *term;
+       struct list_head *config_terms;
+       struct perf_evsel *evsel;
+       int set, overwrite = -1;
+
+       evlist__for_each_entry(evlist, evsel) {
+               set = -1;
+               config_terms = &evsel->config_terms;
+               list_for_each_entry(term, config_terms, list) {
+                       if (term->type == PERF_EVSEL__CONFIG_TERM_OVERWRITE)
+                               set = term->val.overwrite ? 1 : 0;
+               }
+
+               /* no term for current and previous event (likely) */
+               if ((overwrite < 0) && (set < 0))
+                       continue;
+
+               /* has term for both current and previous event, compare */
+               if ((overwrite >= 0) && (set >= 0) && (overwrite != set))
+                       return -1;
+
+               /* no term for current event but has term for previous one */
+               if ((overwrite >= 0) && (set < 0))
+                       return -1;
+
+               /* has term for current event */
+               if ((overwrite < 0) && (set >= 0)) {
+                       /* if it's first event, set overwrite */
+                       if (evsel == perf_evlist__first(evlist))
+                               overwrite = set;
+                       else
+                               return -1;
+               }
+       }
+
+       if ((overwrite >= 0) && (opts->overwrite != overwrite))
+               opts->overwrite = overwrite;
+
+       return 0;
+}
+
+static int perf_top_overwrite_fallback(struct perf_top *top,
+                                      struct perf_evsel *evsel)
+{
+       struct record_opts *opts = &top->record_opts;
+       struct perf_evlist *evlist = top->evlist;
+       struct perf_evsel *counter;
+
+       if (!opts->overwrite)
+               return 0;
+
+       /* only fall back when first event fails */
+       if (evsel != perf_evlist__first(evlist))
+               return 0;
+
+       evlist__for_each_entry(evlist, counter)
+               counter->attr.write_backward = false;
+       opts->overwrite = false;
+       ui__warning("falling back to non-overwrite mode\n");
+       return 1;
 }
 
 static int perf_top__start_counters(struct perf_top *top)
@@ -888,12 +1002,33 @@ static int perf_top__start_counters(struct perf_top *top)
        struct perf_evlist *evlist = top->evlist;
        struct record_opts *opts = &top->record_opts;
 
+       if (perf_top__overwrite_check(top)) {
+               ui__error("perf top only supports a consistent per-event "
+                         "overwrite setting for all events\n");
+               goto out_err;
+       }
+
        perf_evlist__config(evlist, opts, &callchain_param);
 
        evlist__for_each_entry(evlist, counter) {
 try_again:
                if (perf_evsel__open(counter, top->evlist->cpus,
                                     top->evlist->threads) < 0) {
+
+                       /*
+                        * Handle the overwrite fallback specially:
+                        * perf top is the only tool that enables
+                        * overwrite mode by default, supports both
+                        * overwrite and non-overwrite mode, and
+                        * requires a consistent mode for all events.
+                        *
+                        * This may move to generic code once more
+                        * tools have a similar attribute.
+                        */
+                       if (perf_missing_features.write_backward &&
+                           perf_top_overwrite_fallback(top, counter))
+                               goto try_again;
+
                        if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
                                if (verbose > 0)
                                        ui__warning("%s\n", msg);
@@ -1033,7 +1168,7 @@ static int __cmd_top(struct perf_top *top)
 
                perf_top__mmap_read(top);
 
-               if (hits == top->samples)
+               if (opts->overwrite || (hits == top->samples))
                        ret = perf_evlist__poll(top->evlist, 100);
 
                if (resize) {
@@ -1127,6 +1262,7 @@ int cmd_top(int argc, const char **argv)
                                .uses_mmap   = true,
                        },
                        .proc_map_timeout    = 500,
+                       .overwrite      = 1,
                },
                .max_stack           = sysctl_perf_event_max_stack,
                .sym_pcnt_filter     = 5,
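
The conversion above standardizes ring-buffer reading into a fixed init/read/consume/done cycle. A minimal sketch of that cycle, using the perf_mmap API exactly as the hunks above do (the wrapper name is ours; error handling and sample parsing are elided):

static void drain_one_mmap(struct perf_mmap *md, bool overwrite)
{
        union perf_event *event;
        u64 start, end;

        /* snapshot the readable region; bail out if empty or unmapped */
        if (perf_mmap__read_init(md, overwrite, &start, &end) < 0)
                return;

        while ((event = perf_mmap__read_event(md, overwrite, &start, end)) != NULL) {
                /* parse and deliver the event here */
                perf_mmap__consume(md, overwrite);
        }

        perf_mmap__read_done(md);       /* mark the snapshot consumed */
}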
index 51abdb0a404749922ba57a7337b140f984be4126..790ec25919a0eb73fe4642ed8ad349aef4d32890 100755 (executable)
@@ -33,7 +33,6 @@ arch/s390/include/uapi/asm/kvm.h
 arch/s390/include/uapi/asm/kvm_perf.h
 arch/s390/include/uapi/asm/ptrace.h
 arch/s390/include/uapi/asm/sie.h
-arch/s390/include/uapi/asm/unistd.h
 arch/arm/include/uapi/asm/kvm.h
 arch/arm64/include/uapi/asm/kvm.h
 arch/alpha/include/uapi/asm/errno.h
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/branch.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/branch.json
new file mode 100644 (file)
index 0000000..3b62087
--- /dev/null
@@ -0,0 +1,27 @@
+[
+  {
+    "EventCode": "0x7A",
+    "EventName": "BR_INDIRECT_SPEC",
+    "BriefDescription": "Branch speculatively executed - Indirect branch"
+  },
+  {
+    "EventCode": "0xC9",
+    "EventName": "BR_COND",
+    "BriefDescription": "Conditional branch executed"
+  },
+  {
+    "EventCode": "0xCA",
+    "EventName": "BR_INDIRECT_MISPRED",
+    "BriefDescription": "Indirect branch mispredicted"
+  },
+  {
+    "EventCode": "0xCB",
+    "EventName": "BR_INDIRECT_MISPRED_ADDR",
+    "BriefDescription": "Indirect branch mispredicted because of address miscompare"
+  },
+  {
+    "EventCode": "0xCC",
+    "EventName": "BR_COND_MISPRED",
+    "BriefDescription": "Conditional branch mispredicted"
+  }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/bus.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/bus.json
new file mode 100644 (file)
index 0000000..480d9f7
--- /dev/null
@@ -0,0 +1,22 @@
+[
+  {
+    "EventCode": "0x60",
+    "EventName": "BUS_ACCESS_LD",
+    "BriefDescription": "Bus access - Read"
+  },
+  {
+    "EventCode": "0x61",
+    "EventName": "BUS_ACCESS_ST",
+    "BriefDescription": "Bus access - Write"
+  },
+  {
+    "EventCode": "0xC0",
+    "EventName": "EXT_MEM_REQ",
+    "BriefDescription": "External memory request"
+  },
+  {
+    "EventCode": "0xC1",
+    "EventName": "EXT_MEM_REQ_NC",
+    "BriefDescription": "Non-cacheable external memory request"
+  }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/cache.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/cache.json
new file mode 100644 (file)
index 0000000..11baad6
--- /dev/null
@@ -0,0 +1,27 @@
+[
+  {
+    "EventCode": "0xC2",
+    "EventName": "PREFETCH_LINEFILL",
+    "BriefDescription": "Linefill because of prefetch"
+  },
+  {
+    "EventCode": "0xC3",
+    "EventName": "PREFETCH_LINEFILL_DROP",
+    "BriefDescription": "Instruction Cache Throttle occurred"
+  },
+  {
+    "EventCode": "0xC4",
+    "EventName": "READ_ALLOC_ENTER",
+    "BriefDescription": "Entering read allocate mode"
+  },
+  {
+    "EventCode": "0xC5",
+    "EventName": "READ_ALLOC",
+    "BriefDescription": "Read allocate mode"
+  },
+  {
+    "EventCode": "0xC8",
+    "EventName": "EXT_SNOOP",
+    "BriefDescription": "SCU Snooped data from another CPU for this CPU"
+  }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/memory.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/memory.json
new file mode 100644 (file)
index 0000000..480d9f7
--- /dev/null
@@ -0,0 +1,22 @@
+[
+  {
+    "EventCode": "0x60",
+    "EventName": "BUS_ACCESS_LD",
+    "BriefDescription": "Bus access - Read"
+  },
+  {
+    "EventCode": "0x61",
+    "EventName": "BUS_ACCESS_ST",
+    "BriefDescription": "Bus access - Write"
+  },
+  {
+    "EventCode": "0xC0",
+    "EventName": "EXT_MEM_REQ",
+    "BriefDescription": "External memory request"
+  },
+  {
+    "EventCode": "0xC1",
+    "EventName": "EXT_MEM_REQ_NC",
+    "BriefDescription": "Non-cacheable external memory request"
+  }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/other.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/other.json
new file mode 100644 (file)
index 0000000..73a2240
--- /dev/null
@@ -0,0 +1,32 @@
+[
+  {
+    "EventCode": "0x86",
+    "EventName": "EXC_IRQ",
+    "BriefDescription": "Exception taken, IRQ"
+  },
+  {
+    "EventCode": "0x87",
+    "EventName": "EXC_FIQ",
+    "BriefDescription": "Exception taken, FIQ"
+  },
+  {
+    "EventCode": "0xC6",
+    "EventName": "PRE_DECODE_ERR",
+    "BriefDescription": "Pre-decode error"
+  },
+  {
+    "EventCode": "0xD0",
+    "EventName": "L1I_CACHE_ERR",
+    "BriefDescription": "L1 Instruction Cache (data or tag) memory error"
+  },
+  {
+    "EventCode": "0xD1",
+    "EventName": "L1D_CACHE_ERR",
+    "BriefDescription": "L1 Data Cache (data, tag or dirty) memory error, correctable or non-correctable"
+  },
+  {
+    "EventCode": "0xD2",
+    "EventName": "TLB_ERR",
+    "BriefDescription": "TLB memory error"
+  }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/cortex-a53/pipeline.json b/tools/perf/pmu-events/arch/arm64/cortex-a53/pipeline.json
new file mode 100644 (file)
index 0000000..3149fb9
--- /dev/null
@@ -0,0 +1,52 @@
+[
+  {
+    "EventCode": "0xC7",
+    "EventName": "STALL_SB_FULL",
+    "BriefDescription": "Data Write operation that stalls the pipeline because the store buffer is full"
+  },
+  {
+    "EventCode": "0xE0",
+    "EventName": "OTHER_IQ_DEP_STALL",
+    "BriefDescription": "Cycles that the DPU IQ is empty and that is not because of a recent micro-TLB miss, instruction cache miss or pre-decode error"
+  },
+  {
+    "EventCode": "0xE1",
+    "EventName": "IC_DEP_STALL",
+    "BriefDescription": "Cycles the DPU IQ is empty and there is an instruction cache miss being processed"
+  },
+  {
+    "EventCode": "0xE2",
+    "EventName": "IUTLB_DEP_STALL",
+    "BriefDescription": "Cycles the DPU IQ is empty and there is an instruction micro-TLB miss being processed"
+  },
+  {
+    "EventCode": "0xE3",
+    "EventName": "DECODE_DEP_STALL",
+    "BriefDescription": "Cycles the DPU IQ is empty and there is a pre-decode error being processed"
+  },
+  {
+    "EventCode": "0xE4",
+    "EventName": "OTHER_INTERLOCK_STALL",
+    "BriefDescription": "Cycles there is an interlock other than Advanced SIMD/Floating-point instructions or load/store instruction"
+  },
+  {
+    "EventCode": "0xE5",
+    "EventName": "AGU_DEP_STALL",
+    "BriefDescription": "Cycles there is an interlock for a load/store instruction waiting for data to calculate the address in the AGU"
+  },
+  {
+    "EventCode": "0xE6",
+    "EventName": "SIMD_DEP_STALL",
+    "BriefDescription": "Cycles there is an interlock for an Advanced SIMD/Floating-point operation"
+  },
+  {
+    "EventCode": "0xE7",
+    "EventName": "LD_DEP_STALL",
+    "BriefDescription": "Cycles there is a stall in the Wr stage because of a load miss"
+  },
+  {
+    "EventCode": "0xE8",
+    "EventName": "ST_DEP_STALL",
+    "BriefDescription": "Cycles there is a stall in the Wr stage because of a store"
+  }
+]
index 219d6756134ee068797786a5caa4463f1506d2c5..e61c9ca6cf9e67fe52605b3b277d1b5fb1157782 100644 (file)
@@ -13,3 +13,4 @@
 #
 #Family-model,Version,Filename,EventType
 0x00000000420f5160,v1,cavium,core
+0x00000000410fd03[[:xdigit:]],v1,cortex-a53,core
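
The new mapfile row keys the cortex-a53 event directory off the CPU identifier: the pattern 0x00000000410fd03[[:xdigit:]] matches any revision of the Cortex-A53 MIDR. A toy, self-contained illustration of that match using POSIX regexec(); the program is ours, not perf's own lookup code:

#include <regex.h>
#include <stdio.h>

int main(void)
{
        const char *pattern = "0x00000000410fd03[[:xdigit:]]"; /* row added above */
        const char *midr    = "0x00000000410fd034";            /* Cortex-A53 r0p4 */
        regex_t re;

        if (regcomp(&re, pattern, REG_EXTENDED) != 0)
                return 1;
        printf("%s\n", regexec(&re, midr, 0, NULL, 0) == 0 ?
               "matched: use cortex-a53 events" : "no match");
        regfree(&re);
        return 0;
}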
index 4035d43523c3ed952ba53201e7666bc87f19ebd7..e0b1b414d466b4601fb9033717b9bed5f0bd9c3e 100644 (file)
@@ -31,10 +31,12 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
        int i;
 
        for (i = 0; i < evlist->nr_mmaps; i++) {
+               struct perf_mmap *map = &evlist->overwrite_mmap[i];
                union perf_event *event;
+               u64 start, end;
 
-               perf_mmap__read_catchup(&evlist->overwrite_mmap[i]);
-               while ((event = perf_mmap__read_backward(&evlist->overwrite_mmap[i])) != NULL) {
+               perf_mmap__read_init(map, true, &start, &end);
+               while ((event = perf_mmap__read_event(map, true, &start, end)) != NULL) {
                        const u32 type = event->header.type;
 
                        switch (type) {
@@ -49,6 +51,7 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
                                return TEST_FAIL;
                        }
                }
+               perf_mmap__read_done(map);
        }
        return TEST_OK;
 }
index 8b3da21a08f19a110a3e9c2519495460da2eb310..c446c894b2973ab7e89d8d910e574f2206cc3ace 100755 (executable)
@@ -22,10 +22,23 @@ trace_libc_inet_pton_backtrace() {
        expected[4]="rtt min.*"
        expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)"
        expected[6]=".*inet_pton[[:space:]]\($libc\)$"
-       expected[7]="getaddrinfo[[:space:]]\($libc\)$"
-       expected[8]=".*\(.*/bin/ping.*\)$"
-
-       perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
+       case "$(uname -m)" in
+       s390x)
+               eventattr='call-graph=dwarf'
+               expected[7]="gaih_inet[[:space:]]\(inlined\)$"
+               expected[8]="__GI_getaddrinfo[[:space:]]\(inlined\)$"
+               expected[9]="main[[:space:]]\(.*/bin/ping.*\)$"
+               expected[10]="__libc_start_main[[:space:]]\($libc\)$"
+               expected[11]="_start[[:space:]]\(.*/bin/ping.*\)$"
+               ;;
+       *)
+               eventattr='max-stack=3'
+               expected[7]="getaddrinfo[[:space:]]\($libc\)$"
+               expected[8]=".*\(.*/bin/ping.*\)$"
+               ;;
+       esac
+
+       perf trace --no-syscalls -e probe_libc:inet_pton/$eventattr/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
                echo $line
                echo "$line" | egrep -q "${expected[$idx]}"
                if [ $? -ne 0 ] ; then
@@ -33,7 +46,7 @@ trace_libc_inet_pton_backtrace() {
                        exit 1
                fi
                let idx+=1
-               [ $idx -eq 9 ] && break
+               [ -z "${expected[$idx]}" ] && break
        done
 }
 
index 68146f4620a5730028c24c364001ce5e21766aad..6495ee55d9c38feb75e207549e5634e36fd2c378 100644 (file)
@@ -608,7 +608,8 @@ static int hist_browser__title(struct hist_browser *browser, char *bf, size_t si
        return browser->title ? browser->title(browser, bf, size) : 0;
 }
 
-int hist_browser__run(struct hist_browser *browser, const char *help)
+int hist_browser__run(struct hist_browser *browser, const char *help,
+                     bool warn_lost_event)
 {
        int key;
        char title[160];
@@ -638,8 +639,9 @@ int hist_browser__run(struct hist_browser *browser, const char *help)
                        nr_entries = hist_browser__nr_entries(browser);
                        ui_browser__update_nr_entries(&browser->b, nr_entries);
 
-                       if (browser->hists->stats.nr_lost_warned !=
-                           browser->hists->stats.nr_events[PERF_RECORD_LOST]) {
+                       if (warn_lost_event &&
+                           (browser->hists->stats.nr_lost_warned !=
+                           browser->hists->stats.nr_events[PERF_RECORD_LOST])) {
                                browser->hists->stats.nr_lost_warned =
                                        browser->hists->stats.nr_events[PERF_RECORD_LOST];
                                ui_browser__warn_lost_events(&browser->b);
@@ -2763,7 +2765,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                    bool left_exits,
                                    struct hist_browser_timer *hbt,
                                    float min_pcnt,
-                                   struct perf_env *env)
+                                   struct perf_env *env,
+                                   bool warn_lost_event)
 {
        struct hists *hists = evsel__hists(evsel);
        struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env);
@@ -2844,7 +2847,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
 
                nr_options = 0;
 
-               key = hist_browser__run(browser, helpline);
+               key = hist_browser__run(browser, helpline,
+                                       warn_lost_event);
 
                if (browser->he_selection != NULL) {
                        thread = hist_browser__selected_thread(browser);
@@ -3184,7 +3188,8 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
 
 static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
                                int nr_events, const char *help,
-                               struct hist_browser_timer *hbt)
+                               struct hist_browser_timer *hbt,
+                               bool warn_lost_event)
 {
        struct perf_evlist *evlist = menu->b.priv;
        struct perf_evsel *pos;
@@ -3203,7 +3208,9 @@ static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
                case K_TIMER:
                        hbt->timer(hbt->arg);
 
-                       if (!menu->lost_events_warned && menu->lost_events) {
+                       if (!menu->lost_events_warned &&
+                           menu->lost_events &&
+                           warn_lost_event) {
                                ui_browser__warn_lost_events(&menu->b);
                                menu->lost_events_warned = true;
                        }
@@ -3224,7 +3231,8 @@ static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
                        key = perf_evsel__hists_browse(pos, nr_events, help,
                                                       true, hbt,
                                                       menu->min_pcnt,
-                                                      menu->env);
+                                                      menu->env,
+                                                      warn_lost_event);
                        ui_browser__show_title(&menu->b, title);
                        switch (key) {
                        case K_TAB:
@@ -3282,7 +3290,8 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
                                           int nr_entries, const char *help,
                                           struct hist_browser_timer *hbt,
                                           float min_pcnt,
-                                          struct perf_env *env)
+                                          struct perf_env *env,
+                                          bool warn_lost_event)
 {
        struct perf_evsel *pos;
        struct perf_evsel_menu menu = {
@@ -3309,13 +3318,15 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
                        menu.b.width = line_len;
        }
 
-       return perf_evsel_menu__run(&menu, nr_entries, help, hbt);
+       return perf_evsel_menu__run(&menu, nr_entries, help,
+                                   hbt, warn_lost_event);
 }
 
 int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
                                  struct hist_browser_timer *hbt,
                                  float min_pcnt,
-                                 struct perf_env *env)
+                                 struct perf_env *env,
+                                 bool warn_lost_event)
 {
        int nr_entries = evlist->nr_entries;
 
@@ -3325,7 +3336,7 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
 
                return perf_evsel__hists_browse(first, nr_entries, help,
                                                false, hbt, min_pcnt,
-                                               env);
+                                               env, warn_lost_event);
        }
 
        if (symbol_conf.event_group) {
@@ -3342,5 +3353,6 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
        }
 
        return __perf_evlist__tui_browse_hists(evlist, nr_entries, help,
-                                              hbt, min_pcnt, env);
+                                              hbt, min_pcnt, env,
+                                              warn_lost_event);
 }
index ba431777f5590838aded811a70ad1d50ac492198..9428bee076f24cecc65581c85544f5e4ba68bd26 100644 (file)
@@ -28,7 +28,8 @@ struct hist_browser {
 
 struct hist_browser *hist_browser__new(struct hists *hists);
 void hist_browser__delete(struct hist_browser *browser);
-int hist_browser__run(struct hist_browser *browser, const char *help);
+int hist_browser__run(struct hist_browser *browser, const char *help,
+                     bool warn_lost_event);
 void hist_browser__init(struct hist_browser *browser,
                        struct hists *hists);
 #endif /* _PERF_UI_BROWSER_HISTS_H_ */
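
All of this plumbing threads one flag from the tools down into the TUI: in overwrite mode, dropping old events is expected, so the "lost events" warning should stay quiet. The gating pattern, reduced to a hedged sketch (the helper name and types are ours):

#include <stdbool.h>
#include <stdint.h>

static void maybe_warn_lost(uint32_t lost_now, uint32_t *lost_warned,
                            bool warn_lost_event)
{
        if (warn_lost_event && *lost_warned != lost_now) {
                *lost_warned = lost_now;
                /* ui_browser__warn_lost_events(...) in the real code */
        }
}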
index ac35cd214feb224cfc61ed743c257dffaad19699..e5fc14e53c0510cff4676b6620d507fbd563b9ac 100644 (file)
@@ -715,28 +715,11 @@ union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int
        return perf_mmap__read_forward(md);
 }
 
-union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
-{
-       struct perf_mmap *md = &evlist->mmap[idx];
-
-       /*
-        * No need to check messup for backward ring buffer:
-        * We can always read arbitrary long data from a backward
-        * ring buffer unless we forget to pause it before reading.
-        */
-       return perf_mmap__read_backward(md);
-}
-
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 {
        return perf_evlist__mmap_read_forward(evlist, idx);
 }
 
-void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
-{
-       perf_mmap__read_catchup(&evlist->mmap[idx]);
-}
-
 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
 {
        perf_mmap__consume(&evlist->mmap[idx], false);
index 75f8e0ad5d765f3412c5b34734bb019189056b54..336b838e6957e503da3f73276d828067c8484386 100644 (file)
@@ -133,10 +133,6 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
 
 union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
                                                 int idx);
-union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist,
-                                                 int idx);
-void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);
-
 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
 
 int perf_evlist__open(struct perf_evlist *evlist);
index ff359c9ece2e76c3d15fb8d2acf36e4c3bf8ade6..ef351688b79798a1c46b95094fb078c6daee14aa 100644 (file)
 
 #include "sane_ctype.h"
 
-static struct {
-       bool sample_id_all;
-       bool exclude_guest;
-       bool mmap2;
-       bool cloexec;
-       bool clockid;
-       bool clockid_wrong;
-       bool lbr_flags;
-       bool write_backward;
-       bool group_read;
-} perf_missing_features;
+struct perf_missing_features perf_missing_features;
 
 static clockid_t clockid;
 
index 846e416445254f6090a1280305106a6c581b715a..a7487c6d186607e9ca9fc4858c36c29f82015e1d 100644 (file)
@@ -149,6 +149,20 @@ union u64_swap {
        u32 val32[2];
 };
 
+struct perf_missing_features {
+       bool sample_id_all;
+       bool exclude_guest;
+       bool mmap2;
+       bool cloexec;
+       bool clockid;
+       bool clockid_wrong;
+       bool lbr_flags;
+       bool write_backward;
+       bool group_read;
+};
+
+extern struct perf_missing_features perf_missing_features;
+
 struct cpu_map;
 struct target;
 struct thread_map;
index f6630cb95effc353deba329a77dc6a87e0f6279f..02721b5797464605519967969e0ec78f0038123c 100644 (file)
@@ -430,7 +430,8 @@ int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
 int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
                                  struct hist_browser_timer *hbt,
                                  float min_pcnt,
-                                 struct perf_env *env);
+                                 struct perf_env *env,
+                                 bool warn_lost_event);
 int script_browse(const char *script_opt);
 #else
 static inline
@@ -438,7 +439,8 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
                                  const char *help __maybe_unused,
                                  struct hist_browser_timer *hbt __maybe_unused,
                                  float min_pcnt __maybe_unused,
-                                 struct perf_env *env __maybe_unused)
+                                 struct perf_env *env __maybe_unused,
+                                 bool warn_lost_event __maybe_unused)
 {
        return 0;
 }
index 05076e6839382e4d098562f3c42e317d038e971a..91531a7c8fbf38606ee52ca0c3fef57bb2a9cff9 100644 (file)
@@ -22,29 +22,27 @@ size_t perf_mmap__mmap_len(struct perf_mmap *map)
 
 /* When check_messup is true, 'end' must point to a good entry */
 static union perf_event *perf_mmap__read(struct perf_mmap *map,
-                                        u64 start, u64 end, u64 *prev)
+                                        u64 *startp, u64 end)
 {
        unsigned char *data = map->base + page_size;
        union perf_event *event = NULL;
-       int diff = end - start;
+       int diff = end - *startp;
 
        if (diff >= (int)sizeof(event->header)) {
                size_t size;
 
-               event = (union perf_event *)&data[start & map->mask];
+               event = (union perf_event *)&data[*startp & map->mask];
                size = event->header.size;
 
-               if (size < sizeof(event->header) || diff < (int)size) {
-                       event = NULL;
-                       goto broken_event;
-               }
+               if (size < sizeof(event->header) || diff < (int)size)
+                       return NULL;
 
                /*
                 * Event straddles the mmap boundary -- header should always
                 * be inside due to u64 alignment of output.
                 */
-               if ((start & map->mask) + size != ((start + size) & map->mask)) {
-                       unsigned int offset = start;
+               if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
+                       unsigned int offset = *startp;
                        unsigned int len = min(sizeof(*event), size), cpy;
                        void *dst = map->event_copy;
 
@@ -59,20 +57,19 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
                        event = (union perf_event *)map->event_copy;
                }
 
-               start += size;
+               *startp += size;
        }
 
-broken_event:
-       if (prev)
-               *prev = start;
-
        return event;
 }
 
+/*
+ * Legacy interface for mmap read.
+ * Don't use it; use perf_mmap__read_event() instead.
+ */
 union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
 {
        u64 head;
-       u64 old = map->prev;
 
        /*
         * Check if event was unmapped due to a POLLHUP/POLLERR.
@@ -82,13 +79,26 @@ union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
 
        head = perf_mmap__read_head(map);
 
-       return perf_mmap__read(map, old, head, &map->prev);
+       return perf_mmap__read(map, &map->prev, head);
 }
 
-union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
+/*
+ * Read events from the ring buffer one by one,
+ * returning one event per call.
+ *
+ * Usage:
+ * perf_mmap__read_init()
+ * while (event = perf_mmap__read_event()) {
+ *     // process the event
+ *     perf_mmap__consume()
+ * }
+ * perf_mmap__read_done()
+ */
+union perf_event *perf_mmap__read_event(struct perf_mmap *map,
+                                       bool overwrite,
+                                       u64 *startp, u64 end)
 {
-       u64 head, end;
-       u64 start = map->prev;
+       union perf_event *event;
 
        /*
         * Check if event was unmapped due to a POLLHUP/POLLERR.
@@ -96,40 +106,19 @@ union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
        if (!refcount_read(&map->refcnt))
                return NULL;
 
-       head = perf_mmap__read_head(map);
-       if (!head)
+       if (startp == NULL)
                return NULL;
 
-       /*
-        * 'head' pointer starts from 0. Kernel minus sizeof(record) form
-        * it each time when kernel writes to it, so in fact 'head' is
-        * negative. 'end' pointer is made manually by adding the size of
-        * the ring buffer to 'head' pointer, means the validate data can
-        * read is the whole ring buffer. If 'end' is positive, the ring
-        * buffer has not fully filled, so we must adjust 'end' to 0.
-        *
-        * However, since both 'head' and 'end' is unsigned, we can't
-        * simply compare 'end' against 0. Here we compare '-head' and
-        * the size of the ring buffer, where -head is the number of bytes
-        * kernel write to the ring buffer.
-        */
-       if (-head < (u64)(map->mask + 1))
-               end = 0;
-       else
-               end = head + map->mask + 1;
-
-       return perf_mmap__read(map, start, end, &map->prev);
-}
+       /* non-overwrite mode doesn't pause the ring buffer */
+       if (!overwrite)
+               end = perf_mmap__read_head(map);
 
-void perf_mmap__read_catchup(struct perf_mmap *map)
-{
-       u64 head;
+       event = perf_mmap__read(map, startp, end);
 
-       if (!refcount_read(&map->refcnt))
-               return;
+       if (!overwrite)
+               map->prev = *startp;
 
-       head = perf_mmap__read_head(map);
-       map->prev = head;
+       return event;
 }
 
 static bool perf_mmap__empty(struct perf_mmap *map)
@@ -267,41 +256,60 @@ static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u6
        return -1;
 }
 
-int perf_mmap__push(struct perf_mmap *md, bool overwrite,
-                   void *to, int push(void *to, void *buf, size_t size))
+/*
+ * Report the start and end of the available data in the ring buffer.
+ */
+int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
+                        u64 *startp, u64 *endp)
 {
        u64 head = perf_mmap__read_head(md);
        u64 old = md->prev;
-       u64 end = head, start = old;
        unsigned char *data = md->base + page_size;
        unsigned long size;
-       void *buf;
-       int rc = 0;
 
-       start = overwrite ? head : old;
-       end = overwrite ? old : head;
+       *startp = overwrite ? head : old;
+       *endp = overwrite ? old : head;
 
-       if (start == end)
-               return 0;
+       if (*startp == *endp)
+               return -EAGAIN;
 
-       size = end - start;
+       size = *endp - *startp;
        if (size > (unsigned long)(md->mask) + 1) {
                if (!overwrite) {
                        WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
 
                        md->prev = head;
                        perf_mmap__consume(md, overwrite);
-                       return 0;
+                       return -EAGAIN;
                }
 
                /*
                 * Backward ring buffer is full. We still have a chance to read
          * most of the data from it.
                 */
-               if (overwrite_rb_find_range(data, md->mask, head, &start, &end))
-                       return -1;
+               if (overwrite_rb_find_range(data, md->mask, head, startp, endp))
+                       return -EINVAL;
        }
 
+       return 0;
+}
+
+int perf_mmap__push(struct perf_mmap *md, bool overwrite,
+                   void *to, int push(void *to, void *buf, size_t size))
+{
+       u64 head = perf_mmap__read_head(md);
+       u64 end, start;
+       unsigned char *data = md->base + page_size;
+       unsigned long size;
+       void *buf;
+       int rc = 0;
+
+       rc = perf_mmap__read_init(md, overwrite, &start, &end);
+       if (rc < 0)
+               return (rc == -EAGAIN) ? 0 : -1;
+
+       size = end - start;
+
        if ((start & md->mask) + size != (end & md->mask)) {
                buf = &data[start & md->mask];
                size = md->mask + 1 - (start & md->mask);
@@ -327,3 +335,14 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite,
 out:
        return rc;
 }
+
+/*
+ * Mandatory for overwrite mode.
+ * Overwrite mode reads the ring buffer backward, and the last
+ * perf_mmap__read() sets map->prev to the tail; correct it to
+ * head, which is where the next read will end.
+ */
+void perf_mmap__read_done(struct perf_mmap *map)
+{
+       map->prev = perf_mmap__read_head(map);
+}
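Putting the new entry points together, a minimal overwrite-mode consumer could look like the sketch below. It follows the usage comment and the signatures introduced above; the processing step is left as a comment:

        static void drain_overwrite_mmap(struct perf_mmap *map)
        {
                union perf_event *event;
                u64 start, end;

                /* snapshot the readable region; -EAGAIN means nothing to read */
                if (perf_mmap__read_init(map, true, &start, &end) < 0)
                        return;

                while ((event = perf_mmap__read_event(map, true, &start, end)) != NULL) {
                        /* ... process the event here ... */
                        perf_mmap__consume(map, true);
                }

                /* mandatory in overwrite mode: reset map->prev to the head */
                perf_mmap__read_done(map);
        }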
index e43d7b55a55f6578615cb1597bd676bf90924a78..ec7d3a24e276fbbe16aff12abf56363cc684cce5 100644 (file)
@@ -65,8 +65,6 @@ void perf_mmap__put(struct perf_mmap *map);
 
 void perf_mmap__consume(struct perf_mmap *map, bool overwrite);
 
-void perf_mmap__read_catchup(struct perf_mmap *md);
-
 static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
 {
        struct perf_event_mmap_page *pc = mm->base;
@@ -87,11 +85,17 @@ static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
 }
 
 union perf_event *perf_mmap__read_forward(struct perf_mmap *map);
-union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
+
+union perf_event *perf_mmap__read_event(struct perf_mmap *map,
+                                       bool overwrite,
+                                       u64 *startp, u64 end);
 
 int perf_mmap__push(struct perf_mmap *md, bool backward,
                    void *to, int push(void *to, void *buf, size_t size));
 
 size_t perf_mmap__mmap_len(struct perf_mmap *map);
 
+int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
+                        u64 *startp, u64 *endp);
+void perf_mmap__read_done(struct perf_mmap *map);
 #endif /*__PERF_MMAP_H */
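In the non-overwrite (forward) case the same entry points apply, but perf_mmap__read_event() refreshes the head itself and advances map->prev internally, so the end argument passed in is ignored. A minimal sketch under that assumption:

        static union perf_event *next_forward_event(struct perf_mmap *map)
        {
                u64 start = map->prev;

                /* end (0 here) is recomputed internally when overwrite is false */
                return perf_mmap__read_event(map, false, &start, 0);
        }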
index 443892dabedbe319d8f1f582c3a255c03da5d748..1019bbc5dbd8a00ffb98fa90080fe8881f4d8a78 100644 (file)
@@ -340,35 +340,15 @@ size_t hex_width(u64 v)
        return n;
 }
 
-static int hex(char ch)
-{
-       if ((ch >= '0') && (ch <= '9'))
-               return ch - '0';
-       if ((ch >= 'a') && (ch <= 'f'))
-               return ch - 'a' + 10;
-       if ((ch >= 'A') && (ch <= 'F'))
-               return ch - 'A' + 10;
-       return -1;
-}
-
 /*
  * While we find nice hex chars, build a long_val.
  * Return number of chars processed.
  */
 int hex2u64(const char *ptr, u64 *long_val)
 {
-       const char *p = ptr;
-       *long_val = 0;
-
-       while (*p) {
-               const int hex_val = hex(*p);
+       char *p;
 
-               if (hex_val < 0)
-                       break;
-
-               *long_val = (*long_val << 4) | hex_val;
-               p++;
-       }
+       *long_val = strtoull(ptr, &p, 16);
 
        return p - ptr;
 }
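One behavioral nuance of the strtoull() rewrite: unlike the removed hand-rolled hex() loop, strtoull() also skips leading whitespace and accepts an optional 0x prefix, and those characters count toward the number of characters hex2u64() reports. A standalone illustration (not part of the patch):

        #include <stdio.h>
        #include <stdlib.h>

        int main(void)
        {
                const char *s = "dead beef";
                char *end;
                unsigned long long val = strtoull(s, &end, 16);

                /* parsing stops at the space: prints "val=dead consumed=4" */
                printf("val=%llx consumed=%td\n", val, end - s);
                return 0;
        }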
index 39fd362415cfe1a71c9c58eb634b91fc9f676bb2..0f2698f9fd6d8a2547ffe6094ced70c820a894ae 100644 (file)
@@ -57,7 +57,7 @@ volatile int gotsig;
 
 void sighandler(int sig, siginfo_t *info, void *ctx)
 {
-       struct ucontext *ucp = ctx;
+       ucontext_t *ucp = ctx;
 
        if (!testing) {
                signal(sig, SIG_DFL);
index 10ca46df144921ee3bdc60aa6974802dfffa77e8..d744991c0f4f44d56bda208ad3039ad81500f303 100644 (file)
@@ -5,16 +5,26 @@ include ../lib.mk
 
 .PHONY: all all_32 all_64 warn_32bit_failure clean
 
-TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
-                       check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \
+UNAME_M := $(shell uname -m)
+CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
+CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
+
+TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \
+                       check_initial_reg_state sigreturn iopl mpx-mini-test ioperm \
                        protection_keys test_vdso test_vsyscall
 TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
                        test_FCMOV test_FCOMI test_FISTTP \
                        vdso_restorer
-TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip 5lvl
+TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip
+# Some selftests require 32-bit support to be enabled even on 64-bit systems
+TARGETS_C_32BIT_NEEDED := ldt_gdt ptrace_syscall
 
-TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
+TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY) $(TARGETS_C_32BIT_NEEDED)
 TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
+ifeq ($(CAN_BUILD_I386)$(CAN_BUILD_X86_64),11)
+TARGETS_C_64BIT_ALL += $(TARGETS_C_32BIT_NEEDED)
+endif
+
 BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
 BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
 
@@ -23,10 +33,6 @@ BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
 
 CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie
 
-UNAME_M := $(shell uname -m)
-CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
-CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
-
 define gen-target-rule-32
 $(1) $(1)_32: $(OUTPUT)/$(1)_32
 .PHONY: $(1) $(1)_32
@@ -40,12 +46,14 @@ endef
 ifeq ($(CAN_BUILD_I386),1)
 all: all_32
 TEST_PROGS += $(BINARIES_32)
+EXTRA_CFLAGS += -DCAN_BUILD_32
 $(foreach t,$(TARGETS_C_32BIT_ALL),$(eval $(call gen-target-rule-32,$(t))))
 endif
 
 ifeq ($(CAN_BUILD_X86_64),1)
 all: all_64
 TEST_PROGS += $(BINARIES_64)
+EXTRA_CFLAGS += -DCAN_BUILD_64
 $(foreach t,$(TARGETS_C_64BIT_ALL),$(eval $(call gen-target-rule-64,$(t))))
 endif
 
index ec0f6b45ce8b4d5f303528daf6efba71258c39c6..9c0325e1ea6844f666bfdcc8204763a8614b9875 100644 (file)
@@ -315,11 +315,39 @@ static inline void *__si_bounds_upper(siginfo_t *si)
        return si->si_upper;
 }
 #else
+
+/*
+ * This deals with an old version of _sigfault in some distros:
+ *
+ * old _sigfault:
+ *     struct {
+ *             void *si_addr;
+ *     } _sigfault;
+ *
+ * new _sigfault:
+ *     struct {
+ *             void __user *_addr;
+ *             int _trapno;
+ *             short _addr_lsb;
+ *             union {
+ *                     struct {
+ *                             void __user *_lower;
+ *                             void __user *_upper;
+ *                     } _addr_bnd;
+ *                     __u32 _pkey;
+ *             };
+ *     } _sigfault;
+ */
+
 static inline void **__si_bounds_hack(siginfo_t *si)
 {
        void *sigfault = &si->_sifields._sigfault;
        void *end_sigfault = sigfault + sizeof(si->_sifields._sigfault);
-       void **__si_lower = end_sigfault;
+       int *trapno = (int *)end_sigfault;
+       /* skip _trapno (int) and _addr_lsb (short, padded to int) */
+       void **__si_lower = (void **)(trapno + 2);
 
        return __si_lower;
 }
@@ -331,7 +359,7 @@ static inline void *__si_bounds_lower(siginfo_t *si)
 
 static inline void *__si_bounds_upper(siginfo_t *si)
 {
-       return (*__si_bounds_hack(si)) + sizeof(void *);
+       return *(__si_bounds_hack(si) + 1);
 }
 #endif
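The hack above steps past _trapno and _addr_lsb by treating the end of the old struct as an int pointer. A hypothetical standalone check of that layout arithmetic (the struct mirrors the comment, not a kernel header; offsets assume x86_64):

        #include <stdio.h>
        #include <stddef.h>

        struct new_sigfault {
                void *_addr;
                int _trapno;
                short _addr_lsb;
                union {
                        struct {
                                void *_lower;
                                void *_upper;
                        } _addr_bnd;
                        unsigned int _pkey;
                };
        };

        int main(void)
        {
                /* int + short pad out to two ints, so (int *)end_of_old + 2
                 * lands on _addr_bnd._lower; expect 16 and 24 on x86_64 */
                printf("lower at %zu, upper at %zu\n",
                       offsetof(struct new_sigfault, _addr_bnd._lower),
                       offsetof(struct new_sigfault, _addr_bnd._upper));
                return 0;
        }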
 
index bc1b0735bb50ed02963e834c7dc38395f7c6d834..f15aa5a76fe3457e96e438c15e7ad40d3c7fbce0 100644 (file)
@@ -393,34 +393,6 @@ pid_t fork_lazy_child(void)
        return forkret;
 }
 
-void davecmp(void *_a, void *_b, int len)
-{
-       int i;
-       unsigned long *a = _a;
-       unsigned long *b = _b;
-
-       for (i = 0; i < len / sizeof(*a); i++) {
-               if (a[i] == b[i])
-                       continue;
-
-               dprintf3("[%3d]: a: %016lx b: %016lx\n", i, a[i], b[i]);
-       }
-}
-
-void dumpit(char *f)
-{
-       int fd = open(f, O_RDONLY);
-       char buf[100];
-       int nr_read;
-
-       dprintf2("maps fd: %d\n", fd);
-       do {
-               nr_read = read(fd, &buf[0], sizeof(buf));
-               write(1, buf, nr_read);
-       } while (nr_read > 0);
-       close(fd);
-}
-
 #define PKEY_DISABLE_ACCESS    0x1
 #define PKEY_DISABLE_WRITE     0x2
 
index a48da95c18fdf1f0ea46e7cb628ff9a9caba931b..ddfdd635de16c68b13c01a8782ebdc0ecd3b8b0d 100644 (file)
@@ -119,7 +119,9 @@ static void check_result(void)
 
 int main()
 {
+#ifdef CAN_BUILD_32
        int tmp;
+#endif
 
        sethandler(SIGTRAP, sigtrap, 0);
 
@@ -139,12 +141,13 @@ int main()
                      : : "c" (post_nop) : "r11");
        check_result();
 #endif
-
+#ifdef CAN_BUILD_32
        printf("[RUN]\tSet TF and check int80\n");
        set_eflags(get_eflags() | X86_EFLAGS_TF);
        asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
                        : INT80_CLOBBERS);
        check_result();
+#endif
 
        /*
         * This test is particularly interesting if fast syscalls use
index bf0d687c7db75e5b03cf756f731082ca08229799..64f11c8d9b767d31bb3247e7424fd8f708d85d05 100644 (file)
@@ -90,8 +90,12 @@ int main(int argc, char **argv, char **envp)
                        vdso_size += PAGE_SIZE;
                }
 
+#ifdef __i386__
                /* Glibc is likely to explode now - exit with raw syscall */
                asm volatile ("int $0x80" : : "a" (__NR_exit), "b" (!!ret));
+#else /* __x86_64__ */
+               syscall(SYS_exit, ret);
+#endif
        } else {
                int status;
 
index 29973cde06d3db08f5219dc692c82be82624e581..2352590117042ebf79ddd946a00d8c49d38d786e 100644 (file)
 # endif
 #endif
 
+/* max length of lines in /proc/self/maps - anything longer is skipped here */
+#define MAPS_LINE_LEN 128
+
 int nerrs = 0;
 
+typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
+
+getcpu_t vgetcpu;
+getcpu_t vdso_getcpu;
+
+static void *vsyscall_getcpu(void)
+{
 #ifdef __x86_64__
-# define VSYS(x) (x)
+       FILE *maps;
+       char line[MAPS_LINE_LEN];
+       bool found = false;
+
+       maps = fopen("/proc/self/maps", "r");
+       if (!maps) /* the page might still be present; ignore, as we test the vDSO here, not vsyscall */
+               return NULL;
+
+       while (fgets(line, MAPS_LINE_LEN, maps)) {
+               char r, x;
+               void *start, *end;
+               char name[MAPS_LINE_LEN];
+
+               /* sscanf() is safe here: name is as large as line, so %s cannot overflow */
+               if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
+                          &start, &end, &r, &x, name) != 5)
+                       continue;
+
+               if (strcmp(name, "[vsyscall]"))
+                       continue;
+
+               /* assume entries are OK, as we test the vDSO here, not vsyscall */
+               found = true;
+               break;
+       }
+
+       fclose(maps);
+
+       if (!found) {
+               printf("Warning: failed to find vsyscall getcpu\n");
+               return NULL;
+       }
+       return (void *) (0xffffffffff600800);
 #else
-# define VSYS(x) 0
+       return NULL;
 #endif
+}
 
-typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
-
-const getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
-getcpu_t vdso_getcpu;
 
-void fill_function_pointers()
+static void fill_function_pointers()
 {
        void *vdso = dlopen("linux-vdso.so.1",
                            RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
@@ -54,6 +93,8 @@ void fill_function_pointers()
        vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
        if (!vdso_getcpu)
                printf("Warning: failed to find getcpu in vDSO\n");
+
+       vgetcpu = (getcpu_t) vsyscall_getcpu();
 }
 
 static long sys_getcpu(unsigned * cpu, unsigned * node,
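Once fill_function_pointers() has run, each pointer is either callable or NULL. A hedged sketch of a guarded call (output format is illustrative; getcpu returns 0 on success):

        unsigned cpu = 0, node = 0;

        if (vgetcpu && vgetcpu(&cpu, &node, NULL) == 0)
                printf("vsyscall getcpu: cpu=%u node=%u\n", cpu, node);

        if (vdso_getcpu && vdso_getcpu(&cpu, &node, NULL) == 0)
                printf("vDSO getcpu: cpu=%u node=%u\n", cpu, node);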
index 7a744fa7b78655dfc9327e5f50d40dd1dd80476a..be81621446f01cf020c68d690e9772e49e736baf 100644 (file)
@@ -33,6 +33,9 @@
 # endif
 #endif
 
+/* max length of lines in /proc/self/maps - anything longer is skipped here */
+#define MAPS_LINE_LEN 128
+
 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
                       int flags)
 {
@@ -98,7 +101,7 @@ static int init_vsys(void)
 #ifdef __x86_64__
        int nerrs = 0;
        FILE *maps;
-       char line[128];
+       char line[MAPS_LINE_LEN];
        bool found = false;
 
        maps = fopen("/proc/self/maps", "r");
@@ -108,10 +111,12 @@ static int init_vsys(void)
                return 0;
        }
 
-       while (fgets(line, sizeof(line), maps)) {
+       while (fgets(line, MAPS_LINE_LEN, maps)) {
                char r, x;
                void *start, *end;
-               char name[128];
+               char name[MAPS_LINE_LEN];
+
+               /* sscanf() is safe here: name is as large as line, so %s cannot overflow */
                if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
                           &start, &end, &r, &x, name) != 5)
                        continue;