asedeno.scripts.mit.edu Git - linux.git/commitdiff
vdso: Define standardized vdso_datapage
authorVincenzo Frascino <vincenzo.frascino@arm.com>
Fri, 21 Jun 2019 09:52:28 +0000 (10:52 +0100)
committerThomas Gleixner <tglx@linutronix.de>
Sat, 22 Jun 2019 19:21:05 +0000 (21:21 +0200)
Define a common format for the vdso datapage as a preparation for sharing
the VDSO implementation as a generic library.

The datastructures are based on the current x86 layout.

[ tglx: Massaged changelog ]

Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Shijith Thotton <sthotton@marvell.com>
Tested-by: Andre Przywara <andre.przywara@arm.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-mips@vger.kernel.org
Cc: linux-kselftest@vger.kernel.org
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Mark Salyzyn <salyzyn@android.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Dmitry Safonov <0x7f454c46@gmail.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Huw Davies <huw@codeweavers.com>
Link: https://lkml.kernel.org/r/20190621095252.32307-2-vincenzo.frascino@arm.com
include/vdso/datapage.h [new file with mode: 0644]

diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
new file mode 100644 (file)
index 0000000..e6eb36c
--- /dev/null
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_DATAPAGE_H
+#define __VDSO_DATAPAGE_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bits.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+#define VDSO_BASES     (CLOCK_TAI + 1)
+#define VDSO_HRES      (BIT(CLOCK_REALTIME)            | \
+                        BIT(CLOCK_MONOTONIC)           | \
+                        BIT(CLOCK_BOOTTIME)            | \
+                        BIT(CLOCK_TAI))
+#define VDSO_COARSE    (BIT(CLOCK_REALTIME_COARSE)     | \
+                        BIT(CLOCK_MONOTONIC_COARSE))
+#define VDSO_RAW       (BIT(CLOCK_MONOTONIC_RAW))
+
+#define CS_HRES_COARSE 0
+#define CS_RAW         1
+#define CS_BASES       (CS_RAW + 1)
+
+/**
+ * struct vdso_timestamp - basetime per clock_id
+ * @sec:       seconds
+ * @nsec:      nanoseconds
+ *
+ * There is one vdso_timestamp object in vvar for each vDSO-accelerated
+ * clock_id. For high-resolution clocks, this encodes the time
+ * corresponding to vdso_data.cycle_last. For coarse clocks this encodes
+ * the actual time.
+ *
+ * To be noticed that for highres clocks nsec is left-shifted by
+ * vdso_data.cs[x].shift.
+ */
+struct vdso_timestamp {
+       u64     sec;
+       u64     nsec;
+};
+
+/**
+ * struct vdso_data - vdso datapage representation
+ * @seq:               timebase sequence counter
+ * @clock_mode:                clock mode
+ * @cycle_last:                timebase at clocksource init
+ * @mask:              clocksource mask
+ * @mult:              clocksource multiplier
+ * @shift:             clocksource shift
+ * @basetime[clock_id]:        basetime per clock_id
+ * @tz_minuteswest:    minutes west of Greenwich
+ * @tz_dsttime:                type of DST correction
+ * @hrtimer_res:       hrtimer resolution
+ * @__unused:          unused
+ *
+ * vdso_data will be accessed by 64 bit and compat code at the same time
+ * so we should be careful before modifying this structure.
+ */
+struct vdso_data {
+       u32                     seq;
+
+       s32                     clock_mode;
+       u64                     cycle_last;
+       u64                     mask;
+       u32                     mult;
+       u32                     shift;
+
+       struct vdso_timestamp   basetime[VDSO_BASES];
+
+       s32                     tz_minuteswest;
+       s32                     tz_dsttime;
+       u32                     hrtimer_res;
+       u32                     __unused;
+};
+
+/*
+ * We use the hidden visibility to prevent the compiler from generating a GOT
+ * relocation. Not only is going through a GOT useless (the entry couldn't and
+ * must not be overridden by another library), it does not even work: the linker
+ * cannot generate an absolute address to the data page.
+ *
+ * With the hidden visibility, the compiler simply generates a PC-relative
+ * relocation, and this is what we need.
+ */
+extern struct vdso_data _vdso_data[CS_BASES] __attribute__((visibility("hidden")));
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* __VDSO_DATAPAGE_H */