1 // SPDX-License-Identifier: GPL-2.0+
3 * 2002-10-15 Posix Clocks & timers
4 * by George Anzinger george@mvista.com
5 * Copyright (C) 2002, 2003 by MontaVista Software.
7 * 2004-06-01 Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
8 * Copyright (C) 2004 Boris Hu
10 * These are all the functions necessary to implement POSIX clocks & timers
13 #include <linux/interrupt.h>
14 #include <linux/slab.h>
15 #include <linux/time.h>
16 #include <linux/mutex.h>
17 #include <linux/sched/task.h>
19 #include <linux/uaccess.h>
20 #include <linux/list.h>
21 #include <linux/init.h>
22 #include <linux/compiler.h>
23 #include <linux/hash.h>
24 #include <linux/posix-clock.h>
25 #include <linux/posix-timers.h>
26 #include <linux/syscalls.h>
27 #include <linux/wait.h>
28 #include <linux/workqueue.h>
29 #include <linux/export.h>
30 #include <linux/hashtable.h>
31 #include <linux/compat.h>
32 #include <linux/nospec.h>
33 #include <linux/time_namespace.h>
35 #include "timekeeping.h"
36 #include "posix-timers.h"
39 * Management arrays for POSIX timers. Timers are now kept in a static hash table
41 * Timer ids are allocated by a local routine, which selects the proper hash head by
42 * a key constructed from the current->signal address and a per-signal-struct counter.
43 * This keeps timer ids unique per process, but they can now intersect between processes.
48 * Let's keep our timers in a slab cache :-)
50 static struct kmem_cache *posix_timers_cache;
52 static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
53 static DEFINE_SPINLOCK(hash_lock);
55 static const struct k_clock * const posix_clocks[];
56 static const struct k_clock *clockid_to_kclock(const clockid_t id);
57 static const struct k_clock clock_realtime, clock_monotonic;
60 * We assume that the new SIGEV_THREAD_ID shares no bits with the other
61 * SIGEV values. Here we report an error if this assumption fails.
63 #if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
64 ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
65 #error "SIGEV_THREAD_ID must not share a bit with other SIGEV values!"
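/*
 * Illustrative note (editorial, not part of the original source): the code
 * below relies on this disjointness. good_sigevent() matches
 * "case SIGEV_SIGNAL | SIGEV_THREAD_ID:" and posix_timer_event() tests
 * "timr->it_sigev_notify & SIGEV_THREAD_ID"; if SIGEV_THREAD_ID overlapped
 * SIGEV_SIGNAL, SIGEV_NONE or SIGEV_THREAD, those bit tests could not tell
 * a per-thread notification apart from the plain variants.
 */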
69 * The timer ID is turned into a timer address by posix_timer_by_id().
70 * Verifying a valid ID consists of:
72 * a) checking that the lookup returns a timer,
73 * b) checking that the timer id matches the one in the timer itself,
74 * c) checking that the timer owner is in the caller's thread group.
78 * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
79 * to implement others. This structure defines the various clocks.
82 * RESOLUTION: Clock resolution is used to round up timer and interval
83 * times, NOT to report clock times, which are reported with as
84 * much resolution as the system can muster. In some cases this
85 * resolution may depend on the underlying clock hardware and
86 * may not be quantifiable until run time, and only then is the
87 * necessary code written. The standard says we should say
88 * something about this issue in the documentation...
90 * FUNCTIONS: The CLOCKs structure defines the functions used to
91 * handle the various clock operations.
93 * The standard POSIX timer management code assumes the
94 * following: 1.) The k_itimer struct (posix-timers.h) is used for
95 * the timer. 2.) The list, it_lock, it_clock, it_id and
96 * it_pid fields are not modified by timer code.
98 * Permissions: It is assumed that the clock_settime() function defined
99 * for each clock will take care of permission checks. Some
100 * clocks may be settable by any user (i.e. local process
101 * clocks), others not. Currently the only settable clock we
102 * have is CLOCK_REALTIME and its high-resolution counterpart, both of
103 * which we hand off to do_sys_settimeofday64().
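/*
 * Example (editorial sketch, not from the original source): setting
 * CLOCK_REALTIME funnels into do_sys_settimeofday64(), which requires
 * CAP_SYS_TIME, so from userspace:
 *
 *	struct timespec ts = { .tv_sec = 1700000000 };	// arbitrary value
 *	clock_settime(CLOCK_REALTIME, &ts);	// -EPERM without CAP_SYS_TIME
 *
 * Clocks that provide no ->clock_set callback (e.g. CLOCK_MONOTONIC) are
 * rejected with -EINVAL by the clock_settime() syscall further below.
 */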
105 static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);
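/*
 * Editorial note: lock_timer() wraps __lock_timer() below. On success it
 * returns the k_itimer with ->it_lock held and interrupts disabled (the
 * previous irq state is saved in *flags); it returns NULL when the id does
 * not resolve to a timer owned by the current process. Callers pair it
 * with unlock_timer().
 */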
107 #define lock_timer(tid, flags) \
108 ({ struct k_itimer *__timr; \
109 __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
113 static int hash(struct signal_struct *sig, unsigned int nr)
115 return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
118 static struct k_itimer *__posix_timers_find(struct hlist_head *head,
119 struct signal_struct *sig,
122 struct k_itimer *timer;
124 hlist_for_each_entry_rcu(timer, head, t_hash) {
125 if ((timer->it_signal == sig) && (timer->it_id == id))
131 static struct k_itimer *posix_timer_by_id(timer_t id)
133 struct signal_struct *sig = current->signal;
134 struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];
136 return __posix_timers_find(head, sig, id);
139 static int posix_timer_add(struct k_itimer *timer)
141 struct signal_struct *sig = current->signal;
142 int first_free_id = sig->posix_timer_id;
143 struct hlist_head *head;
147 spin_lock(&hash_lock);
148 head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
149 if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
150 hlist_add_head_rcu(&timer->t_hash, head);
151 ret = sig->posix_timer_id;
153 if (++sig->posix_timer_id < 0)
154 sig->posix_timer_id = 0;
155 if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
156 /* Loop over all possible ids completed */
158 spin_unlock(&hash_lock);
159 } while (ret == -ENOENT);
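/*
 * Editorial note: the loop above hands out ids from the per-signal_struct
 * counter. sig->posix_timer_id wraps back to 0 once it would go negative,
 * so ids stay within the positive int range that __lock_timer() accepts.
 * If a full lap ends back at first_free_id without finding a free bucket
 * slot, the allocation gives up instead of spinning forever.
 */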
163 static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
165 spin_unlock_irqrestore(&timr->it_lock, flags);
168 /* Get clock_realtime */
169 static int posix_get_realtime_timespec(clockid_t which_clock, struct timespec64 *tp)
171 ktime_get_real_ts64(tp);
175 static ktime_t posix_get_realtime_ktime(clockid_t which_clock)
177 return ktime_get_real();
180 /* Set clock_realtime */
181 static int posix_clock_realtime_set(const clockid_t which_clock,
182 const struct timespec64 *tp)
184 return do_sys_settimeofday64(tp, NULL);
187 static int posix_clock_realtime_adj(const clockid_t which_clock,
188 struct __kernel_timex *t)
190 return do_adjtimex(t);
194 * Get monotonic time for posix timers
196 static int posix_get_monotonic_timespec(clockid_t which_clock, struct timespec64 *tp)
199 timens_add_monotonic(tp);
203 static ktime_t posix_get_monotonic_ktime(clockid_t which_clock)
209 * Get monotonic-raw time for posix timers
211 static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
213 ktime_get_raw_ts64(tp);
214 timens_add_monotonic(tp);
219 static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
221 ktime_get_coarse_real_ts64(tp);
225 static int posix_get_monotonic_coarse(clockid_t which_clock,
226 struct timespec64 *tp)
228 ktime_get_coarse_ts64(tp);
229 timens_add_monotonic(tp);
233 static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp)
235 *tp = ktime_to_timespec64(KTIME_LOW_RES);
239 static int posix_get_boottime_timespec(const clockid_t which_clock, struct timespec64 *tp)
241 ktime_get_boottime_ts64(tp);
242 timens_add_boottime(tp);
246 static ktime_t posix_get_boottime_ktime(const clockid_t which_clock)
248 return ktime_get_boottime();
251 static int posix_get_tai_timespec(clockid_t which_clock, struct timespec64 *tp)
253 ktime_get_clocktai_ts64(tp);
257 static ktime_t posix_get_tai_ktime(clockid_t which_clock)
259 return ktime_get_clocktai();
262 static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
265 tp->tv_nsec = hrtimer_resolution;
270 * Initialize everything, well, just everything in Posix clocks/timers ;)
272 static __init int init_posix_timers(void)
274 posix_timers_cache = kmem_cache_create("posix_timers_cache",
275 sizeof (struct k_itimer), 0, SLAB_PANIC,
279 __initcall(init_posix_timers);
282 * The siginfo si_overrun field and the return value of timer_getoverrun(2)
283 * are of type int. Clamp the overrun value to INT_MAX
285 static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
287 s64 sum = timr->it_overrun_last + (s64)baseval;
289 return sum > (s64)INT_MAX ? INT_MAX : (int)sum;
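/*
 * Editorial example: if an interval timer expired three more times between
 * queueing the signal and its delivery, it_overrun_last ends up as 3 and
 * timer_getoverrun() reports 3. The s64 sum only matters in pathological
 * cases where the accumulated overruns would exceed INT_MAX; those are
 * clamped rather than wrapped.
 */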
292 static void common_hrtimer_rearm(struct k_itimer *timr)
294 struct hrtimer *timer = &timr->it.real.timer;
296 timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
298 hrtimer_restart(timer);
302 * This function is exported for use by the signal delivery code. It is
303 * called just prior to the info block being released and passes that
304 * block to us. Its function is to update the overrun entry AND to
305 * restart the timer. It should only be called if the timer is to be
306 * restarted (i.e. we have flagged this in the sys_private entry of the info block).
309 * To protect against the timer going away while the interrupt is queued,
310 * we require that the it_requeue_pending flag be set.
312 void posixtimer_rearm(struct kernel_siginfo *info)
314 struct k_itimer *timr;
317 timr = lock_timer(info->si_tid, &flags);
321 if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) {
322 timr->kclock->timer_rearm(timr);
325 timr->it_overrun_last = timr->it_overrun;
326 timr->it_overrun = -1LL;
327 ++timr->it_requeue_pending;
329 info->si_overrun = timer_overrun_to_int(timr, info->si_overrun);
332 unlock_timer(timr, flags);
335 int posix_timer_event(struct k_itimer *timr, int si_private)
340 * FIXME: if ->sigq is queued we can race with
341 * dequeue_signal()->posixtimer_rearm().
343 * If dequeue_signal() sees the "right" value of
344 * si_sys_private it calls posixtimer_rearm().
345 * We re-queue ->sigq and drop ->it_lock().
346 * posixtimer_rearm() locks the timer
347 * and re-schedules it while ->sigq is pending.
348 * Not really bad, but not what we want.
350 timr->sigq->info.si_sys_private = si_private;
352 type = !(timr->it_sigev_notify & SIGEV_THREAD_ID) ? PIDTYPE_TGID : PIDTYPE_PID;
353 ret = send_sigqueue(timr->sigq, timr->it_pid, type);
354 /* If we failed to send the signal the timer stops. */
359 * This function gets called when a POSIX.1b interval timer expires. It
360 * is used as a callback from the kernel internal timer. The
361 * run_timer_list code ALWAYS calls with interrupts on.
363 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
365 static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
367 struct k_itimer *timr;
370 enum hrtimer_restart ret = HRTIMER_NORESTART;
372 timr = container_of(timer, struct k_itimer, it.real.timer);
373 spin_lock_irqsave(&timr->it_lock, flags);
376 if (timr->it_interval != 0)
377 si_private = ++timr->it_requeue_pending;
379 if (posix_timer_event(timr, si_private)) {
381 * The signal was not sent because it is ignored (SIG_IGN),
382 * so we will not get a callback to restart it AND
383 * it should be restarted.
385 if (timr->it_interval != 0) {
386 ktime_t now = hrtimer_cb_get_time(timer);
389 * FIXME: What we really want is to stop this
390 * timer completely and restart it in case the
391 * SIG_IGN is removed. This is a non-trivial
392 * change which involves sighand locking
393 * (sigh !), which we don't want to do late in the release cycle.
396 * For now we just let timers with an interval
397 * less than a jiffy expire every jiffy to
398 * avoid softirq starvation in case of SIG_IGN
399 * and a very small interval, which would put
400 * the timer right back on the softirq pending
401 * list. By moving now ahead of time we trick
402 * hrtimer_forward() into expiring the timer
403 * later, while we still maintain the overrun
404 * accuracy, but have some inconsistency in
405 * the timer_gettime() case. This is at least
406 * better than a starved softirq. A more
407 * complex fix which also solves another related
408 * inconsistency is already in the pipeline.
410 #ifdef CONFIG_HIGH_RES_TIMERS
412 ktime_t kj = NSEC_PER_SEC / HZ;
414 if (timr->it_interval < kj)
415 now = ktime_add(now, kj);
418 timr->it_overrun += hrtimer_forward(timer, now,
420 ret = HRTIMER_RESTART;
421 ++timr->it_requeue_pending;
426 unlock_timer(timr, flags);
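/*
 * Editorial example for the SIG_IGN workaround above: with HZ=250 one jiffy
 * is 4 ms, so an ignored timer with a 100 us interval is pushed ahead by
 * roughly one jiffy per expiry instead of being re-queued immediately.
 * hrtimer_forward() still accounts all skipped periods in it_overrun, which
 * is why the overrun count stays accurate even though timer_gettime() can
 * briefly report an expiry later than the nominal schedule.
 */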
430 static struct pid *good_sigevent(sigevent_t * event)
432 struct pid *pid = task_tgid(current);
433 struct task_struct *rtn;
435 switch (event->sigev_notify) {
436 case SIGEV_SIGNAL | SIGEV_THREAD_ID:
437 pid = find_vpid(event->sigev_notify_thread_id);
438 rtn = pid_task(pid, PIDTYPE_PID);
439 if (!rtn || !same_thread_group(rtn, current))
444 if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
454 static struct k_itimer * alloc_posix_timer(void)
456 struct k_itimer *tmr;
457 tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
460 if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
461 kmem_cache_free(posix_timers_cache, tmr);
464 clear_siginfo(&tmr->sigq->info);
468 static void k_itimer_rcu_free(struct rcu_head *head)
470 struct k_itimer *tmr = container_of(head, struct k_itimer, rcu);
472 kmem_cache_free(posix_timers_cache, tmr);
476 #define IT_ID_NOT_SET 0
477 static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
481 spin_lock_irqsave(&hash_lock, flags);
482 hlist_del_rcu(&tmr->t_hash);
483 spin_unlock_irqrestore(&hash_lock, flags);
485 put_pid(tmr->it_pid);
486 sigqueue_free(tmr->sigq);
487 call_rcu(&tmr->rcu, k_itimer_rcu_free);
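/*
 * Editorial note: the timer is freed via call_rcu() because the id lookup
 * walks the hash bucket under RCU (hlist_for_each_entry_rcu() above). A
 * concurrent lookup may still hold a pointer to the entry after
 * hlist_del_rcu(), so the memory must not be recycled until a grace period
 * has elapsed.
 */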
490 static int common_timer_create(struct k_itimer *new_timer)
492 hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
496 /* Create a POSIX.1b interval timer. */
497 static int do_timer_create(clockid_t which_clock, struct sigevent *event,
498 timer_t __user *created_timer_id)
500 const struct k_clock *kc = clockid_to_kclock(which_clock);
501 struct k_itimer *new_timer;
502 int error, new_timer_id;
503 int it_id_set = IT_ID_NOT_SET;
507 if (!kc->timer_create)
510 new_timer = alloc_posix_timer();
511 if (unlikely(!new_timer))
514 spin_lock_init(&new_timer->it_lock);
515 new_timer_id = posix_timer_add(new_timer);
516 if (new_timer_id < 0) {
517 error = new_timer_id;
521 it_id_set = IT_ID_SET;
522 new_timer->it_id = (timer_t) new_timer_id;
523 new_timer->it_clock = which_clock;
524 new_timer->kclock = kc;
525 new_timer->it_overrun = -1LL;
529 new_timer->it_pid = get_pid(good_sigevent(event));
531 if (!new_timer->it_pid) {
535 new_timer->it_sigev_notify = event->sigev_notify;
536 new_timer->sigq->info.si_signo = event->sigev_signo;
537 new_timer->sigq->info.si_value = event->sigev_value;
539 new_timer->it_sigev_notify = SIGEV_SIGNAL;
540 new_timer->sigq->info.si_signo = SIGALRM;
541 memset(&new_timer->sigq->info.si_value, 0, sizeof(sigval_t));
542 new_timer->sigq->info.si_value.sival_int = new_timer->it_id;
543 new_timer->it_pid = get_pid(task_tgid(current));
546 new_timer->sigq->info.si_tid = new_timer->it_id;
547 new_timer->sigq->info.si_code = SI_TIMER;
549 if (copy_to_user(created_timer_id,
550 &new_timer_id, sizeof (new_timer_id))) {
555 error = kc->timer_create(new_timer);
559 spin_lock_irq(&current->sighand->siglock);
560 new_timer->it_signal = current->signal;
561 list_add(&new_timer->list, &current->signal->posix_timers);
562 spin_unlock_irq(&current->sighand->siglock);
566 * In the case of the timer belonging to another task, after
567 * the task is unlocked, the timer is owned by the other task
568 * and may cease to exist at any time. Don't use or modify
569 * new_timer after the unlock call.
572 release_posix_timer(new_timer, it_id_set);
576 SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
577 struct sigevent __user *, timer_event_spec,
578 timer_t __user *, created_timer_id)
580 if (timer_event_spec) {
583 if (copy_from_user(&event, timer_event_spec, sizeof (event)))
585 return do_timer_create(which_clock, &event, created_timer_id);
587 return do_timer_create(which_clock, NULL, created_timer_id);
591 COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
592 struct compat_sigevent __user *, timer_event_spec,
593 timer_t __user *, created_timer_id)
595 if (timer_event_spec) {
598 if (get_compat_sigevent(&event, timer_event_spec))
600 return do_timer_create(which_clock, &event, created_timer_id);
602 return do_timer_create(which_clock, NULL, created_timer_id);
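/*
 * Userspace usage sketch (editorial, not part of the original file): a
 * per-thread timer delivering SIGRTMIN to the creating thread would be set
 * up roughly like this; good_sigevent() above validates the notify mode,
 * the signal number and the thread id:
 *
 *	struct sigevent sev = {
 *		.sigev_notify           = SIGEV_SIGNAL | SIGEV_THREAD_ID,
 *		.sigev_signo            = SIGRTMIN,
 *		.sigev_notify_thread_id = gettid(),
 *	};
 *	timer_t tid;
 *	timer_create(CLOCK_MONOTONIC, &sev, &tid);
 */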
607 * Locking issues: We need to protect the result of the id lookup until
608 * we get the timer locked down so it is not deleted under us. The
609 * removal is done under the hash lock, so we use that here to bridge
610 * the find to the timer lock. To avoid a deadlock, the timer id MUST
611 * be released without holding the timer lock.
613 static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
615 struct k_itimer *timr;
618 * timer_t could be any type >= int and we want to make sure any
619 * @timer_id outside positive int range fails lookup.
621 if ((unsigned long long)timer_id > INT_MAX)
625 timr = posix_timer_by_id(timer_id);
627 spin_lock_irqsave(&timr->it_lock, *flags);
628 if (timr->it_signal == current->signal) {
632 spin_unlock_irqrestore(&timr->it_lock, *flags);
639 static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
641 struct hrtimer *timer = &timr->it.real.timer;
643 return __hrtimer_expires_remaining_adjusted(timer, now);
646 static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
648 struct hrtimer *timer = &timr->it.real.timer;
650 return hrtimer_forward(timer, now, timr->it_interval);
654 * Get the time remaining on a POSIX.1b interval timer. This function
655 * is ALWAYS called with spin_lock_irq on the timer, thus it must not mess with irq.
658 * We have a couple of messes to clean up here. First there is the case
659 * of a timer that has a requeue pending. These timers should appear to
660 * be in the timer list with an expiry as if we were to requeue them again.
663 * The second issue is the SIGEV_NONE timer which may be active but is
664 * not really ever put in the timer list (to save system resources).
665 * This timer may be expired, and if so, we will do it here. Otherwise
666 * it is the same as a requeue pending timer with respect to what we should report.
669 void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
671 const struct k_clock *kc = timr->kclock;
672 ktime_t now, remaining, iv;
675 sig_none = timr->it_sigev_notify == SIGEV_NONE;
676 iv = timr->it_interval;
678 /* interval timer ? */
680 cur_setting->it_interval = ktime_to_timespec64(iv);
681 } else if (!timr->it_active) {
683 * SIGEV_NONE oneshot timers are never queued. Check them here.
690 now = kc->clock_get_ktime(timr->it_clock);
693 * When a requeue is pending or this is a SIGEV_NONE timer move the
694 * expiry time forward by intervals, so expiry is > now.
696 if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none))
697 timr->it_overrun += kc->timer_forward(timr, now);
699 remaining = kc->timer_remaining(timr, now);
700 /* Return 0 only when the timer is expired and not pending */
701 if (remaining <= 0) {
703 * A single shot SIGEV_NONE timer must return 0 when it is expired!
707 cur_setting->it_value.tv_nsec = 1;
709 cur_setting->it_value = ktime_to_timespec64(remaining);
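/*
 * Editorial example: an expired one-shot SIGEV_NONE timer reports
 * it_value == {0, 0} here, while an expired timer whose signal is still
 * pending reports a 1 ns residue so userspace can tell it apart from a
 * disarmed timer, matching the "expired and not pending" rule above.
 */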
713 /* Get the time remaining on a POSIX.1b interval timer. */
714 static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
716 struct k_itimer *timr;
717 const struct k_clock *kc;
721 timr = lock_timer(timer_id, &flags);
725 memset(setting, 0, sizeof(*setting));
727 if (WARN_ON_ONCE(!kc || !kc->timer_get))
730 kc->timer_get(timr, setting);
732 unlock_timer(timr, flags);
736 /* Get the time remaining on a POSIX.1b interval timer. */
737 SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
738 struct __kernel_itimerspec __user *, setting)
740 struct itimerspec64 cur_setting;
742 int ret = do_timer_gettime(timer_id, &cur_setting);
744 if (put_itimerspec64(&cur_setting, setting))
750 #ifdef CONFIG_COMPAT_32BIT_TIME
752 SYSCALL_DEFINE2(timer_gettime32, timer_t, timer_id,
753 struct old_itimerspec32 __user *, setting)
755 struct itimerspec64 cur_setting;
757 int ret = do_timer_gettime(timer_id, &cur_setting);
759 if (put_old_itimerspec32(&cur_setting, setting))
768 * Get the number of overruns of a POSIX.1b interval timer. This is the
769 * overrun count of the most recently delivered expiry. At the same time we are
770 * accumulating overruns on the next timer. The overrun is frozen when
771 * the signal is delivered, either at the notify time (if the info block
772 * is not queued) or at the actual delivery time (as we are informed by
773 * the callback to posixtimer_rearm()). So all we need to do is
774 * to pick up the frozen overrun.
776 SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
778 struct k_itimer *timr;
782 timr = lock_timer(timer_id, &flags);
786 overrun = timer_overrun_to_int(timr, 0);
787 unlock_timer(timr, flags);
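/*
 * Editorial example: for a 10 ms interval timer whose signal was delayed by
 * 35 ms (e.g. the process was stopped), timer_getoverrun() returns 3: three
 * additional expirations occurred between generating the queued signal and
 * acting on it. Values beyond INT_MAX are clamped by timer_overrun_to_int().
 */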
792 static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
793 bool absolute, bool sigev_none)
795 struct hrtimer *timer = &timr->it.real.timer;
796 enum hrtimer_mode mode;
798 mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
800 * Posix magic: Relative CLOCK_REALTIME timers are not affected by
801 * clock modifications, so they become CLOCK_MONOTONIC based under the
802 * hood. See hrtimer_init(). Update timr->kclock, so the generic
803 * functions which use timr->kclock->clock_get_*() work.
805 * Note: it_clock stays unmodified, because the next timer_set() might
806 * use ABSTIME, so it needs to switch back.
808 if (timr->it_clock == CLOCK_REALTIME)
809 timr->kclock = absolute ? &clock_realtime : &clock_monotonic;
811 hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
812 timr->it.real.timer.function = posix_timer_fn;
815 expires = ktime_add_safe(expires, timer->base->get_time());
816 hrtimer_set_expires(timer, expires);
819 hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
822 static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
824 return hrtimer_try_to_cancel(&timr->it.real.timer);
827 static void common_timer_wait_running(struct k_itimer *timer)
829 hrtimer_cancel_wait_running(&timer->it.real.timer);
833 * On PREEMPT_RT this prevents priority inversion against the softirq kthread in
834 * case it gets preempted while executing a timer callback. See the comments in
835 * hrtimer_cancel_wait_running(). For PREEMPT_RT=n this just results in a cpu_relax().
838 static struct k_itimer *timer_wait_running(struct k_itimer *timer,
839 unsigned long *flags)
841 const struct k_clock *kc = READ_ONCE(timer->kclock);
842 timer_t timer_id = READ_ONCE(timer->it_id);
844 /* Prevent kfree(timer) after dropping the lock */
846 unlock_timer(timer, *flags);
848 if (!WARN_ON_ONCE(!kc->timer_wait_running))
849 kc->timer_wait_running(timer);
852 /* Relock the timer. It might no longer be hashed. */
853 return lock_timer(timer_id, flags);
856 /* Set a POSIX.1b interval timer. */
857 int common_timer_set(struct k_itimer *timr, int flags,
858 struct itimerspec64 *new_setting,
859 struct itimerspec64 *old_setting)
861 const struct k_clock *kc = timr->kclock;
866 common_timer_get(timr, old_setting);
868 /* Prevent rearming by clearing the interval */
869 timr->it_interval = 0;
871 * Careful here. On SMP systems the timer expiry function could be
872 * active and spinning on timr->it_lock.
874 if (kc->timer_try_to_cancel(timr) < 0)
878 timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
880 timr->it_overrun_last = 0;
882 /* Switch off the timer when it_value is zero */
883 if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
886 timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
887 expires = timespec64_to_ktime(new_setting->it_value);
888 if (flags & TIMER_ABSTIME)
889 expires = timens_ktime_to_host(timr->it_clock, expires);
890 sigev_none = timr->it_sigev_notify == SIGEV_NONE;
892 kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
893 timr->it_active = !sigev_none;
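/*
 * Editorial note: passing an all-zero it_value disarms the timer; the
 * function returns before arming anything, with it_interval already
 * cleared. A zero it_interval with a non-zero it_value arms a one-shot
 * timer. With TIMER_ABSTIME the expiry is first translated from the
 * caller's time namespace into the host clock via timens_ktime_to_host().
 */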
897 static int do_timer_settime(timer_t timer_id, int tmr_flags,
898 struct itimerspec64 *new_spec64,
899 struct itimerspec64 *old_spec64)
901 const struct k_clock *kc;
902 struct k_itimer *timr;
906 if (!timespec64_valid(&new_spec64->it_interval) ||
907 !timespec64_valid(&new_spec64->it_value))
911 memset(old_spec64, 0, sizeof(*old_spec64));
913 timr = lock_timer(timer_id, &flags);
919 if (WARN_ON_ONCE(!kc || !kc->timer_set))
922 error = kc->timer_set(timr, tmr_flags, new_spec64, old_spec64);
924 if (error == TIMER_RETRY) {
925 // We already got the old time...
927 /* Unlocks and relocks the timer if it still exists */
928 timr = timer_wait_running(timr, &flags);
931 unlock_timer(timr, flags);
936 /* Set a POSIX.1b interval timer */
937 SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
938 const struct __kernel_itimerspec __user *, new_setting,
939 struct __kernel_itimerspec __user *, old_setting)
941 struct itimerspec64 new_spec, old_spec;
942 struct itimerspec64 *rtn = old_setting ? &old_spec : NULL;
948 if (get_itimerspec64(&new_spec, new_setting))
951 error = do_timer_settime(timer_id, flags, &new_spec, rtn);
952 if (!error && old_setting) {
953 if (put_itimerspec64(&old_spec, old_setting))
959 #ifdef CONFIG_COMPAT_32BIT_TIME
960 SYSCALL_DEFINE4(timer_settime32, timer_t, timer_id, int, flags,
961 struct old_itimerspec32 __user *, new,
962 struct old_itimerspec32 __user *, old)
964 struct itimerspec64 new_spec, old_spec;
965 struct itimerspec64 *rtn = old ? &old_spec : NULL;
970 if (get_old_itimerspec32(&new_spec, new))
973 error = do_timer_settime(timer_id, flags, &new_spec, rtn);
975 if (put_old_itimerspec32(&old_spec, old))
982 int common_timer_del(struct k_itimer *timer)
984 const struct k_clock *kc = timer->kclock;
986 timer->it_interval = 0;
987 if (kc->timer_try_to_cancel(timer) < 0)
989 timer->it_active = 0;
993 static inline int timer_delete_hook(struct k_itimer *timer)
995 const struct k_clock *kc = timer->kclock;
997 if (WARN_ON_ONCE(!kc || !kc->timer_del))
999 return kc->timer_del(timer);
1002 /* Delete a POSIX.1b interval timer. */
1003 SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
1005 struct k_itimer *timer;
1006 unsigned long flags;
1008 timer = lock_timer(timer_id, &flags);
1014 if (unlikely(timer_delete_hook(timer) == TIMER_RETRY)) {
1015 /* Unlocks and relocks the timer if it still exists */
1016 timer = timer_wait_running(timer, &flags);
1020 spin_lock(&current->sighand->siglock);
1021 list_del(&timer->list);
1022 spin_unlock(&current->sighand->siglock);
1024 * This keeps any tasks waiting on the spin lock from thinking
1025 * they got something (see the lock code above).
1027 timer->it_signal = NULL;
1029 unlock_timer(timer, flags);
1030 release_posix_timer(timer, IT_ID_SET);
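/*
 * Editorial note: clearing timer->it_signal before release (see the comment
 * above) is what makes a concurrent lock_timer() on the same id fail its
 * "timr->it_signal == current->signal" check in __lock_timer(), so another
 * thread spinning on it_lock cannot win the race and use a timer that is
 * about to be freed.
 */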
1035 * Delete a timer owned by the process, used by exit_itimers().
1037 static void itimer_delete(struct k_itimer *timer)
1040 spin_lock_irq(&timer->it_lock);
1042 if (timer_delete_hook(timer) == TIMER_RETRY) {
1043 spin_unlock_irq(&timer->it_lock);
1046 list_del(&timer->list);
1048 spin_unlock_irq(&timer->it_lock);
1049 release_posix_timer(timer, IT_ID_SET);
1053 * This is called by do_exit or de_thread, only when there are no more
1054 * references to the shared signal_struct.
1056 void exit_itimers(struct signal_struct *sig)
1058 struct k_itimer *tmr;
1060 while (!list_empty(&sig->posix_timers)) {
1061 tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
1066 SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
1067 const struct __kernel_timespec __user *, tp)
1069 const struct k_clock *kc = clockid_to_kclock(which_clock);
1070 struct timespec64 new_tp;
1072 if (!kc || !kc->clock_set)
1075 if (get_timespec64(&new_tp, tp))
1078 return kc->clock_set(which_clock, &new_tp);
1081 SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
1082 struct __kernel_timespec __user *, tp)
1084 const struct k_clock *kc = clockid_to_kclock(which_clock);
1085 struct timespec64 kernel_tp;
1091 error = kc->clock_get_timespec(which_clock, &kernel_tp);
1093 if (!error && put_timespec64(&kernel_tp, tp))
1099 int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx)
1101 const struct k_clock *kc = clockid_to_kclock(which_clock);
1108 return kc->clock_adj(which_clock, ktx);
1111 SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
1112 struct __kernel_timex __user *, utx)
1114 struct __kernel_timex ktx;
1117 if (copy_from_user(&ktx, utx, sizeof(ktx)))
1120 err = do_clock_adjtime(which_clock, &ktx);
1122 if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
1128 SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
1129 struct __kernel_timespec __user *, tp)
1131 const struct k_clock *kc = clockid_to_kclock(which_clock);
1132 struct timespec64 rtn_tp;
1138 error = kc->clock_getres(which_clock, &rtn_tp);
1140 if (!error && tp && put_timespec64(&rtn_tp, tp))
1146 #ifdef CONFIG_COMPAT_32BIT_TIME
1148 SYSCALL_DEFINE2(clock_settime32, clockid_t, which_clock,
1149 struct old_timespec32 __user *, tp)
1151 const struct k_clock *kc = clockid_to_kclock(which_clock);
1152 struct timespec64 ts;
1154 if (!kc || !kc->clock_set)
1157 if (get_old_timespec32(&ts, tp))
1160 return kc->clock_set(which_clock, &ts);
1163 SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock,
1164 struct old_timespec32 __user *, tp)
1166 const struct k_clock *kc = clockid_to_kclock(which_clock);
1167 struct timespec64 ts;
1173 err = kc->clock_get_timespec(which_clock, &ts);
1175 if (!err && put_old_timespec32(&ts, tp))
1181 SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
1182 struct old_timex32 __user *, utp)
1184 struct __kernel_timex ktx;
1187 err = get_old_timex32(&ktx, utp);
1191 err = do_clock_adjtime(which_clock, &ktx);
1194 err = put_old_timex32(utp, &ktx);
1199 SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock,
1200 struct old_timespec32 __user *, tp)
1202 const struct k_clock *kc = clockid_to_kclock(which_clock);
1203 struct timespec64 ts;
1209 err = kc->clock_getres(which_clock, &ts);
1210 if (!err && tp && put_old_timespec32(&ts, tp))
1219 * nanosleep for monotonic and realtime clocks
1221 static int common_nsleep(const clockid_t which_clock, int flags,
1222 const struct timespec64 *rqtp)
1224 ktime_t texp = timespec64_to_ktime(*rqtp);
1226 return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
1227 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
1231 static int common_nsleep_timens(const clockid_t which_clock, int flags,
1232 const struct timespec64 *rqtp)
1234 ktime_t texp = timespec64_to_ktime(*rqtp);
1236 if (flags & TIMER_ABSTIME)
1237 texp = timens_ktime_to_host(which_clock, texp);
1239 return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
1240 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
1244 SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
1245 const struct __kernel_timespec __user *, rqtp,
1246 struct __kernel_timespec __user *, rmtp)
1248 const struct k_clock *kc = clockid_to_kclock(which_clock);
1249 struct timespec64 t;
1256 if (get_timespec64(&t, rqtp))
1259 if (!timespec64_valid(&t))
1261 if (flags & TIMER_ABSTIME)
1263 current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
1264 current->restart_block.nanosleep.rmtp = rmtp;
1266 return kc->nsleep(which_clock, flags, &t);
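/*
 * Userspace usage sketch (editorial): an absolute sleep until a monotonic
 * deadline, unaffected by wall-clock changes:
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *	ts.tv_sec += 5;
 *	clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &ts, NULL);
 *
 * For TIMER_ABSTIME the remaining-time pointer is ignored (an absolute
 * deadline needs no remaining time on restart), which is why the code above
 * only sets up a native/compat rmtp for relative sleeps.
 */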
1269 #ifdef CONFIG_COMPAT_32BIT_TIME
1271 SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
1272 struct old_timespec32 __user *, rqtp,
1273 struct old_timespec32 __user *, rmtp)
1275 const struct k_clock *kc = clockid_to_kclock(which_clock);
1276 struct timespec64 t;
1283 if (get_old_timespec32(&t, rqtp))
1286 if (!timespec64_valid(&t))
1288 if (flags & TIMER_ABSTIME)
1290 current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
1291 current->restart_block.nanosleep.compat_rmtp = rmtp;
1293 return kc->nsleep(which_clock, flags, &t);
1298 static const struct k_clock clock_realtime = {
1299 .clock_getres = posix_get_hrtimer_res,
1300 .clock_get_timespec = posix_get_realtime_timespec,
1301 .clock_get_ktime = posix_get_realtime_ktime,
1302 .clock_set = posix_clock_realtime_set,
1303 .clock_adj = posix_clock_realtime_adj,
1304 .nsleep = common_nsleep,
1305 .timer_create = common_timer_create,
1306 .timer_set = common_timer_set,
1307 .timer_get = common_timer_get,
1308 .timer_del = common_timer_del,
1309 .timer_rearm = common_hrtimer_rearm,
1310 .timer_forward = common_hrtimer_forward,
1311 .timer_remaining = common_hrtimer_remaining,
1312 .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1313 .timer_wait_running = common_timer_wait_running,
1314 .timer_arm = common_hrtimer_arm,
1317 static const struct k_clock clock_monotonic = {
1318 .clock_getres = posix_get_hrtimer_res,
1319 .clock_get_timespec = posix_get_monotonic_timespec,
1320 .clock_get_ktime = posix_get_monotonic_ktime,
1321 .nsleep = common_nsleep_timens,
1322 .timer_create = common_timer_create,
1323 .timer_set = common_timer_set,
1324 .timer_get = common_timer_get,
1325 .timer_del = common_timer_del,
1326 .timer_rearm = common_hrtimer_rearm,
1327 .timer_forward = common_hrtimer_forward,
1328 .timer_remaining = common_hrtimer_remaining,
1329 .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1330 .timer_wait_running = common_timer_wait_running,
1331 .timer_arm = common_hrtimer_arm,
1334 static const struct k_clock clock_monotonic_raw = {
1335 .clock_getres = posix_get_hrtimer_res,
1336 .clock_get_timespec = posix_get_monotonic_raw,
1339 static const struct k_clock clock_realtime_coarse = {
1340 .clock_getres = posix_get_coarse_res,
1341 .clock_get_timespec = posix_get_realtime_coarse,
1344 static const struct k_clock clock_monotonic_coarse = {
1345 .clock_getres = posix_get_coarse_res,
1346 .clock_get_timespec = posix_get_monotonic_coarse,
1349 static const struct k_clock clock_tai = {
1350 .clock_getres = posix_get_hrtimer_res,
1351 .clock_get_ktime = posix_get_tai_ktime,
1352 .clock_get_timespec = posix_get_tai_timespec,
1353 .nsleep = common_nsleep,
1354 .timer_create = common_timer_create,
1355 .timer_set = common_timer_set,
1356 .timer_get = common_timer_get,
1357 .timer_del = common_timer_del,
1358 .timer_rearm = common_hrtimer_rearm,
1359 .timer_forward = common_hrtimer_forward,
1360 .timer_remaining = common_hrtimer_remaining,
1361 .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1362 .timer_wait_running = common_timer_wait_running,
1363 .timer_arm = common_hrtimer_arm,
1366 static const struct k_clock clock_boottime = {
1367 .clock_getres = posix_get_hrtimer_res,
1368 .clock_get_ktime = posix_get_boottime_ktime,
1369 .clock_get_timespec = posix_get_boottime_timespec,
1370 .nsleep = common_nsleep_timens,
1371 .timer_create = common_timer_create,
1372 .timer_set = common_timer_set,
1373 .timer_get = common_timer_get,
1374 .timer_del = common_timer_del,
1375 .timer_rearm = common_hrtimer_rearm,
1376 .timer_forward = common_hrtimer_forward,
1377 .timer_remaining = common_hrtimer_remaining,
1378 .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1379 .timer_wait_running = common_timer_wait_running,
1380 .timer_arm = common_hrtimer_arm,
1383 static const struct k_clock * const posix_clocks[] = {
1384 [CLOCK_REALTIME] = &clock_realtime,
1385 [CLOCK_MONOTONIC] = &clock_monotonic,
1386 [CLOCK_PROCESS_CPUTIME_ID] = &clock_process,
1387 [CLOCK_THREAD_CPUTIME_ID] = &clock_thread,
1388 [CLOCK_MONOTONIC_RAW] = &clock_monotonic_raw,
1389 [CLOCK_REALTIME_COARSE] = &clock_realtime_coarse,
1390 [CLOCK_MONOTONIC_COARSE] = &clock_monotonic_coarse,
1391 [CLOCK_BOOTTIME] = &clock_boottime,
1392 [CLOCK_REALTIME_ALARM] = &alarm_clock,
1393 [CLOCK_BOOTTIME_ALARM] = &alarm_clock,
1394 [CLOCK_TAI] = &clock_tai,
1397 static const struct k_clock *clockid_to_kclock(const clockid_t id)
1402 return (id & CLOCKFD_MASK) == CLOCKFD ?
1403 &clock_posix_dynamic : &clock_posix_cpu;
1406 if (id >= ARRAY_SIZE(posix_clocks))
1409 return posix_clocks[array_index_nospec(idx, ARRAY_SIZE(posix_clocks))];
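/*
 * Editorial note: negative clockids never index posix_clocks[]. They encode
 * either a CPU clock of a task or process, or, when the low bits contain
 * CLOCKFD, a dynamic clock behind a character-device file descriptor (the
 * usual "~fd << 3 | CLOCKFD" encoding used for e.g. PTP clocks); those are
 * routed to clock_posix_cpu and clock_posix_dynamic respectively.
 */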