--- a/Documentation/core-api/timekeeping.rst
+++ b/Documentation/core-api/timekeeping.rst
@@ -94,7 +94,7 @@ different format depending on what is required by the user:
down the seconds to the full seconds of the last timer tick
using the respective reference.
-Coarse and fast_ns access
+Coarse and fast access
-------------------------
Some additional variants exist for more specialized cases:
@@ -125,6 +125,11 @@ Some additional variants exist for more specialized cases:
up to several microseconds on older hardware with an external
clocksource.
+.. c:function:: ktime_t ktime_get_mono_fast( void )
+                ktime_t ktime_get_raw_fast( void )
+                ktime_t ktime_get_boottime_fast( void )
+                ktime_t ktime_get_real_fast( void )
+
.. c:function:: u64 ktime_get_mono_fast_ns( void )
u64 ktime_get_raw_fast_ns( void )
u64 ktime_get_boot_fast_ns( void )
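(Illustration only, not part of the patch: a caller in NMI or tracing
context, where the seqlock-based accessors must not be used, could sample
both forms roughly as below. The struct and function are hypothetical,
not kernel APIs.)

	struct my_sample {
		u64	mono_ns;	/* plain nanoseconds, as before */
		ktime_t	boot_kt;	/* ktime_t form added by this patch */
	};

	static void my_nmi_sample(struct my_sample *s)
	{
		/* Fast accessors never block and are NMI safe. */
		s->mono_ns = ktime_get_mono_fast_ns();
		s->boot_kt = ktime_get_boottime_fast();
	}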
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -146,10 +146,30 @@ static inline u64 ktime_get_raw_ns(void)
return ktime_to_ns(ktime_get_raw());
}
-extern u64 ktime_get_mono_fast_ns(void);
-extern u64 ktime_get_raw_fast_ns(void);
-extern u64 ktime_get_boot_fast_ns(void);
-extern u64 ktime_get_real_fast_ns(void);
+extern ktime_t ktime_get_mono_fast(void);
+extern ktime_t ktime_get_raw_fast(void);
+extern ktime_t ktime_get_boottime_fast(void);
+extern ktime_t ktime_get_real_fast(void);
+
+static inline u64 ktime_get_mono_fast_ns(void)
+{
+ return ktime_to_ns(ktime_get_mono_fast());
+}
+
+static inline u64 ktime_get_raw_fast_ns(void)
+{
+ return ktime_to_ns(ktime_get_raw_fast());
+}
+
+static inline u64 ktime_get_boot_fast_ns(void)
+{
+ return ktime_to_ns(ktime_get_boottime_fast());
+}
+
+static inline u64 ktime_get_real_fast_ns(void)
+{
+ return ktime_to_ns(ktime_get_real_fast());
+}
/*
* timespec64/time64_t interfaces utilizing the ktime based ones
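(Illustration only, not part of the patch: with the wrappers above,
existing u64 users are unchanged, while new code can do ktime_t
arithmetic on fast timestamps directly. do_something() below is a
hypothetical workload.)

	static void my_measure(void)
	{
		ktime_t start, delta;

		start = ktime_get_mono_fast();
		do_something();
		delta = ktime_sub(ktime_get_mono_fast(), start);

		/* ktime_to_ns() returns s64, hence %lld. */
		pr_debug("took %lld ns\n", ktime_to_ns(delta));
	}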
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -443,41 +443,40 @@ static void update_fast_timekeeper(const struct tk_read_base *tkr,
* of the following timestamps. Callers need to be aware of that and
* deal with it.
*/
-static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
+static __always_inline ktime_t __ktime_get_fast(struct tk_fast *tkf)
{
struct tk_read_base *tkr;
unsigned int seq;
- u64 now;
+ ktime_t now;
do {
seq = raw_read_seqcount_latch(&tkf->seq);
tkr = tkf->base + (seq & 0x01);
- now = ktime_to_ns(tkr->base);
-
- now += timekeeping_delta_to_ns(tkr,
+ now = ktime_add_ns(tkr->base,
+ timekeeping_delta_to_ns(tkr,
clocksource_delta(
tk_clock_read(tkr),
tkr->cycle_last,
- tkr->mask));
+ tkr->mask)));
} while (read_seqcount_retry(&tkf->seq, seq));
return now;
}
-u64 ktime_get_mono_fast_ns(void)
+ktime_t ktime_get_mono_fast(void)
{
- return __ktime_get_fast_ns(&tk_fast_mono);
+ return __ktime_get_fast(&tk_fast_mono);
}
-EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
+EXPORT_SYMBOL_GPL(ktime_get_mono_fast);
-u64 ktime_get_raw_fast_ns(void)
+ktime_t ktime_get_raw_fast(void)
{
- return __ktime_get_fast_ns(&tk_fast_raw);
+ return __ktime_get_fast(&tk_fast_raw);
}
-EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
+EXPORT_SYMBOL_GPL(ktime_get_raw_fast);
/**
- * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
+ * ktime_get_boottime_fast - NMI safe and fast access to boot clock.
*
* To keep it NMI safe since we're accessing from tracing, we're not using a
* separate timekeeper with updates to monotonic clock and boot offset
@@ -497,47 +496,46 @@ EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
* partially updated. Since the tk->offs_boot update is a rare event, this
* should be a rare occurrence which postprocessing should be able to handle.
*/
-u64 notrace ktime_get_boot_fast_ns(void)
+ktime_t notrace ktime_get_boottime_fast(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
- return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
+ return ktime_add(ktime_get_mono_fast(), tk->offs_boot);
}
-EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
+EXPORT_SYMBOL_GPL(ktime_get_boottime_fast);
/*
- * See comment for __ktime_get_fast_ns() vs. timestamp ordering
+ * See comment for __ktime_get_fast() vs. timestamp ordering
*/
-static __always_inline u64 __ktime_get_real_fast_ns(struct tk_fast *tkf)
+static __always_inline ktime_t __ktime_get_real_fast(struct tk_fast *tkf)
{
struct tk_read_base *tkr;
unsigned int seq;
- u64 now;
+ ktime_t now;
do {
seq = raw_read_seqcount_latch(&tkf->seq);
tkr = tkf->base + (seq & 0x01);
- now = ktime_to_ns(tkr->base_real);
-
- now += timekeeping_delta_to_ns(tkr,
+ now = ktime_add_ns(tkr->base_real,
+ timekeeping_delta_to_ns(tkr,
clocksource_delta(
tk_clock_read(tkr),
tkr->cycle_last,
- tkr->mask));
+ tkr->mask)));
} while (read_seqcount_retry(&tkf->seq, seq));
return now;
}
/**
- * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
+ * ktime_get_real_fast - NMI safe and fast access to clock realtime.
*/
-u64 ktime_get_real_fast_ns(void)
+ktime_t ktime_get_real_fast(void)
{
- return __ktime_get_real_fast_ns(&tk_fast_mono);
+ return __ktime_get_real_fast(&tk_fast_mono);
}
-EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
+EXPORT_SYMBOL_GPL(ktime_get_real_fast);
/**
* halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
Previously there was no analogue to get proper ktime_t versions of the fast variety of ktime invocations. This commit makes the interface uniform with the other accessors. Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Thomas Gleixner <tglx@linutronix.de> --- Documentation/core-api/timekeeping.rst | 7 +++- include/linux/timekeeping.h | 28 ++++++++++++-- kernel/time/timekeeping.c | 52 +++++++++++++------------- 3 files changed, 55 insertions(+), 32 deletions(-) -- 2.21.0