Message ID: 20191108210824.1534248-7-arnd@arndb.de
State:      Accepted
Commit:     176ed98c8a76ee08babf99b25b00992c2a5e7bbc
Series:     y2038 cleanups
On Fri, 2019-11-08 at 22:07 +0100, Arnd Bergmann wrote:
[...]
> --- a/arch/powerpc/kernel/vdso32/gettimeofday.S
> +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
> @@ -15,10 +15,8 @@
>  /* Offset for the low 32-bit part of a field of long type */
>  #if defined(CONFIG_PPC64) && defined(CONFIG_CPU_BIG_ENDIAN)
>  #define LOPART	4
> -#define TSPEC_TV_SEC	TSPC64_TV_SEC+LOPART
>  #else
>  #define LOPART	0
> -#define TSPEC_TV_SEC	TSPC32_TV_SEC
>  #endif
>
>  	.text
> @@ -192,7 +190,7 @@ V_FUNCTION_BEGIN(__kernel_time)
>  	bl	__get_datapage@local
>  	mr	r9, r3		/* datapage ptr in r9 */
>
> -	lwz	r3,STAMP_XTIME+TSPEC_TV_SEC(r9)
> +	lwz	r3,STAMP_XTIME_SEC+LOWPART(r9)

"LOWPART" should be "LOPART".

>
>  	cmplwi	r11,0		/* check if t is NULL */
>  	beq	2f
> @@ -268,7 +266,7 @@ __do_get_tspec:
>   * as a 32.32 fixed-point number in r3 and r4.
>   * Load & add the xtime stamp.
>   */
> -	lwz	r5,STAMP_XTIME+TSPEC_TV_SEC(r9)
> +	lwz	r5,STAMP_XTIME_SEC+LOWPART(r9)

Same here.

>  	lwz	r6,STAMP_SEC_FRAC(r9)
>  	addc	r4,r4,r6
>  	adde	r3,r3,r5
[...]

--
Ben Hutchings, Software Developer
Codethink Ltd
https://www.codethink.co.uk/
Dale House, 35 Dale Street
Manchester, M1 2HF, United Kingdom
On Wed, Nov 20, 2019 at 11:43 PM Ben Hutchings
<ben.hutchings@codethink.co.uk> wrote:
>
> On Fri, 2019-11-08 at 22:07 +0100, Arnd Bergmann wrote:
> [...]
> > --- a/arch/powerpc/kernel/vdso32/gettimeofday.S
> > +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
> > @@ -15,10 +15,8 @@
> >  /* Offset for the low 32-bit part of a field of long type */
> >  #if defined(CONFIG_PPC64) && defined(CONFIG_CPU_BIG_ENDIAN)
> >  #define LOPART	4
> > -#define TSPEC_TV_SEC	TSPC64_TV_SEC+LOPART
> >  #else
> >  #define LOPART	0
> > -#define TSPEC_TV_SEC	TSPC32_TV_SEC
> >  #endif
> >
> >  	.text
> > @@ -192,7 +190,7 @@ V_FUNCTION_BEGIN(__kernel_time)
> >  	bl	__get_datapage@local
> >  	mr	r9, r3		/* datapage ptr in r9 */
> >
> > -	lwz	r3,STAMP_XTIME+TSPEC_TV_SEC(r9)
> > +	lwz	r3,STAMP_XTIME_SEC+LOWPART(r9)
>
> "LOWPART" should be "LOPART".
>

Thanks, fixed both instances in a patch on top now. I considered folding
it into the original patch, but as it's close to the merge window I'd
rather not rebase it, and this way I also give you credit for
finding the bug.

I'm surprised that the 0-day bot did not report this already.

Thanks for the careful review!

      Arnd

commit 1c11ca7a0584ddede5b8c93057b40d31e8a96d3d (HEAD)
Author: Arnd Bergmann <arnd@arndb.de>
Date:   Thu Nov 21 15:19:49 2019 +0100

    y2038: fix typo in powerpc vdso "LOPART"

    The earlier patch introduced a typo, change LOWPART back to
    LOPART.

    Fixes: 176ed98c8a76 ("y2038: vdso: powerpc: avoid timespec references")
    Reported-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
    Signed-off-by: Arnd Bergmann <arnd@arndb.de>

diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index a7180b0f4aa1..c8e6902cb01b 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -190,7 +190,7 @@ V_FUNCTION_BEGIN(__kernel_time)
 	bl	__get_datapage@local
 	mr	r9, r3		/* datapage ptr in r9 */

-	lwz	r3,STAMP_XTIME_SEC+LOWPART(r9)
+	lwz	r3,STAMP_XTIME_SEC+LOPART(r9)

 	cmplwi	r11,0		/* check if t is NULL */
 	beq	2f
@@ -266,7 +266,7 @@ __do_get_tspec:
  * as a 32.32 fixed-point number in r3 and r4.
  * Load & add the xtime stamp.
  */
-	lwz	r5,STAMP_XTIME_SEC+LOWPART(r9)
+	lwz	r5,STAMP_XTIME_SEC+LOPART(r9)

 	lwz	r6,STAMP_SEC_FRAC(r9)
 	addc	r4,r4,r6
 	adde	r3,r3,r5
Arnd Bergmann <arnd@arndb.de> wrote:

> On Wed, Nov 20, 2019 at 11:43 PM Ben Hutchings
> <ben.hutchings@codethink.co.uk> wrote:
>>
>> On Fri, 2019-11-08 at 22:07 +0100, Arnd Bergmann wrote:
>> [...]
>> > --- a/arch/powerpc/kernel/vdso32/gettimeofday.S
>> > +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
>> > @@ -15,10 +15,8 @@
>> >  /* Offset for the low 32-bit part of a field of long type */
>> >  #if defined(CONFIG_PPC64) && defined(CONFIG_CPU_BIG_ENDIAN)
>> >  #define LOPART	4
>> > -#define TSPEC_TV_SEC	TSPC64_TV_SEC+LOPART
>> >  #else
>> >  #define LOPART	0
>> > -#define TSPEC_TV_SEC	TSPC32_TV_SEC
>> >  #endif
>> >
>> >  	.text
>> > @@ -192,7 +190,7 @@ V_FUNCTION_BEGIN(__kernel_time)
>> >  	bl	__get_datapage@local
>> >  	mr	r9, r3		/* datapage ptr in r9 */
>> >
>> > -	lwz	r3,STAMP_XTIME+TSPEC_TV_SEC(r9)
>> > +	lwz	r3,STAMP_XTIME_SEC+LOWPART(r9)
>>
>> "LOWPART" should be "LOPART".
>>
>
> Thanks, fixed both instances in a patch on top now. I considered folding
> it into the original patch, but as it's close to the merge window I'd
> rather not rebase it, and this way I also give you credit for
> finding the bug.

Take care, might conflict with
https://github.com/linuxppc/linux/commit/5e381d727fe8834ca5a126f510194a7a4ac6dd3a

Christophe

>
> I'm surprised that the 0-day bot did not report this already.
>
> Thanks for the careful review!
>
>       Arnd
>
> commit 1c11ca7a0584ddede5b8c93057b40d31e8a96d3d (HEAD)
> Author: Arnd Bergmann <arnd@arndb.de>
> Date:   Thu Nov 21 15:19:49 2019 +0100
>
>     y2038: fix typo in powerpc vdso "LOPART"
>
>     The earlier patch introduced a typo, change LOWPART back to
>     LOPART.
>
>     Fixes: 176ed98c8a76 ("y2038: vdso: powerpc: avoid timespec references")
>     Reported-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
>     Signed-off-by: Arnd Bergmann <arnd@arndb.de>
>
> diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
> index a7180b0f4aa1..c8e6902cb01b 100644
> --- a/arch/powerpc/kernel/vdso32/gettimeofday.S
> +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
> @@ -190,7 +190,7 @@ V_FUNCTION_BEGIN(__kernel_time)
>  	bl	__get_datapage@local
>  	mr	r9, r3		/* datapage ptr in r9 */
>
> -	lwz	r3,STAMP_XTIME_SEC+LOWPART(r9)
> +	lwz	r3,STAMP_XTIME_SEC+LOPART(r9)
>
>  	cmplwi	r11,0		/* check if t is NULL */
>  	beq	2f
> @@ -266,7 +266,7 @@ __do_get_tspec:
>   * as a 32.32 fixed-point number in r3 and r4.
>   * Load & add the xtime stamp.
>   */
> -	lwz	r5,STAMP_XTIME_SEC+LOWPART(r9)
> +	lwz	r5,STAMP_XTIME_SEC+LOPART(r9)
>
>  	lwz	r6,STAMP_SEC_FRAC(r9)
>  	addc	r4,r4,r6
>  	adde	r3,r3,r5
On Thu, Nov 21, 2019 at 5:25 PM Christophe Leroy <christophe.leroy@c-s.fr> wrote:
> Arnd Bergmann <arnd@arndb.de> wrote:
> > On Wed, Nov 20, 2019 at 11:43 PM Ben Hutchings
> > <ben.hutchings@codethink.co.uk> wrote:
> >>
> >> On Fri, 2019-11-08 at 22:07 +0100, Arnd Bergmann wrote:
> >> > @@ -192,7 +190,7 @@ V_FUNCTION_BEGIN(__kernel_time)
> >> >  	bl	__get_datapage@local
> >> >  	mr	r9, r3		/* datapage ptr in r9 */
> >> >
> >> > -	lwz	r3,STAMP_XTIME+TSPEC_TV_SEC(r9)
> >> > +	lwz	r3,STAMP_XTIME_SEC+LOWPART(r9)
> >>
> >> "LOWPART" should be "LOPART".
> >>
> >
> > Thanks, fixed both instances in a patch on top now. I considered folding
> > it into the original patch, but as it's close to the merge window I'd
> > rather not rebase it, and this way I also give you credit for
> > finding the bug.
>
> Take care, might conflict with
> https://github.com/linuxppc/linux/commit/5e381d727fe8834ca5a126f510194a7a4ac6dd3a

Sorry for my late reply. I see this commit and no other variant of it has
made it into linux-next by now, so I assume this is not getting sent for v5.5
and it's not stopping me from sending my own pull request.

Please let me know if I missed something and this will cause problems.

On a related note: are you still working on the generic lib/vdso support for
powerpc? Without that, future libc implementations that use 64-bit time_t
will have to use the slow clock_gettime64 syscall instead of the vdso,
which has a significant performance impact.

      Arnd
On 27/11/2019 at 12:03, Arnd Bergmann wrote:
> On Thu, Nov 21, 2019 at 5:25 PM Christophe Leroy
> <christophe.leroy@c-s.fr> wrote:
>> Arnd Bergmann <arnd@arndb.de> wrote:
>>> On Wed, Nov 20, 2019 at 11:43 PM Ben Hutchings
>>> <ben.hutchings@codethink.co.uk> wrote:
>>>>
>>>> On Fri, 2019-11-08 at 22:07 +0100, Arnd Bergmann wrote:
>>>>> @@ -192,7 +190,7 @@ V_FUNCTION_BEGIN(__kernel_time)
>>>>>  	bl	__get_datapage@local
>>>>>  	mr	r9, r3		/* datapage ptr in r9 */
>>>>>
>>>>> -	lwz	r3,STAMP_XTIME+TSPEC_TV_SEC(r9)
>>>>> +	lwz	r3,STAMP_XTIME_SEC+LOWPART(r9)
>>>>
>>>> "LOWPART" should be "LOPART".
>>>>
>>>
>>> Thanks, fixed both instances in a patch on top now. I considered folding
>>> it into the original patch, but as it's close to the merge window I'd
>>> rather not rebase it, and this way I also give you credit for
>>> finding the bug.
>>
>> Take care, might conflict with
>> https://github.com/linuxppc/linux/commit/5e381d727fe8834ca5a126f510194a7a4ac6dd3a
>
> Sorry for my late reply. I see this commit and no other variant of it has
> made it into linux-next by now, so I assume this is not getting sent for v5.5
> and it's not stopping me from sending my own pull request.
>
> Please let me know if I missed something and this will cause problems.
>
> On a related note: are you still working on the generic lib/vdso support for
> powerpc? Without that, future libc implementations that use 64-bit time_t
> will have to use the slow clock_gettime64 syscall instead of the vdso,
> which has a significant performance impact.

I have left this generic lib/vdso subject aside for the moment, because
performance is disappointing and its architecture doesn't really fit with
the powerpc ABI.

From a performance point of view, it manipulates 64-bit variables where it
could use 32-bit ones. Of course I understand that y2038 will anyway force
the use of 64 bits for seconds, but for the time being the powerpc32 VDSO
uses 32-bit variables for both secs and ns, and that makes a difference.

Also, the generic VDSO is playing too much with data on stacks and the
associated memory reads/writes/copies, which kills performance on RISC
processors like powerpc. Inlining do_hres(), for instance, significantly
improves that, as it allows handling the 'struct __kernel_timespec ts' in
registers instead of on the stack.

Regarding the powerpc ABI, the issue is that errors shall be reported by
setting the SO bit in the CR register, and this cannot be done in C.
This means:
- The VDSO entry point must be in ASM and the generic VDSO C function
  must be called from there; it cannot be the VDSO entry point.
- The VDSO fallback (i.e. the system call) cannot be done from the generic
  VDSO C function; it must be called from the ASM as well.

Last point/question: what's the point in using 64 bits for nanoseconds
on 32-bit arches?

Christophe
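To make the stack-traffic point above concrete, here is a minimal, hedged
sketch in plain C of the effect Christophe describes for inlining do_hres().
This is not kernel code and every name in it is invented for illustration:
an out-of-line helper forces the caller to keep the result in a stack slot
and read it back from memory, while an inlined helper lets the compiler keep
both 64-bit halves in registers.

#include <stdint.h>

struct ts64 { int64_t sec; int64_t nsec; };

/* Out of line: the caller must reserve a stack slot for 'ts', pass its
 * address, and read the result back from memory afterwards. */
__attribute__((noinline))
static void fill_out_of_line(struct ts64 *ts)
{
	ts->sec = 1;
	ts->nsec = 2;
}

/* Inlined: the compiler is free to keep sec/nsec in registers from the
 * computation all the way to the caller's use, with no stack traffic. */
static inline void fill_inlined(struct ts64 *ts)
{
	ts->sec = 1;
	ts->nsec = 2;
}

int64_t use_out_of_line(void)
{
	struct ts64 t;

	fill_out_of_line(&t);
	return t.sec + t.nsec;
}

int64_t use_inlined(void)
{
	struct ts64 t;

	fill_inlined(&t);
	return t.sec + t.nsec;
}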
On Mon, Dec 2, 2019 at 1:55 PM Christophe Leroy <christophe.leroy@c-s.fr> wrote:
> On 27/11/2019 at 12:03, Arnd Bergmann wrote:
> > On Thu, Nov 21, 2019 at 5:25 PM Christophe Leroy
> > <christophe.leroy@c-s.fr> wrote:
> >> Arnd Bergmann <arnd@arndb.de> wrote:
> >>> On Wed, Nov 20, 2019 at 11:43 PM Ben Hutchings
> >>> <ben.hutchings@codethink.co.uk> wrote:
> >>>>
> >>>> On Fri, 2019-11-08 at 22:07 +0100, Arnd Bergmann wrote:
> >>>>> @@ -192,7 +190,7 @@ V_FUNCTION_BEGIN(__kernel_time)
> >>>>>  	bl	__get_datapage@local
> >>>>>  	mr	r9, r3		/* datapage ptr in r9 */
> >>>>>
> >>>>> -	lwz	r3,STAMP_XTIME+TSPEC_TV_SEC(r9)
> >>>>> +	lwz	r3,STAMP_XTIME_SEC+LOWPART(r9)
> >>>>
> >>>> "LOWPART" should be "LOPART".
> >>>>
> >>>
> >>> Thanks, fixed both instances in a patch on top now. I considered folding
> >>> it into the original patch, but as it's close to the merge window I'd
> >>> rather not rebase it, and this way I also give you credit for
> >>> finding the bug.
> >>
> >> Take care, might conflict with
> >> https://github.com/linuxppc/linux/commit/5e381d727fe8834ca5a126f510194a7a4ac6dd3a
> >
> > Sorry for my late reply. I see this commit and no other variant of it has
> > made it into linux-next by now, so I assume this is not getting sent for v5.5
> > and it's not stopping me from sending my own pull request.
> >
> > Please let me know if I missed something and this will cause problems.
> >
> > On a related note: are you still working on the generic lib/vdso support for
> > powerpc? Without that, future libc implementations that use 64-bit time_t
> > will have to use the slow clock_gettime64 syscall instead of the vdso,
> > which has a significant performance impact.
>
> I have left this generic lib/vdso subject aside for the moment, because
> performance is disappointing and its architecture doesn't really fit with
> the powerpc ABI.
>
> From a performance point of view, it manipulates 64-bit variables where it
> could use 32-bit ones. Of course I understand that y2038 will anyway force
> the use of 64 bits for seconds, but for the time being the powerpc32 VDSO
> uses 32-bit variables for both secs and ns, and that makes a difference.

Do you think we could optimize the common code? This sounds like it could
improve things for other architectures as well.

> Also, the generic VDSO is playing too much with data on stacks and the
> associated memory reads/writes/copies, which kills performance on RISC
> processors like powerpc. Inlining do_hres(), for instance, significantly
> improves that, as it allows handling the 'struct __kernel_timespec ts' in
> registers instead of on the stack.

That should be easy enough to change in the common code, as long as adding
'inline' does not cause harm on x86 and arm.

> Regarding the powerpc ABI, the issue is that errors shall be reported by
> setting the SO bit in the CR register, and this cannot be done in C.
> This means:
> - The VDSO entry point must be in ASM and the generic VDSO C function
>   must be called from there; it cannot be the VDSO entry point.
> - The VDSO fallback (i.e. the system call) cannot be done from the generic
>   VDSO C function; it must be called from the ASM as well.

As far as I can tell, both the VDSO entry point and the fallback are in
architecture-specific code on all architectures, so this does not seem to
be a show-stopper. It also seems that they might be combined, as long as
the current powerpc code could be changed to use the generic vdso_data
structure definition: the existing code can keep being used for
gettimeofday(), clock_gettime(CLOCK_MONOTONIC, ...) and
clock_gettime(CLOCK_REALTIME), while the generic implementation can be
called for clock_gettime64(), clock_getres() and clock_gettime() with
other clock IDs.

> Last point/question: what's the point in using 64 bits for nanoseconds
> on 32-bit arches?

The __kernel_timespec structure is defined with two 64-bit members so it
has the same layout on both 32-bit and 64-bit architectures, which lets us
share the implementation of the compat syscall handlers even on big-endian
architectures, and it avoids accidentally leaking four bytes of stack data
when copying a timespec from kernel to user space. The high 32 bits of the
nanoseconds are expected to always be zero when copying to user space, and
to be ignored when copied into the kernel (see get_timespec64()).

Note that C99 and POSIX require tv_nsec to be 'long', so 64-bit
architectures have to make it 64 bits wide, and 32-bit architectures end up
including padding for it.

In the vdso_data, the "nsec" value is shifted, so it actually needs more
bits. I don't know if this is a strict requirement, or if we could change
it to be 32 bits non-shifted during the update at the cost of losing 1
nanosecond of accuracy.

      Arnd
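A minimal sketch, in plain C, of the layout described above. The struct name
here is an illustrative stand-in (the real definition is struct
__kernel_timespec in include/uapi/linux/time_types.h, where tv_nsec is a
long long): both members are 64 bits wide, so the structure is 16 bytes on
32-bit and 64-bit architectures alike, and on 32-bit the upper half of
tv_nsec effectively behaves as padding.

#include <stdint.h>

/*
 * Illustrative stand-in for struct __kernel_timespec: two 64-bit members
 * give the same 16-byte layout on 32-bit and 64-bit architectures, so
 * native and compat syscall handlers can share one copy path.  On 32-bit,
 * only the low 32 bits of tv_nsec carry data; the high half is ignored on
 * the way into the kernel and expected to be zero on the way out.
 */
struct sketch_kernel_timespec {
	int64_t tv_sec;		/* seconds */
	int64_t tv_nsec;	/* nanoseconds, high 32 bits expected to be zero */
};

_Static_assert(sizeof(struct sketch_kernel_timespec) == 16,
	       "identical layout regardless of the native word size");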
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index c61d59ed3b45..a115970a6809 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -81,7 +81,8 @@ struct vdso_data {
 	__u32 stamp_sec_fraction;	/* fractional seconds of stamp_xtime */
 	__s32 wtom_clock_nsec;		/* Wall to monotonic clock nsec */
 	__s64 wtom_clock_sec;		/* Wall to monotonic clock sec */
-	struct timespec stamp_xtime;	/* xtime as at tb_orig_stamp */
+	__s64 stamp_xtime_sec;		/* xtime secs as at tb_orig_stamp */
+	__s64 stamp_xtime_nsec;		/* xtime nsecs as at tb_orig_stamp */
 	__u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
 	__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
 };
@@ -101,7 +102,8 @@ struct vdso_data {
 	__u32 tz_dsttime;		/* Type of dst correction	0x5C */
 	__s32 wtom_clock_sec;		/* Wall to monotonic clock */
 	__s32 wtom_clock_nsec;
-	struct timespec stamp_xtime;	/* xtime as at tb_orig_stamp */
+	__s32 stamp_xtime_sec;		/* xtime seconds as at tb_orig_stamp */
+	__s32 stamp_xtime_nsec;		/* xtime nsecs as at tb_orig_stamp */
 	__u32 stamp_sec_fraction;	/* fractional seconds of stamp_xtime */
 	__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
 	__u32 dcache_block_size;	/* L1 d-cache block size */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 827f4c354e13..f22bd6d1fe93 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -385,7 +385,8 @@ int main(void)
 	OFFSET(CFG_SYSCALL_MAP32, vdso_data, syscall_map_32);
 	OFFSET(WTOM_CLOCK_SEC, vdso_data, wtom_clock_sec);
 	OFFSET(WTOM_CLOCK_NSEC, vdso_data, wtom_clock_nsec);
-	OFFSET(STAMP_XTIME, vdso_data, stamp_xtime);
+	OFFSET(STAMP_XTIME_SEC, vdso_data, stamp_xtime_sec);
+	OFFSET(STAMP_XTIME_NSEC, vdso_data, stamp_xtime_nsec);
 	OFFSET(STAMP_SEC_FRAC, vdso_data, stamp_sec_fraction);
 	OFFSET(CFG_ICACHE_BLOCKSZ, vdso_data, icache_block_size);
 	OFFSET(CFG_DCACHE_BLOCKSZ, vdso_data, dcache_block_size);
@@ -395,18 +396,13 @@ int main(void)
 	OFFSET(CFG_SYSCALL_MAP64, vdso_data, syscall_map_64);
 	OFFSET(TVAL64_TV_SEC, __kernel_old_timeval, tv_sec);
 	OFFSET(TVAL64_TV_USEC, __kernel_old_timeval, tv_usec);
+#endif
+	OFFSET(TSPC64_TV_SEC, __kernel_timespec, tv_sec);
+	OFFSET(TSPC64_TV_NSEC, __kernel_timespec, tv_nsec);
 	OFFSET(TVAL32_TV_SEC, old_timeval32, tv_sec);
 	OFFSET(TVAL32_TV_USEC, old_timeval32, tv_usec);
-	OFFSET(TSPC64_TV_SEC, timespec, tv_sec);
-	OFFSET(TSPC64_TV_NSEC, timespec, tv_nsec);
 	OFFSET(TSPC32_TV_SEC, old_timespec32, tv_sec);
 	OFFSET(TSPC32_TV_NSEC, old_timespec32, tv_nsec);
-#else
-	OFFSET(TVAL32_TV_SEC, __kernel_old_timeval, tv_sec);
-	OFFSET(TVAL32_TV_USEC, __kernel_old_timeval, tv_usec);
-	OFFSET(TSPC32_TV_SEC, timespec, tv_sec);
-	OFFSET(TSPC32_TV_NSEC, timespec, tv_nsec);
-#endif
 	/* timeval/timezone offsets for use by vdso */
 	OFFSET(TZONE_TZ_MINWEST, timezone, tz_minuteswest);
 	OFFSET(TZONE_TZ_DSTTIME, timezone, tz_dsttime);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 694522308cd5..1fad5a04d083 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -885,7 +885,7 @@ static notrace u64 timebase_read(struct clocksource *cs)

 void update_vsyscall(struct timekeeper *tk)
 {
-	struct timespec xt;
+	struct timespec64 xt;
 	struct clocksource *clock = tk->tkr_mono.clock;
 	u32 mult = tk->tkr_mono.mult;
 	u32 shift = tk->tkr_mono.shift;
@@ -957,7 +957,8 @@ void update_vsyscall(struct timekeeper *tk)
 	vdso_data->tb_to_xs = new_tb_to_xs;
 	vdso_data->wtom_clock_sec = tk->wall_to_monotonic.tv_sec;
 	vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec;
-	vdso_data->stamp_xtime = xt;
+	vdso_data->stamp_xtime_sec = xt.sec;
+	vdso_data->stamp_xtime_nsec = xt.nsec;
 	vdso_data->stamp_sec_fraction = frac_sec;
 	smp_wmb();
 	++(vdso_data->tb_update_count);
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 4327665ad86f..37ba4c3d965b 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -15,10 +15,8 @@
 /* Offset for the low 32-bit part of a field of long type */
 #if defined(CONFIG_PPC64) && defined(CONFIG_CPU_BIG_ENDIAN)
 #define LOPART	4
-#define TSPEC_TV_SEC	TSPC64_TV_SEC+LOPART
 #else
 #define LOPART	0
-#define TSPEC_TV_SEC	TSPC32_TV_SEC
 #endif

 	.text
@@ -192,7 +190,7 @@ V_FUNCTION_BEGIN(__kernel_time)
 	bl	__get_datapage@local
 	mr	r9, r3		/* datapage ptr in r9 */

-	lwz	r3,STAMP_XTIME+TSPEC_TV_SEC(r9)
+	lwz	r3,STAMP_XTIME_SEC+LOWPART(r9)

 	cmplwi	r11,0		/* check if t is NULL */
 	beq	2f
@@ -268,7 +266,7 @@ __do_get_tspec:
  * as a 32.32 fixed-point number in r3 and r4.
  * Load & add the xtime stamp.
  */
-	lwz	r5,STAMP_XTIME+TSPEC_TV_SEC(r9)
+	lwz	r5,STAMP_XTIME_SEC+LOWPART(r9)
 	lwz	r6,STAMP_SEC_FRAC(r9)
 	addc	r4,r4,r6
 	adde	r3,r3,r5
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index 07bfe33fe874..1f24e411af80 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -116,8 +116,8 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
 	 * CLOCK_REALTIME_COARSE, below values are needed for MONOTONIC_COARSE
 	 * too
 	 */
-	ld	r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
-	ld	r5,STAMP_XTIME+TSPC64_TV_NSEC(r3)
+	ld	r4,STAMP_XTIME_SEC(r3)
+	ld	r5,STAMP_XTIME_NSEC(r3)
 	bne	cr6,75f

 	/* CLOCK_MONOTONIC_COARSE */
@@ -220,7 +220,7 @@ V_FUNCTION_BEGIN(__kernel_time)
 	mr	r11,r3		/* r11 holds t */
 	bl	V_LOCAL_FUNC(__get_datapage)

-	ld	r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
+	ld	r4,STAMP_XTIME_SEC(r3)

 	cmpldi	r11,0		/* check if t is NULL */
 	beq	2f
@@ -265,7 +265,7 @@ V_FUNCTION_BEGIN(__do_get_tspec)
 	mulhdu	r6,r6,r5	/* in units of 2^-32 seconds */

 	/* Add stamp since epoch */
-	ld	r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
+	ld	r4,STAMP_XTIME_SEC(r3)
 	lwz	r5,STAMP_SEC_FRAC(r3)
 	or	r0,r4,r5
 	or	r0,r0,r6
As a preparation to stop using 'struct timespec' in the kernel, change the
powerpc vdso implementation:

 - split up the vdso data definition to have equivalent members for
   seconds and nanoseconds instead of an xtime structure
 - use timespec64 as an intermediate for the xtime update
 - change the asm-offsets definition to be based on the appropriate
   fixed-length types

This is only a temporary fix for changing the types; in order to actually
support a 64-bit safe vdso32 version of clock_gettime(), the entire powerpc
vdso should be replaced with the generic lib/vdso/ implementation. If that
happens first, this patch becomes obsolete.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
---
 arch/powerpc/include/asm/vdso_datapage.h  |  6 ++++--
 arch/powerpc/kernel/asm-offsets.c         | 14 +++++---------
 arch/powerpc/kernel/time.c                |  5 +++--
 arch/powerpc/kernel/vdso32/gettimeofday.S |  6 ++----
 arch/powerpc/kernel/vdso64/gettimeofday.S |  8 ++++----
 5 files changed, 18 insertions(+), 21 deletions(-)

--
2.20.0