Skip to content

Commit 75a19a0

Browse files
committed
arm64: arch_timer: Ensure counter register reads occur with seqlock held
When executing clock_gettime(), either in the vDSO or via a system call, we need to ensure that the read of the counter register occurs within the seqlock reader critical section. This ensures that updates to the clocksource parameters (e.g. the multiplier) are consistent with the counter value and therefore avoids the situation where time appears to go backwards across multiple reads.

Extend the vDSO logic so that the seqlock critical section covers the read of the counter register as well as accesses to the data page. Since reads of the counter system registers are not ordered by memory barrier instructions, introduce dependency ordering from the counter read to a subsequent memory access so that the seqlock memory barriers apply to the counter access in both the vDSO and the system call paths.

Cc: <[email protected]>
Cc: Marc Zyngier <[email protected]>
Tested-by: Vincenzo Frascino <[email protected]>
Link: https://lore.kernel.org/linux-arm-kernel/[email protected]/
Reported-by: Thomas Gleixner <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
1 parent 2f1d4e2 commit 75a19a0

File tree

2 files changed

+42
-6
lines changed

2 files changed

+42
-6
lines changed

arch/arm64/include/asm/arch_timer.h

Lines changed: 31 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -148,18 +148,47 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
148148
isb();
149149
}
150150

151+
/*
152+
* Ensure that reads of the counter are treated the same as memory reads
153+
* for the purposes of ordering by subsequent memory barriers.
154+
*
155+
* This insanity brought to you by speculative system register reads,
156+
* out-of-order memory accesses, sequence locks and Thomas Gleixner.
157+
*
158+
* http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
159+
*/
160+
#define arch_counter_enforce_ordering(val) do { \
161+
u64 tmp, _val = (val); \
162+
\
163+
asm volatile( \
164+
" eor %0, %1, %1\n" \
165+
" add %0, sp, %0\n" \
166+
" ldr xzr, [%0]" \
167+
: "=r" (tmp) : "r" (_val)); \
168+
} while (0)
169+
151170
static inline u64 arch_counter_get_cntpct(void)
152171
{
172+
u64 cnt;
173+
153174
isb();
154-
return arch_timer_reg_read_stable(cntpct_el0);
175+
cnt = arch_timer_reg_read_stable(cntpct_el0);
176+
arch_counter_enforce_ordering(cnt);
177+
return cnt;
155178
}
156179

157180
static inline u64 arch_counter_get_cntvct(void)
158181
{
182+
u64 cnt;
183+
159184
isb();
160-
return arch_timer_reg_read_stable(cntvct_el0);
185+
cnt = arch_timer_reg_read_stable(cntvct_el0);
186+
arch_counter_enforce_ordering(cnt);
187+
return cnt;
161188
}
162189

190+
#undef arch_counter_enforce_ordering
191+
163192
static inline int arch_timer_arch_init(void)
164193
{
165194
return 0;

arch/arm64/kernel/vdso/gettimeofday.S

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,13 @@ x_tmp .req x8
7373
movn x_tmp, #0xff00, lsl #48
7474
and \res, x_tmp, \res
7575
mul \res, \res, \mult
76+
/*
77+
* Fake address dependency from the value computed from the counter
78+
* register to subsequent data page accesses so that the sequence
79+
* locking also orders the read of the counter.
80+
*/
81+
and x_tmp, \res, xzr
82+
add vdso_data, vdso_data, x_tmp
7683
.endm
7784

7885
/*
@@ -147,12 +154,12 @@ ENTRY(__kernel_gettimeofday)
147154
/* w11 = cs_mono_mult, w12 = cs_shift */
148155
ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
149156
ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
150-
seqcnt_check fail=1b
151157

152158
get_nsec_per_sec res=x9
153159
lsl x9, x9, x12
154160

155161
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
162+
seqcnt_check fail=1b
156163
get_ts_realtime res_sec=x10, res_nsec=x11, \
157164
clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
158165

@@ -211,13 +218,13 @@ realtime:
211218
/* w11 = cs_mono_mult, w12 = cs_shift */
212219
ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
213220
ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
214-
seqcnt_check fail=realtime
215221

216222
/* All computations are done with left-shifted nsecs. */
217223
get_nsec_per_sec res=x9
218224
lsl x9, x9, x12
219225

220226
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
227+
seqcnt_check fail=realtime
221228
get_ts_realtime res_sec=x10, res_nsec=x11, \
222229
clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
223230
clock_gettime_return, shift=1
@@ -231,14 +238,14 @@ monotonic:
231238
ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
232239
ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
233240
ldp x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
234-
seqcnt_check fail=monotonic
235241

236242
/* All computations are done with left-shifted nsecs. */
237243
lsl x4, x4, x12
238244
get_nsec_per_sec res=x9
239245
lsl x9, x9, x12
240246

241247
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
248+
seqcnt_check fail=monotonic
242249
get_ts_realtime res_sec=x10, res_nsec=x11, \
243250
clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
244251

@@ -253,13 +260,13 @@ monotonic_raw:
253260
/* w11 = cs_raw_mult, w12 = cs_shift */
254261
ldp w12, w11, [vdso_data, #VDSO_CS_SHIFT]
255262
ldp x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
256-
seqcnt_check fail=monotonic_raw
257263

258264
/* All computations are done with left-shifted nsecs. */
259265
get_nsec_per_sec res=x9
260266
lsl x9, x9, x12
261267

262268
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
269+
seqcnt_check fail=monotonic_raw
263270
get_ts_clock_raw res_sec=x10, res_nsec=x11, \
264271
clock_nsec=x15, nsec_to_sec=x9
265272

0 commit comments

Comments
 (0)