@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
.nesting = 1,
.nmi_nesting = CT_NESTING_IRQ_NONIDLE,
#endif
- .state = ATOMIC_INIT(CT_RCU_WATCHING),
+ .state = ATOMIC_INIT(CT_RCU_WATCHING | CT_STATE_KERNEL),
};
EXPORT_SYMBOL_GPL(context_tracking);
@@ -147,7 +147,7 @@ static void noinstr ct_kernel_exit(bool user, int offset)
instrumentation_end();
WRITE_ONCE(ct->nesting, 0); /* Avoid irq-access tearing. */
// RCU is watching here ...
- ct_kernel_exit_state(offset);
+ ct_kernel_exit_state(offset - CT_STATE_KERNEL);
// ... but is no longer watching here.
rcu_task_exit();
}
@@ -175,7 +175,7 @@ static void noinstr ct_kernel_enter(bool user, int offset)
}
rcu_task_enter();
// RCU is not watching here ...
- ct_kernel_enter_state(offset);
+ ct_kernel_enter_state(offset + CT_STATE_KERNEL);
// ... but is watching here.
instrumentation_begin();
@@ -537,7 +537,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
* RCU only requires CT_RCU_WATCHING increments to be fully
* ordered.
*/
- raw_atomic_add(state, &ct->state);
+ raw_atomic_add(state - CT_STATE_KERNEL, &ct->state);
}
}
}
@@ -647,7 +647,7 @@ void noinstr __ct_user_exit(enum ctx_state state)
* RCU only requires CT_RCU_WATCHING increments to be fully
* ordered.
*/
- raw_atomic_sub(state, &ct->state);
+ raw_atomic_sub(state - CT_STATE_KERNEL, &ct->state);
}
}
}
CT_STATE_KERNEL being zero means it can be (and is) omitted in a handful
of places. A later patch will change CT_STATE_KERNEL into a non-zero
value; prepare for that by using it where it belongs:

o In the initial CT state
o At kernel entry / exit

No change in functionality intended.

Signed-off-by: Valentin Schneider <vschneid@redhat.com>
---
 kernel/context_tracking.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
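
A minimal sketch of the arithmetic this patch touches, written as a
standalone userspace C program rather than kernel code. The bit values
below are illustrative assumptions, not the kernel's actual CT_STATE_* /
CT_RCU_WATCHING definitions; they only show that, while CT_STATE_KERNEL
is zero, spelling it out adds and subtracts nothing:

  /* Hypothetical encodings, for illustration only. */
  #include <stdio.h>

  #define CT_STATE_KERNEL   0x0  /* assumed zero today; non-zero in a later patch */
  #define CT_STATE_USER     0x1  /* assumed encoding in the low state bits */
  #define CT_RCU_WATCHING   0x4  /* assumed lowest bit of the RCU watching counter */

  int main(void)
  {
          /* Boot-time value from the first hunk: RCU watching, in kernel. */
          unsigned int state = CT_RCU_WATCHING | CT_STATE_KERNEL;

          /*
           * __ct_user_enter(): leave CT_STATE_KERNEL, enter the user state.
           * With CT_STATE_KERNEL == 0 this is exactly the old "add state",
           * hence no functional change.
           */
          state += CT_STATE_USER - CT_STATE_KERNEL;
          printf("after user enter: %#x\n", state);

          /* __ct_user_exit(): the symmetric subtraction restores kernel state. */
          state -= CT_STATE_USER - CT_STATE_KERNEL;
          printf("after user exit:  %#x\n", state);

          return 0;
  }

Once CT_STATE_KERNEL becomes non-zero, the same expressions clear the
kernel bits before setting the user ones, which is why the explicit form
is the one that survives the later patch.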