@@ -97,6 +97,7 @@ config ARM64
select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANTS_NO_INSTR
+ select ARCH_WANTS_RT_DELAYED_SIGNALS
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARM_AMBA
select ARM_ARCH_TIMER
@@ -22,8 +22,4 @@ static inline void __user *arch_untagged_si_addr(void __user *addr,
}
#define arch_untagged_si_addr arch_untagged_si_addr
-#if defined(CONFIG_PREEMPT_RT)
-#define ARCH_RT_DELAYS_SIGNAL_SEND
-#endif
-
#endif
@@ -928,7 +928,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
} else {
local_daif_restore(DAIF_PROCCTX);
-#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
+#ifdef CONFIG_RT_DELAYED_SIGNALS
if (unlikely(current->forced_info.si_signo)) {
struct task_struct *t = current;
force_sig_info(&t->forced_info);
@@ -121,6 +121,7 @@ config X86
select ARCH_WANTS_NO_INSTR
select ARCH_WANT_HUGE_PMD_SHARE
select ARCH_WANT_LD_ORPHAN_WARN
+ select ARCH_WANTS_RT_DELAYED_SIGNALS
select ARCH_WANTS_THP_SWAP if X86_64
select ARCH_HAS_PARANOID_L1D_FLUSH
select BUILDTIME_TABLE_SORT
@@ -28,19 +28,6 @@ typedef struct {
#define SA_IA32_ABI 0x02000000u
#define SA_X32_ABI 0x01000000u
-/*
- * Because some traps use the IST stack, we must keep preemption
- * disabled while calling do_trap(), but do_trap() may call
- * force_sig_info() which will grab the signal spin_locks for the
- * task, which in PREEMPT_RT are mutexes. By defining
- * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set
- * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
- * trap.
- */
-#if defined(CONFIG_PREEMPT_RT)
-#define ARCH_RT_DELAYS_SIGNAL_SEND
-#endif
-
#ifndef CONFIG_COMPAT
#define compat_sigset_t compat_sigset_t
typedef sigset_t compat_sigset_t;
@@ -707,8 +707,7 @@ static size_t crng_fast_load(const u8 *cp, size_t len)
u8 *p;
size_t ret = 0;
- if (!spin_trylock_irqsave(&primary_crng.lock, flags))
- return 0;
+ spin_lock_irqsave(&primary_crng.lock, flags);
if (crng_init != 0) {
spin_unlock_irqrestore(&primary_crng.lock, flags);
return 0;
@@ -1086,6 +1085,19 @@ static void mix_interrupt_randomness(struct work_struct *work)
fast_pool->last = jiffies;
local_irq_enable();
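+ /* If the base crng is still uninitialized, try to seed it from this fast pool first. */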
+ if (unlikely(crng_init == 0)) {
+ size_t ret;
+
+ ret = crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool));
+ if (ret) {
+ local_irq_disable();
+ WRITE_ONCE(fast_pool->count, 0);
+ fast_pool->last = jiffies;
+ local_irq_enable();
+ return;
+ }
+ }
+
mix_pool_bytes(pool, sizeof(pool));
credit_entropy_bits(1);
memzero_explicit(pool, sizeof(pool));
@@ -1119,11 +1131,18 @@ void add_interrupt_randomness(int irq)
add_interrupt_bench(cycles);
if (unlikely(crng_init == 0)) {
- if ((new_count >= 64) &&
- crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
- fast_pool->count = 0;
- fast_pool->last = now;
- }
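+ /* crng not yet seeded: defer crng_fast_load() to the worker instead of calling it from hard interrupt context. */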
+ if (new_count & MIX_INFLIGHT)
+ return;
+
+ if (new_count < 64)
+ return;
+
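+ /* Set up the per-CPU work item on first use. */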
+ if (unlikely(!fast_pool->mix.func))
+ INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
+
+ fast_pool->count |= MIX_INFLIGHT;
+ queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
+
return;
}
@@ -600,6 +600,7 @@ asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
+extern void softirq_spawn_ksoftirqd(void);
extern void __raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
@@ -230,11 +230,11 @@ do { \
preempt_count_dec(); \
} while (0)
+#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+
#ifndef CONFIG_PREEMPT_RT
-# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
# define preempt_check_resched_rt() barrier();
#else
-# define preempt_enable_no_resched() preempt_enable()
# define preempt_check_resched_rt() preempt_check_resched()
#endif
@@ -95,13 +95,6 @@ void rcu_init_tasks_generic(void);
static inline void rcu_init_tasks_generic(void) { }
#endif
-#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_TASKS_RCU_GENERIC)
-void rcu_tasks_initiate_self_tests(void);
-#else
-static inline void rcu_tasks_initiate_self_tests(void) {}
-#endif
-
-
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
@@ -1083,8 +1083,8 @@ struct task_struct {
/* Restored if set_restore_sigmask() was used: */
sigset_t saved_sigmask;
struct sigpending pending;
-#ifdef CONFIG_PREEMPT_RT
- struct kernel_siginfo forced_info;
+#ifdef CONFIG_RT_DELAYED_SIGNALS
+ struct kernel_siginfo forced_info;
#endif
unsigned long sas_ss_sp;
size_t sas_ss_size;
@@ -2043,78 +2043,126 @@ static inline int need_resched_now(void)
#endif
#ifdef CONFIG_PREEMPT_RT
-static inline bool task_match_saved_state(struct task_struct *p, long match_state)
-{
- return p->saved_state == match_state;
-}
-static inline bool task_is_traced(struct task_struct *task)
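+/* True if any bit in @state matches tsk->__state or the PREEMPT_RT saved_state. */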
+static inline bool task_state_match_and(struct task_struct *tsk, long state)
{
- bool traced = false;
-
- /* in case the task is sleeping on tasklist_lock */
- raw_spin_lock_irq(&task->pi_lock);
- if (READ_ONCE(task->__state) & __TASK_TRACED)
- traced = true;
- else if (task->saved_state & __TASK_TRACED)
- traced = true;
- raw_spin_unlock_irq(&task->pi_lock);
- return traced;
-}
-
-static inline bool task_is_stopped_or_traced(struct task_struct *task)
-{
- bool traced_stopped = false;
unsigned long flags;
+ bool match = false;
- raw_spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&tsk->pi_lock, flags);
+ if (READ_ONCE(tsk->__state) & state)
+ match = true;
+ else if (tsk->saved_state & state)
+ match = true;
+ raw_spin_unlock_irqrestore(&tsk->pi_lock, flags);
+ return match;
+}
- if (READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED))
- traced_stopped = true;
- else if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
- traced_stopped = true;
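+/* Lockless exact compare of @state against both __state and saved_state. */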
+static inline bool __task_state_match_eq(struct task_struct *tsk, long state)
+{
+ bool match = false;
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
- return traced_stopped;
+ if (READ_ONCE(tsk->__state) == state)
+ match = true;
+ else if (tsk->saved_state == state)
+ match = true;
+ return match;
+}
+
+static inline bool task_state_match_eq(struct task_struct *tsk, long state)
+{
+ unsigned long flags;
+ bool match;
+
+ raw_spin_lock_irqsave(&tsk->pi_lock, flags);
+ match = __task_state_match_eq(tsk, state);
+ raw_spin_unlock_irqrestore(&tsk->pi_lock, flags);
+ return match;
+}
+
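+/* If any bit in @state matches, rewrite the matching state to @new_state under pi_lock. */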
+static inline bool task_state_match_and_set(struct task_struct *tsk, long state,
+ long new_state)
+{
+ unsigned long flags;
+ bool match = false;
+
+ raw_spin_lock_irqsave(&tsk->pi_lock, flags);
+ if (READ_ONCE(tsk->__state) & state) {
+ WRITE_ONCE(tsk->__state, new_state);
+ match = true;
+ } else if (tsk->saved_state & state) {
+ tsk->saved_state = new_state;
+ match = true;
+ }
+ raw_spin_unlock_irqrestore(&tsk->pi_lock, flags);
+ return match;
+}
+
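+/* Exact-match variant of task_state_match_and_set(). */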
+static inline bool task_state_match_eq_set(struct task_struct *tsk, long state,
+ long new_state)
+{
+ unsigned long flags;
+ bool match = false;
+
+ raw_spin_lock_irqsave(&tsk->pi_lock, flags);
+ if (READ_ONCE(tsk->__state) == state) {
+ WRITE_ONCE(tsk->__state, new_state);
+ match = true;
+ } else if (tsk->saved_state == state) {
+ tsk->saved_state = new_state;
+ match = true;
+ }
+ raw_spin_unlock_irqrestore(&tsk->pi_lock, flags);
+ return match;
}
#else
-static inline bool task_match_saved_state(struct task_struct *p, long match_state)
+static inline bool task_state_match_and(struct task_struct *tsk, long state)
{
+ return READ_ONCE(tsk->__state) & state;
+}
+
+static inline bool __task_state_match_eq(struct task_struct *tsk, long state)
+{
+ return READ_ONCE(tsk->__state) == state;
+}
+
+static inline bool task_state_match_eq(struct task_struct *tsk, long state)
+{
+ return __task_state_match_eq(tsk, state);
+}
+
+static inline bool task_state_match_and_set(struct task_struct *tsk, long state,
+ long new_state)
+{
+ if (READ_ONCE(tsk->__state) & state) {
+ WRITE_ONCE(tsk->__state, new_state);
+ return true;
+ }
return false;
}
-static inline bool task_is_traced(struct task_struct *task)
+static inline bool task_state_match_eq_set(struct task_struct *tsk, long state,
+ long new_state)
{
- return READ_ONCE(task->__state) & __TASK_TRACED;
+ if (READ_ONCE(tsk->__state) == state) {
+ WRITE_ONCE(tsk->__state, new_state);
+ return true;
+ }
+ return false;
}
-static inline bool task_is_stopped_or_traced(struct task_struct *task)
-{
- return READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED);
-}
#endif
-static inline bool task_match_state_or_saved(struct task_struct *p,
- long match_state)
+static inline bool task_is_traced(struct task_struct *tsk)
{
- if (READ_ONCE(p->__state) == match_state)
- return true;
-
- return task_match_saved_state(p, match_state);
+ return task_state_match_and(tsk, __TASK_TRACED);
}
-static inline bool task_match_state_lock(struct task_struct *p,
- long match_state)
+static inline bool task_is_stopped_or_traced(struct task_struct *tsk)
{
- bool match;
-
- raw_spin_lock_irq(&p->pi_lock);
- match = task_match_state_or_saved(p, match_state);
- raw_spin_unlock_irq(&p->pi_lock);
-
- return match;
+ return task_state_match_and(tsk, __TASK_STOPPED | __TASK_TRACED);
}
/*
@@ -1598,9 +1598,9 @@ static noinline void __init kernel_init_freeable(void)
init_mm_internals();
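+ /* Spawn ksoftirqd (and the RT ktimers threads) before the RCU-tasks self-tests below. */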
+ softirq_spawn_ksoftirqd();
rcu_init_tasks_generic();
do_pre_smp_initcalls();
- rcu_tasks_initiate_self_tests();
lockup_detector_init();
smp_init();
@@ -138,4 +138,14 @@ config SCHED_CORE
which is the likely usage by Linux distributions, there should
be no measurable impact on performance.
+config ARCH_WANTS_RT_DELAYED_SIGNALS
+ bool
+ help
+ This option is selected by architectures where raising signals
+ can happen in atomic contexts on PREEMPT_RT enabled kernels. This
+ option delays raising the signal until the return to user space
+ loop where it is also delivered. X86 requires this to deliver
+ signals from trap handlers which run on IST stacks.
+
+config RT_DELAYED_SIGNALS
+ def_bool PREEMPT_RT && ARCH_WANTS_RT_DELAYED_SIGNALS
@@ -148,6 +148,18 @@ static void handle_signal_work(struct pt_regs *regs, unsigned long ti_work)
arch_do_signal_or_restart(regs, ti_work & _TIF_SIGPENDING);
}
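+/* Deliver a signal that force_sig_delayed() stashed because it was raised in atomic context. */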
+#ifdef CONFIG_RT_DELAYED_SIGNALS
+static inline void raise_delayed_signal(void)
+{
+ if (unlikely(current->forced_info.si_signo)) {
+ force_sig_info(&current->forced_info);
+ current->forced_info.si_signo = 0;
+ }
+}
+#else
+static inline void raise_delayed_signal(void) { }
+#endif
+
static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
unsigned long ti_work)
{
@@ -162,13 +174,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
if (ti_work & _TIF_NEED_RESCHED_MASK)
schedule();
-#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
- if (unlikely(current->forced_info.si_signo)) {
- struct task_struct *t = current;
- force_sig_info(&t->forced_info);
- t->forced_info.si_signo = 0;
- }
-#endif
+ raise_delayed_signal();
if (ti_work & _TIF_UPROBE)
uprobe_notify_resume(regs);
@@ -195,21 +195,10 @@ static bool ptrace_freeze_traced(struct task_struct *task)
return ret;
spin_lock_irq(&task->sighand->siglock);
- if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
- !__fatal_signal_pending(task)) {
-#ifdef CONFIG_PREEMPT_RT
- unsigned long flags;
+ if (!looks_like_a_spurious_pid(task) && !__fatal_signal_pending(task)) {
- raw_spin_lock_irqsave(&task->pi_lock, flags);
- if (READ_ONCE(task->__state) & __TASK_TRACED)
- WRITE_ONCE(task->__state, __TASK_TRACED);
- else
- task->saved_state = __TASK_TRACED;
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-#else
- WRITE_ONCE(task->__state, __TASK_TRACED);
-#endif
- ret = true;
+ ret = task_state_match_and_set(task, __TASK_TRACED,
+ __TASK_TRACED);
}
spin_unlock_irq(&task->sighand->siglock);
@@ -218,8 +207,7 @@ static bool ptrace_freeze_traced(struct task_struct *task)
static void ptrace_unfreeze_traced(struct task_struct *task)
{
- unsigned long flags;
- bool frozen = true;
+ bool frozen;
if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
READ_ONCE(task->__state) != __TASK_TRACED)
@@ -232,17 +220,8 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
* Recheck state under the lock to close this race.
*/
spin_lock_irq(&task->sighand->siglock);
- raw_spin_lock_irqsave(&task->pi_lock, flags);
- if (READ_ONCE(task->__state) == __TASK_TRACED)
- WRITE_ONCE(task->__state, TASK_TRACED);
-#ifdef CONFIG_PREEMPT_RT
- else if (task->saved_state == __TASK_TRACED)
- task->saved_state = TASK_TRACED;
-#endif
- else
- frozen = false;
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ frozen = task_state_match_eq_set(task, __TASK_TRACED, TASK_TRACED);
if (frozen && __fatal_signal_pending(task))
wake_up_state(task, __TASK_TRACED);
@@ -46,7 +46,7 @@ struct rcu_tasks_percpu {
/**
* struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
- * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
+ * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
* @cbs_gbl_lock: Lock protecting callback list.
* @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
* @gp_func: This flavor's grace-period-wait function.
@@ -77,7 +77,7 @@ struct rcu_tasks_percpu {
* @kname: This flavor's kthread name.
*/
struct rcu_tasks {
- struct wait_queue_head cbs_wq;
+ struct rcuwait cbs_wait;
raw_spinlock_t cbs_gbl_lock;
int gp_state;
int gp_sleep;
@@ -113,11 +113,11 @@ static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
#define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \
.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \
- .rtp_irq_work = IRQ_WORK_INIT(call_rcu_tasks_iw_wakeup), \
+ .rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \
}; \
static struct rcu_tasks rt_name = \
{ \
- .cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq), \
+ .cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \
.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \
.gp_func = gp, \
.call_func = call, \
@@ -261,7 +261,7 @@ static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
rtp = rtpcp->rtpp;
- wake_up(&rtp->cbs_wq);
+ rcuwait_wake_up(&rtp->cbs_wait);
}
// Enqueue a callback for the specified flavor of Tasks RCU.
@@ -509,7 +509,9 @@ static int __noreturn rcu_tasks_kthread(void *arg)
set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
/* If there were none, wait a bit and start over. */
- wait_event_idle(rtp->cbs_wq, (needgpcb = rcu_tasks_need_gpcb(rtp)));
+ rcuwait_wait_event(&rtp->cbs_wait,
+ (needgpcb = rcu_tasks_need_gpcb(rtp)),
+ TASK_IDLE);
if (needgpcb & 0x2) {
// Wait for one grace period.
@@ -1661,7 +1663,7 @@ static void test_rcu_tasks_callback(struct rcu_head *rhp)
rttd->notrun = true;
}
-void rcu_tasks_initiate_self_tests(void)
+static void rcu_tasks_initiate_self_tests(void)
{
pr_info("Running RCU-tasks wait API self tests\n");
#ifdef CONFIG_TASKS_RCU
@@ -1698,7 +1700,9 @@ static int rcu_tasks_verify_self_tests(void)
return ret;
}
late_initcall(rcu_tasks_verify_self_tests);
-#endif /* #ifdef CONFIG_PROVE_RCU */
+#else /* #ifdef CONFIG_PROVE_RCU */
+static void rcu_tasks_initiate_self_tests(void) { }
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
void __init rcu_init_tasks_generic(void)
{
@@ -1713,6 +1717,9 @@ void __init rcu_init_tasks_generic(void)
#ifdef CONFIG_TASKS_TRACE_RCU
rcu_spawn_tasks_trace_kthread();
#endif
+
+ // Run the self-tests.
+ rcu_tasks_initiate_self_tests();
}
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
@@ -3281,7 +3281,8 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
- if (match_state && !task_match_state_lock(p, match_state))
+ if (match_state &&
+ unlikely(!task_state_match_eq(p, match_state)))
return 0;
cpu_relax();
}
@@ -3296,7 +3297,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
- if (!match_state || task_match_state_or_saved(p, match_state))
+ if (!match_state || __task_state_match_eq(p, match_state))
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &rf);
@@ -1307,6 +1307,43 @@ enum sig_handler {
HANDLER_EXIT, /* Only visible as the process exit code */
};
+/*
+ * On some architectures, PREEMPT_RT has to delay sending a signal from a
+ * trap since it cannot enable preemption, and the signal code's
+ * spin_locks turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME
+ * which will send the signal on exit of the trap.
+ */
+#ifdef CONFIG_RT_DELAYED_SIGNALS
+static inline bool force_sig_delayed(struct kernel_siginfo *info,
+ struct task_struct *t)
+{
+ if (!in_atomic())
+ return false;
+
+ if (WARN_ON_ONCE(t->forced_info.si_signo))
+ return true;
+
+ if (is_si_special(info)) {
+ WARN_ON_ONCE(info != SEND_SIG_PRIV);
+ t->forced_info.si_signo = info->si_signo;
+ t->forced_info.si_errno = 0;
+ t->forced_info.si_code = SI_KERNEL;
+ t->forced_info.si_pid = 0;
+ t->forced_info.si_uid = 0;
+ } else {
+ t->forced_info = *info;
+ }
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+ return true;
+}
+#else
+static inline bool force_sig_delayed(struct kernel_siginfo *info,
+ struct task_struct *t)
+{
+ return false;
+}
+#endif
+
/*
* Force a signal that the process can't ignore: if necessary
* we unblock the signal and change any SIG_IGN to SIG_DFL.
@@ -1327,34 +1364,9 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
struct k_sigaction *action;
int sig = info->si_signo;
- /*
- * On some archs, PREEMPT_RT has to delay sending a signal from a trap
- * since it can not enable preemption, and the signal code's spin_locks
- * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
- * send the signal on exit of the trap.
- */
-#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
- if (in_atomic()) {
- struct task_struct *t = current;
-
- if (WARN_ON_ONCE(t->forced_info.si_signo))
- return 0;
-
- if (is_si_special(info)) {
- WARN_ON_ONCE(info != SEND_SIG_PRIV);
- t->forced_info.si_signo = info->si_signo;
- t->forced_info.si_errno = 0;
- t->forced_info.si_code = SI_KERNEL;
- t->forced_info.si_pid = 0;
- t->forced_info.si_uid = 0;
- } else {
- t->forced_info = *info;
- }
-
- set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+ if (force_sig_delayed(info, t))
return 0;
- }
-#endif
+
spin_lock_irqsave(&t->sighand->siglock, flags);
action = &t->sighand->action[sig-1];
ignored = action->sa.sa_handler == SIG_IGN;
@@ -1037,17 +1037,14 @@ static struct smp_hotplug_thread timer_threads = {
.thread_comm = "ktimers/%u",
};
-static __init int spawn_ksoftirqd(void)
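+/* Now called directly from kernel_init_freeable() instead of via early_initcall(). */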
+__init void softirq_spawn_ksoftirqd(void)
{
cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
takeover_tasklets);
BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
if (IS_ENABLED(CONFIG_PREEMPT_RT))
BUG_ON(smpboot_register_percpu_thread(&timer_threads));
-
- return 0;
}
-early_initcall(spawn_ksoftirqd);
/*
* [ These __weak aliases are kept in a separate compilation unit, so that
@@ -1 +1 @@
--rt10
+-rt11