@@ -204,16 +204,21 @@ static u64 new_context(struct asid_info *info, atomic64_t *pasid)
return idx2asid(info, asid) | generation;
}
-void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
+static void asid_new_context(struct asid_info *info, atomic64_t *pasid,
+ unsigned int cpu);
+
+/*
+ * Check if the ASID is still valid for the context. If not, generate a new ASID.
+ *
+ * @pasid: Pointer to the current ASID batch
+ * @cpu: current CPU ID. Must have been acquired through get_cpu()
+ */
+static void asid_check_context(struct asid_info *info,
+ atomic64_t *pasid, unsigned int cpu)
{
- unsigned long flags;
u64 asid, old_active_asid;
- struct asid_info *info = &asid_info;
- if (system_supports_cnp())
- cpu_set_reserved_ttbr0();
-
- asid = atomic64_read(&mm->context.id);
+ asid = atomic64_read(pasid);
/*
* The memory ordering here is subtle.
@@ -234,14 +239,30 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
!((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
old_active_asid, asid))
- goto switch_mm_fastpath;
+ return;
+
+ asid_new_context(info, pasid, cpu);
+}
+
+/*
+ * Generate a new ASID for the context.
+ *
+ * @pasid: Pointer to the currently allocated ASID batch. It will be
+ *         updated with the new ASID batch.
+ * @cpu: current CPU ID. Must have been acquired through get_cpu()
+ */
+static void asid_new_context(struct asid_info *info, atomic64_t *pasid,
+ unsigned int cpu)
+{
+ unsigned long flags;
+ u64 asid;
raw_spin_lock_irqsave(&info->lock, flags);
/* Check that our ASID belongs to the current generation. */
- asid = atomic64_read(&mm->context.id);
+ asid = atomic64_read(pasid);
if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
- asid = new_context(info, &mm->context.id);
- atomic64_set(&mm->context.id, asid);
+ asid = new_context(info, pasid);
+ atomic64_set(pasid, asid);
}
if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
@@ -249,8 +270,14 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
atomic64_set(&active_asid(info, cpu), asid);
raw_spin_unlock_irqrestore(&info->lock, flags);
+}
+
+void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
+{
+ if (system_supports_cnp())
+ cpu_set_reserved_ttbr0();
-switch_mm_fastpath:
+ asid_check_context(&asid_info, &mm->context.id, cpu);
arm64_apply_bp_hardening();
The function check_and_switch_context is used to:
    1) Check whether the ASID is still valid
    2) Generate a new one if it is not valid
    3) Switch the context

While the latter is specific to the MM subsystem, the rest could be part
of the generic ASID allocator.

After this patch, the function is split in 3 parts corresponding to the
new functions:
    1) asid_check_context: Check if the ASID is still valid
    2) asid_new_context: Generate a new ASID for the context
    3) check_and_switch_context: Call 1) and 2) and switch the context

1) and 2) have not been merged into a single function because we want to
avoid adding a branch when the ASID is still valid. This will matter
when the code is moved to a separate file later on, as 1) will reside in
the header as a static inline function.

Signed-off-by: Julien Grall <julien.grall@arm.com>

---
    Will wants to avoid adding a branch when the ASID is still valid,
    so 1) and 2) are kept as separate functions. The former will move to
    a new header and be made static inline.
---
 arch/arm64/mm/context.c | 51 +++++++++++++++++++++++++++++++++++++------------
 1 file changed, 39 insertions(+), 12 deletions(-)

--
2.11.0
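
For illustration, a minimal sketch of how the split could look once the
allocator moves out of arch/arm64/mm/context.c, with the fast path as a
static inline in a header and the slow path out of line. This is not part
of the patch; the header placement is an assumption, and the sketch relies
on struct asid_info, active_asid() and the atomic64_* helpers being
visible from that header:

/* Slow path: would live out of line in the allocator's .c file. */
void asid_new_context(struct asid_info *info, atomic64_t *pasid,
		      unsigned int cpu);

/*
 * Fast path: static inline so the common case (ASID still valid for the
 * current generation) costs no function call and takes no lock. The slow
 * path is a separate function so that inlining this does not pull it in.
 */
static inline void asid_check_context(struct asid_info *info,
				      atomic64_t *pasid, unsigned int cpu)
{
	u64 asid = atomic64_read(pasid);
	u64 old_active_asid = atomic64_read(&active_asid(info, cpu));

	if (old_active_asid &&
	    !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
	    atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
				     old_active_asid, asid))
		return;

	asid_new_context(info, pasid, cpu);
}

Keeping asid_new_context() out of line means every call site inlines only
the generation check and cmpxchg, while the lock and rollover handling
stay in one place.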