arm64: mm: Add asid_gen_match() helper
author		Jean-Philippe Brucker <jean-philippe@linaro.org>
		Tue, 19 May 2020 17:54:43 +0000 (19:54 +0200)
committer	Will Deacon <will@kernel.org>
		Thu, 21 May 2020 13:46:14 +0000 (14:46 +0100)
Add a macro to check if an ASID is from the current generation, since a
subsequent patch will introduce a third user for this test.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Link: https://lore.kernel.org/r/20200519175502.2504091-6-jean-philippe@linaro.org
Signed-off-by: Will Deacon <will@kernel.org>
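
The generation lives in the upper bits of mm->context.id, with the hardware
ASID in the low asid_bits bits, so XOR-ing the ID against the global
asid_generation counter and shifting the ASID bits out leaves zero exactly
when the ID was allocated in the current generation. Below is a minimal
user-space sketch of that check; the fixed 16-bit width, the make_ctx_id()
helper and the plain uint64_t counters are illustrative stand-ins for the
kernel's boot-time asid_bits and atomic64_t state, not kernel code.

	#include <stdint.h>
	#include <stdio.h>

	#define ASID_BITS 16	/* stand-in for the boot-time asid_bits */

	/* A context ID packs the generation above the hardware ASID. */
	static uint64_t make_ctx_id(uint64_t generation, uint64_t hw_asid)
	{
		return (generation << ASID_BITS) |
		       (hw_asid & ((1ULL << ASID_BITS) - 1));
	}

	/*
	 * Same shape as asid_gen_match(): the XOR leaves the generation
	 * difference in the high bits; shifting the ASID bits out leaves
	 * zero iff the ID belongs to the current generation.
	 */
	static int gen_match(uint64_t ctx_id, uint64_t generation_counter)
	{
		return !((ctx_id ^ generation_counter) >> ASID_BITS);
	}

	int main(void)
	{
		/* The counter advances in steps of 1 << ASID_BITS. */
		uint64_t gen = 3ULL << ASID_BITS;

		uint64_t fresh = make_ctx_id(3, 0x42); /* current generation */
		uint64_t stale = make_ctx_id(2, 0x42); /* predates a rollover */

		printf("fresh matches: %d\n", gen_match(fresh, gen)); /* 1 */
		printf("stale matches: %d\n", gen_match(stale, gen)); /* 0 */
		return 0;
	}

This is why the diff below can use one macro at both call sites: the
lock-free fastpath only attempts its cmpxchg when the generation still
matches, and the slowpath re-reads the ID under cpu_asid_lock and allocates
a new ASID when it does not.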
arch/arm64/mm/context.c

index 9b26f9a88724f11d3649152531c57b5b673b91e0..d702d60e64dab00f28a1918cbf714e6b7e1a81fc 100644
@@ -92,6 +92,9 @@ static void set_reserved_asid_bits(void)
                bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
 }
 
+#define asid_gen_match(asid) \
+       (!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))
+
 static void flush_context(void)
 {
        int i;
@@ -220,8 +223,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
         *   because atomic RmWs are totally ordered for a given location.
         */
        old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
-       if (old_active_asid &&
-           !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
+       if (old_active_asid && asid_gen_match(asid) &&
            atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
                                     old_active_asid, asid))
                goto switch_mm_fastpath;
@@ -229,7 +231,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
        /* Check that our ASID belongs to the current generation. */
        asid = atomic64_read(&mm->context.id);
-       if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
+       if (!asid_gen_match(asid)) {
                asid = new_context(mm);
                atomic64_set(&mm->context.id, asid);
        }