@@ -335,7 +335,7 @@ void set_fixmap(unsigned map, mfn_t mfn, unsigned int flags)
pte.pt.table = 1; /* 4k mappings always have this bit set */
pte.pt.xn = 1;
write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
- flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
+ flush_xen_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
}
/* Remove a mapping from a fixmap entry */
@@ -343,7 +343,7 @@ void clear_fixmap(unsigned map)
{
lpae_t pte = {0};
write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
- flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
+ flush_xen_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
}
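
Both fixmap paths above end the PTE update with an inner-shareable flush of exactly one page, since another processor may still hold the old translation. For illustration, a typical caller in the style of copy_from_paddr() (the slot index, attribute, and surrounding variables are assumptions for the sketch, not part of this patch):

    /* Sketch: map a frame at a fixed slot, copy out of it, tear it down. */
    set_fixmap(FIXMAP_MISC, mfn, PAGE_HYPERVISOR_WC); /* flushes the slot VA */
    memcpy(dst, (void *)FIXMAP_ADDR(FIXMAP_MISC), PAGE_SIZE);
    clear_fixmap(FIXMAP_MISC);                        /* flushes it again */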
/* Create Xen's mappings of memory.
@@ -377,7 +377,7 @@ static void __init create_mappings(lpae_t *second,
write_pte(p + i, pte);
pte.pt.base += 1 << LPAE_SHIFT;
}
- flush_xen_data_tlb_local();
+ flush_xen_tlb_local();
}
#ifdef CONFIG_DOMAIN_PAGE
@@ -455,7 +455,7 @@ void *map_domain_page(mfn_t mfn)
* We may not have flushed this specific subpage at map time,
* since we only flush the 4k page not the superpage
*/
- flush_xen_data_tlb_range_va_local(va, PAGE_SIZE);
+ flush_xen_tlb_range_va_local(va, PAGE_SIZE);
return (void *)va;
}
@@ -598,7 +598,7 @@ void __init remove_early_mappings(void)
write_pte(xen_second + second_table_offset(BOOT_FDT_VIRT_START), pte);
write_pte(xen_second + second_table_offset(BOOT_FDT_VIRT_START + SZ_2M),
pte);
- flush_xen_data_tlb_range_va(BOOT_FDT_VIRT_START, BOOT_FDT_SLOT_SIZE);
+ flush_xen_tlb_range_va(BOOT_FDT_VIRT_START, BOOT_FDT_SLOT_SIZE);
}
/*
@@ -615,7 +615,7 @@ static void xen_pt_enforce_wnx(void)
* before flushing the TLBs.
*/
isb();
- flush_xen_data_tlb_local();
+ flush_xen_tlb_local();
}
extern void switch_ttbr(uint64_t ttbr);
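
The hunk above is the tail of xen_pt_enforce_wnx(), which has just set SCTLR_EL2.WXN; the TLBs are permitted to cache that control bit, so the system-register write must be synchronized with an isb() before the flush can take effect. A sketch of the full function, reconstructing the context the hunk elides under the assumption that it uses the WRITE_SYSREG32()/SCTLR_WXN spellings found elsewhere in this tree:

    static void xen_pt_enforce_wnx(void)
    {
        WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
        /*
         * The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized
         * before flushing the TLBs.
         */
        isb();
        flush_xen_tlb_local();
    }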
@@ -879,7 +879,7 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
vaddr += FIRST_SIZE;
}
- flush_xen_data_tlb_local();
+ flush_xen_tlb_local();
}
#endif
@@ -1052,7 +1052,7 @@ static int create_xen_entries(enum xenmap_operation op,
BUG();
}
}
- flush_xen_data_tlb_range_va(virt, PAGE_SIZE * nr_mfns);
+ flush_xen_tlb_range_va(virt, PAGE_SIZE * nr_mfns);
rc = 0;
@@ -1127,7 +1127,7 @@ static void set_pte_flags_on_range(const char *p, unsigned long l, enum mg mg)
}
write_pte(xen_xenmap + i, pte);
}
- flush_xen_data_tlb_local();
+ flush_xen_tlb_local();
}
/* Release all __init and __initdata ranges to be reused */
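
Every mm.c site touched above follows the same shape: publish the new entry with write_pte(), whose barriers make the PTE visible to the table walker, then invalidate whatever the TLB may still hold for the affected range. A minimal sketch of that pattern (the helper itself is hypothetical; mfn_to_xen_entry() and MT_NORMAL are assumed to be the ones used elsewhere in this file):

    /* Hypothetical helper showing the map-then-flush pattern used above. */
    static void remap_one_xen_page(lpae_t *entry, vaddr_t va, mfn_t mfn)
    {
        lpae_t pte = mfn_to_xen_entry(mfn, MT_NORMAL);

        pte.pt.table = 1;                      /* 4k mappings always set this */
        write_pte(entry, pte);                 /* publish the new mapping */
        flush_xen_tlb_range_va(va, PAGE_SIZE); /* drop stale translations */
    }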
@@ -61,12 +61,8 @@ static inline void invalidate_icache_local(void)
isb(); /* Synchronize fetched instruction stream. */
}
-/*
- * Flush all hypervisor mappings from the data TLB of the local
- * processor. This is not sufficient when changing code mappings or
- * for self modifying code.
- */
-static inline void flush_xen_data_tlb_local(void)
+/* Flush all hypervisor mappings from the TLB of the local processor. */
+static inline void flush_xen_tlb_local(void)
{
asm volatile("dsb;" /* Ensure preceding are visible */
CMD_CP32(TLBIALLH)
@@ -76,14 +72,13 @@ static inline void flush_xen_data_tlb_local(void)
}
/* Flush TLB of local processor for address va. */
-static inline void __flush_xen_data_tlb_one_local(vaddr_t va)
+static inline void __flush_xen_tlb_one_local(vaddr_t va)
{
asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory");
}
-/* Flush TLB of all processors in the inner-shareable domain for
- * address va. */
-static inline void __flush_xen_data_tlb_one(vaddr_t va)
+/* Flush TLB of all processors in the inner-shareable domain for address va. */
+static inline void __flush_xen_tlb_one(vaddr_t va)
{
asm volatile(STORE_CP32(0, TLBIMVAHIS) : : "r" (va) : "memory");
}
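
On arm32 the helpers are built from Hyp-mode CP15 maintenance operations. For readers without cpregs.h to hand, the macros above should expand to roughly the following (encodings quoted from the ARMv7-A manual, worth double-checking against the tree):

    /*
     * CMD_CP32(TLBIALLH)        ~ mcr p15, 4, r0, c8, c7, 0  (all, Hyp, local)
     * STORE_CP32(0, TLBIMVAH)   ~ mcr p15, 4, %0, c8, c7, 1  (by MVA, Hyp, local)
     * STORE_CP32(0, TLBIMVAHIS) ~ mcr p15, 4, %0, c8, c3, 1  (by MVA, Hyp, IS)
     */

Note that the by-MVA forms take the virtual address unshifted; the low bits of the register are ignored.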
@@ -45,12 +45,8 @@ static inline void invalidate_icache_local(void)
isb();
}
-/*
- * Flush all hypervisor mappings from the data TLB of the local
- * processor. This is not sufficient when changing code mappings or
- * for self modifying code.
- */
-static inline void flush_xen_data_tlb_local(void)
+/* Flush all hypervisor mappings from the TLB of the local processor. */
+static inline void flush_xen_tlb_local(void)
{
asm volatile (
"dsb sy;" /* Ensure visibility of PTE writes */
@@ -61,14 +57,13 @@ static inline void flush_xen_data_tlb_local(void)
}
/* Flush TLB of local processor for address va. */
-static inline void __flush_xen_data_tlb_one_local(vaddr_t va)
+static inline void __flush_xen_tlb_one_local(vaddr_t va)
{
asm volatile("tlbi vae2, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
}
-/* Flush TLB of all processors in the inner-shareable domain for
- * address va. */
-static inline void __flush_xen_data_tlb_one(vaddr_t va)
+/* Flush TLB of all processors in the inner-shareable domain for address va. */
+static inline void __flush_xen_tlb_one(vaddr_t va)
{
asm volatile("tlbi vae2is, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
}
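
The operand convention differs from arm32, as the hunks themselves show: the arm32 ops take the MVA directly, while TLBI VAE2{IS} expects the page number, VA[55:12], in the low bits of the register, hence the va >> PAGE_SHIFT. Spelled out (the helper name is hypothetical):

    /* Hypothetical: build the Xt operand for TLBI VAE2{IS}, 4K granule. */
    static inline uint64_t tlbi_vae2_operand(vaddr_t va)
    {
        return va >> PAGE_SHIFT;   /* VA[55:12] in Xt[43:0]; other fields res0 */
    }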
@@ -234,18 +234,18 @@ static inline int clean_and_invalidate_dcache_va_range
} while (0)
/*
- * Flush a range of VA's hypervisor mappings from the data TLB of the
- * local processor. This is not sufficient when changing code mappings
- * or for self modifying code.
+ * Flush a range of VA's hypervisor mappings from the TLB of the local
+ * processor.
*/
-static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
-                                                     unsigned long size)
+static inline void flush_xen_tlb_range_va_local(vaddr_t va,
+                                                unsigned long size)
{
- unsigned long end = va + size;
+ vaddr_t end = va + size;
+
dsb(sy); /* Ensure preceding are visible */
while ( va < end )
{
- __flush_xen_data_tlb_one_local(va);
+ __flush_xen_tlb_one_local(va);
va += PAGE_SIZE;
}
dsb(sy); /* Ensure completion of the TLB flush */
@@ -253,18 +253,18 @@ static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
}
/*
- * Flush a range of VA's hypervisor mappings from the data TLB of all
- * processors in the inner-shareable domain. This is not sufficient
- * when changing code mappings or for self modifying code.
+ * Flush a range of VA's hypervisor mappings from the TLB of all
+ * processors in the inner-shareable domain.
*/
-static inline void flush_xen_data_tlb_range_va(unsigned long va,
-                                               unsigned long size)
+static inline void flush_xen_tlb_range_va(vaddr_t va,
+                                          unsigned long size)
{
- unsigned long end = va + size;
+ vaddr_t end = va + size;
+
dsb(sy); /* Ensure preceding are visible */
while ( va < end )
{
- __flush_xen_data_tlb_one(va);
+ __flush_xen_tlb_one(va);
va += PAGE_SIZE;
}
dsb(sy); /* Ensure completion of the TLB flush */
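
Finally, both range helpers invalidate one page at a time between a pair of dsb(sy) barriers, so the cost is linear in the size of the range: flushing a 2MB slot issues 512 individual TLBI operations. A hypothetical caller, in the spirit of remove_early_mappings() above:

    /* Hypothetical: unmap a 2MB window and flush it inner-shareable. */
    static void clear_2mb_window(lpae_t *third, vaddr_t base)
    {
        unsigned int i;
        lpae_t pte = {0};                    /* invalid entry */

        for ( i = 0; i < SZ_2M / PAGE_SIZE; i++ )
            write_pte(third + i, pte);

        flush_xen_tlb_range_va(base, SZ_2M); /* 512 __flush_xen_tlb_one() calls */
    }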