diff mbox

[Xen-devel,v4,5/5] xen: arm: correct terminology for cache flush macros

Message ID 1391775176-30313-5-git-send-email-ian.campbell@citrix.com
State Superseded
Headers show

Commit Message

Ian Campbell Feb. 7, 2014, 12:12 p.m. UTC
The term "flush" is slightly ambiguous. The correct ARM term for this
operation is clean, as opposed to clean+invalidate for which we also now have a
function.

This is a pure rename, no functional change.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
This could easily be left for 4.5.
---
 xen/arch/arm/guestcopy.c         |    2 +-
 xen/arch/arm/kernel.c            |    2 +-
 xen/arch/arm/mm.c                |   16 ++++++++--------
 xen/arch/arm/smpboot.c           |    2 +-
 xen/include/asm-arm/arm32/page.h |    2 +-
 xen/include/asm-arm/arm64/page.h |    2 +-
 xen/include/asm-arm/page.h       |   10 +++++-----
 7 files changed, 18 insertions(+), 18 deletions(-)

Comments

Julien Grall Feb. 7, 2014, 1:10 p.m. UTC | #1
On 07/02/14 12:12, Ian Campbell wrote:
> The term "flush" is slightly ambiguous. The correct ARM term for for this
> operaton is clean, as opposed to clean+invalidate for which we also now have a
> function.
>
> This is a pure rename, no functional change.
>
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Julien Grall <julien.grall@linaro.org>

> ---
> This could easily be left for 4.5.

It would be nice to have a common nomenclature for the functions (a bit 
like your TLB patch series).

But, if the patch doesn't go into Xen 4.4, it won't change anything for 
backporting. The function name is not reused.
diff mbox

Patch

diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c
index bd0a355..af0af6b 100644
--- a/xen/arch/arm/guestcopy.c
+++ b/xen/arch/arm/guestcopy.c
@@ -24,7 +24,7 @@  static unsigned long raw_copy_to_guest_helper(void *to, const void *from,
         p += offset;
         memcpy(p, from, size);
         if ( flush_dcache )
-            flush_xen_dcache_va_range(p, size);
+            clean_xen_dcache_va_range(p, size);
 
         unmap_domain_page(p - offset);
         len -= size;
diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c
index 6a5772b..1e3107d 100644
--- a/xen/arch/arm/kernel.c
+++ b/xen/arch/arm/kernel.c
@@ -58,7 +58,7 @@  void copy_from_paddr(void *dst, paddr_t paddr, unsigned long len, int attrindx)
 
         set_fixmap(FIXMAP_MISC, p, attrindx);
         memcpy(dst, src + s, l);
-        flush_xen_dcache_va_range(dst, l);
+        clean_xen_dcache_va_range(dst, l);
 
         paddr += l;
         dst += l;
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index d2cfe64..4c5cff0 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -480,13 +480,13 @@  void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
     /* Clear the copy of the boot pagetables. Each secondary CPU
      * rebuilds these itself (see head.S) */
     memset(boot_pgtable, 0x0, PAGE_SIZE);
-    flush_xen_dcache(boot_pgtable);
+    clean_xen_dcache(boot_pgtable);
 #ifdef CONFIG_ARM_64
     memset(boot_first, 0x0, PAGE_SIZE);
-    flush_xen_dcache(boot_first);
+    clean_xen_dcache(boot_first);
 #endif
     memset(boot_second, 0x0, PAGE_SIZE);
-    flush_xen_dcache(boot_second);
+    clean_xen_dcache(boot_second);
 
     /* Break up the Xen mapping into 4k pages and protect them separately. */
     for ( i = 0; i < LPAE_ENTRIES; i++ )
@@ -524,7 +524,7 @@  void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
 
     /* Make sure it is clear */
     memset(this_cpu(xen_dommap), 0, DOMHEAP_SECOND_PAGES*PAGE_SIZE);
-    flush_xen_dcache_va_range(this_cpu(xen_dommap),
+    clean_xen_dcache_va_range(this_cpu(xen_dommap),
                               DOMHEAP_SECOND_PAGES*PAGE_SIZE);
 #endif
 }
@@ -535,7 +535,7 @@  int init_secondary_pagetables(int cpu)
     /* Set init_ttbr for this CPU coming up. All CPus share a single setof
      * pagetables, but rewrite it each time for consistency with 32 bit. */
     init_ttbr = (uintptr_t) xen_pgtable + phys_offset;
-    flush_xen_dcache(init_ttbr);
+    clean_xen_dcache(init_ttbr);
     return 0;
 }
 #else
@@ -570,15 +570,15 @@  int init_secondary_pagetables(int cpu)
         write_pte(&first[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)], pte);
     }
 
-    flush_xen_dcache_va_range(first, PAGE_SIZE);
-    flush_xen_dcache_va_range(domheap, DOMHEAP_SECOND_PAGES*PAGE_SIZE);
+    clean_xen_dcache_va_range(first, PAGE_SIZE);
+    clean_xen_dcache_va_range(domheap, DOMHEAP_SECOND_PAGES*PAGE_SIZE);
 
     per_cpu(xen_pgtable, cpu) = first;
     per_cpu(xen_dommap, cpu) = domheap;
 
     /* Set init_ttbr for this CPU coming up */
     init_ttbr = __pa(first);
-    flush_xen_dcache(init_ttbr);
+    clean_xen_dcache(init_ttbr);
 
     return 0;
 }
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index c53c765..a829957 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -378,7 +378,7 @@  int __cpu_up(unsigned int cpu)
 
     /* Open the gate for this CPU */
     smp_up_cpu = cpu_logical_map(cpu);
-    flush_xen_dcache(smp_up_cpu);
+    clean_xen_dcache(smp_up_cpu);
 
     rc = arch_cpu_up(cpu);
 
diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
index cb6add4..b8221ca 100644
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -20,7 +20,7 @@  static inline void write_pte(lpae_t *p, lpae_t pte)
 }
 
 /* Inline ASM to flush dcache on register R (may be an inline asm operand) */
-#define __flush_xen_dcache_one(R) STORE_CP32(R, DCCMVAC)
+#define __clean_xen_dcache_one(R) STORE_CP32(R, DCCMVAC)
 
 /* Inline ASM to clean and invalidate dcache on register R (may be an
  * inline asm operand) */
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
index baf8903..3352821 100644
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -15,7 +15,7 @@  static inline void write_pte(lpae_t *p, lpae_t pte)
 }
 
 /* Inline ASM to flush dcache on register R (may be an inline asm operand) */
-#define __flush_xen_dcache_one(R) "dc cvac, %" #R ";"
+#define __clean_xen_dcache_one(R) "dc cvac, %" #R ";"
 
 /* Inline ASM to clean and invalidate dcache on register R (may be an
  * inline asm operand) */
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 67d64c9..a577942 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -229,26 +229,26 @@  extern size_t cacheline_bytes;
 /* Function for flushing medium-sized areas.
  * if 'range' is large enough we might want to use model-specific
  * full-cache flushes. */
-static inline void flush_xen_dcache_va_range(void *p, unsigned long size)
+static inline void clean_xen_dcache_va_range(void *p, unsigned long size)
 {
     void *end;
     dsb();           /* So the CPU issues all writes to the range */
     for ( end = p + size; p < end; p += cacheline_bytes )
-        asm volatile (__flush_xen_dcache_one(0) : : "r" (p));
+        asm volatile (__clean_xen_dcache_one(0) : : "r" (p));
     dsb();           /* So we know the flushes happen before continuing */
 }
 
 /* Macro for flushing a single small item.  The predicate is always
  * compile-time constant so this will compile down to 3 instructions in
  * the common case. */
-#define flush_xen_dcache(x) do {                                        \
+#define clean_xen_dcache(x) do {                                        \
     typeof(x) *_p = &(x);                                               \
     if ( sizeof(x) > MIN_CACHELINE_BYTES || sizeof(x) > alignof(x) )    \
-        flush_xen_dcache_va_range(_p, sizeof(x));                       \
+        clean_xen_dcache_va_range(_p, sizeof(x));                       \
     else                                                                \
         asm volatile (                                                  \
             "dsb sy;"   /* Finish all earlier writes */                 \
-            __flush_xen_dcache_one(0)                                   \
+            __clean_xen_dcache_one(0)                                   \
             "dsb sy;"   /* Finish flush before continuing */            \
             : : "r" (_p), "m" (*_p));                                   \
 } while (0)