[Xen-devel,1/4] xen/arm: introduce invalidate_xen_dcache_va_range

Message ID: 1412244158-12124-1-git-send-email-stefano.stabellini@eu.citrix.com
State: New

Commit Message

Stefano Stabellini Oct. 2, 2014, 10:02 a.m. UTC
Take care to handle non-cacheline-aligned addresses and sizes.
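
For example, with 64-byte cache lines, a call with p = 0x1010 and
size = 0x90 (so end = 0x10a0) breaks down as follows (the addresses
are illustrative only, not taken from the patch):

  0x1000  partially covered head line: cleaned+invalidated, so the
          unrelated bytes 0x1000-0x100f sharing the line are written
          back rather than discarded
  0x1040  fully covered line: invalidated only
  0x1080  partially covered tail line: cleaned+invalidated for the
          sake of the unrelated bytes 0x10a0-0x10bf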

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
 xen/include/asm-arm/arm32/page.h |    3 +++
 xen/include/asm-arm/arm64/page.h |    3 +++
 xen/include/asm-arm/page.h       |   30 ++++++++++++++++++++++++++++++
 3 files changed, 36 insertions(+)

Comments

Julien Grall Oct. 2, 2014, 11:57 a.m. UTC | #1
Hi Stefano,

On 10/02/2014 11:02 AM, Stefano Stabellini wrote:
> +
> +static inline void invalidate_xen_dcache_va_range(const void *p, unsigned long size)
> +{
> +    size_t off;
> +    const void *end = p + size;
> +
> +    dsb(sy);           /* So the CPU issues all writes to the range */

I'm wondering if we could relax the dsb(sy) to dsb(ish)?

In any case:

Reviewed-by: Julien Grall <julien.grall@linaro.org>

Regards,

Ian Campbell Oct. 3, 2014, 1:39 p.m. UTC | #2
On Thu, 2014-10-02 at 11:02 +0100, Stefano Stabellini wrote:
> Take care of handling non-cacheline aligned addresses and sizes.
> 
> Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>

Acked-by: Ian Campbell <ian.campbell@citrix.com>

Ian Campbell Oct. 3, 2014, 2 p.m. UTC | #3
On Thu, 2014-10-02 at 12:57 +0100, Julien Grall wrote:
> Hi Stefano,
> 
> On 10/02/2014 11:02 AM, Stefano Stabellini wrote:
> > +
> > +static inline void invalidate_xen_dcache_va_range(const void *p, unsigned long size)
> > +{
> > +    size_t off;
> > +    const void *end = p + size;
> > +
> > +    dsb(sy);           /* So the CPU issues all writes to the range */
> 
> I'm wondering if we could relax the dsb(sy) to dsb(ish)?

We would need to know which cache level the device we are talking to is
coherent with, which we don't know, so we have to be conservative.

Also, since you only need this hypercall for incoherent devices, I suppose
it needs to push things all the way down.

Ian.
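
To make the distinction concrete, here is a minimal sketch of the
barrier macro under discussion (an approximation of Xen's arm64
definition, not a verbatim copy of it):

    #define dsb(scope) asm volatile ("dsb " #scope : : : "memory")

    dsb(ish);  /* Inner Shareable: orders memory accesses among the
                * CPUs only. */
    dsb(sy);   /* Full system: also orders against observers outside
                * the Inner Shareable domain, such as a non-coherent
                * DMA master, which is why the conservative barrier
                * is kept here. */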

Patch

diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
index 9740672..6fb2e68 100644
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -19,6 +19,9 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
         : : "r" (pte.bits), "r" (p) : "memory");
 }
 
+/* Inline ASM to invalidate dcache on register R (may be an inline asm operand) */
+#define __invalidate_xen_dcache_one(R) STORE_CP32(R, DCIMVAC)
+
 /* Inline ASM to flush dcache on register R (may be an inline asm operand) */
 #define __clean_xen_dcache_one(R) STORE_CP32(R, DCCMVAC)
 
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
index bb10164..f181b1b 100644
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -14,6 +14,9 @@ static inline void write_pte(lpae_t *p, lpae_t pte)
         : : "r" (pte.bits), "r" (p) : "memory");
 }
 
+/* Inline ASM to invalidate dcache on register R (may be an inline asm operand) */
+#define __invalidate_xen_dcache_one(R) "dc ivac, %" #R ";"
+
 /* Inline ASM to flush dcache on register R (may be an inline asm operand) */
 #define __clean_xen_dcache_one(R) "dc cvac, %" #R ";"
 
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index d758b61..da02e97 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -268,6 +268,36 @@ extern size_t cacheline_bytes;
 /* Functions for flushing medium-sized areas.
  * if 'range' is large enough we might want to use model-specific
  * full-cache flushes. */
+
+static inline void invalidate_xen_dcache_va_range(const void *p, unsigned long size)
+{
+    size_t off;
+    const void *end = p + size;
+
+    dsb(sy);           /* So the CPU issues all writes to the range */
+
+    off = (unsigned long)p % cacheline_bytes;
+    if ( off )
+    {
+        p -= off;
+        asm volatile (__clean_and_invalidate_xen_dcache_one(0) : : "r" (p));
+        p += cacheline_bytes;
+        size -= cacheline_bytes - off;
+    }
+    off = (unsigned long)end % cacheline_bytes;
+    if ( off )
+    {
+        end -= off;
+        size -= off;
+        asm volatile (__clean_and_invalidate_xen_dcache_one(0) : : "r" (end));
+    }
+
+    for ( ; p < end; p += cacheline_bytes )
+        asm volatile (__invalidate_xen_dcache_one(0) : : "r" (p));
+
+    dsb(sy);           /* So we know the flushes happen before continuing */
+}
+
 static inline void clean_xen_dcache_va_range(const void *p, unsigned long size)
 {
     const void *end;
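
As a usage sketch (hypothetical caller; buf and len are illustrative,
and only invalidate_xen_dcache_va_range comes from this patch): before
the CPU reads a buffer that a non-coherent device has just written by
DMA, the stale cached lines covering it must be discarded:

    /* The device has DMA'd fresh data into buf: drop any stale
     * cache lines so subsequent CPU reads see the device's bytes,
     * not old cached copies. */
    invalidate_xen_dcache_va_range(buf, len);
    /* ... buf can now be read safely ... */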