@@ -11,6 +11,7 @@
#define __FLUSHTLB_H__
#include <xen/config.h>
+#include <xen/mm.h>
#include <xen/percpu.h>
#include <xen/smp.h>
#include <xen/types.h>
@@ -115,4 +116,19 @@ void flush_area_mask(const cpumask_t *, const void *va, unsigned int flags);
#define flush_tlb_one_all(v) \
flush_tlb_one_mask(&cpu_online_map, v)
+static inline int flush_page_to_ram(unsigned long mfn) { return -EOPNOTSUPP; }
+static inline int invalidate_dcache_va_range(const void *p, unsigned long size) { return -EOPNOTSUPP; }
+static inline int clean_and_invalidate_dcache_va_range(const void *p, unsigned long size)
+{
+ unsigned int order = get_order_from_bytes(size);
+ /* sub-page granularity support needs to be added if necessary */
+ flush_area_local(p, FLUSH_CACHE|FLUSH_ORDER(order));
+ return 0;
+}
+static inline int clean_dcache_va_range(const void *p, unsigned long size)
+{
+ clean_and_invalidate_dcache_va_range(p, size);
+ return 0;
+}
+
#endif /* __FLUSHTLB_H__ */
@@ -345,9 +345,6 @@ static inline uint32_t cacheattr_to_pte_flags(uint32_t cacheattr)
return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3);
}
-/* No cache maintenance required on x86 architecture. */
-static inline int flush_page_to_ram(unsigned long mfn) { return -EOPNOTSUPP; }
-
/* return true if permission increased */
static inline bool_t
perms_strictly_increased(uint32_t old_flags, uint32_t new_flags)
Move the existing flush_page_to_ram to flushtlb.h. Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- Changes in v5: - make order an unsigned int; - add a comment on sub-page granularity support; - cache operations return error; - move the functions to xen/include/asm-x86/flushtlb.h. Changes in v4: - remove _xen in the function names; - implement the functions using existing x86 flushing functions. --- xen/include/asm-x86/flushtlb.h | 16 ++++++++++++++++ xen/include/asm-x86/page.h | 3 --- 2 files changed, 16 insertions(+), 3 deletions(-)