[Xen-devel,v3,04/16] xen/arm: mm: Redefine virt_to_mfn to support typesafe

Message ID 20170630155431.23824-5-julien.grall@arm.com
State Accepted
Commit d641694ddb304175b7d3d9e5ed7a9a4a40ab2568
Series xen/arm: Clean-up memory subsystems

Commit Message

Julien Grall June 30, 2017, 3:54 p.m. UTC
The file xen/arch/arm/mm.c uses typesafe MFNs in most places. This requires
every call to virt_to_mfn to be wrapped in _mfn(...).

To avoid the extra _mfn(...), redefine virt_to_mfn within arch/arm/mm.c
to handle typesafe MFNs.

This patch also introduces __virt_to_mfn, so virt_to_mfn can be
redefined easily.

Signed-off-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
---
    Changes in v2:
        - Use __virt_to_mfn rather than mfn_x(virt_to_mfn()).

    Changes in v3:
        - Add Stefano's reviewed-by
---
 xen/arch/arm/mm.c        | 16 ++++++++++------
 xen/include/asm-arm/mm.h |  3 ++-
 2 files changed, 12 insertions(+), 7 deletions(-)
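
For readers unfamiliar with the pattern, here is a minimal stand-alone sketch of the override technique described above (simplified, hypothetical types and names; not the actual Xen code). A header exposes the raw __virt_to_mfn() helper together with a default virt_to_mfn alias, and a single .c file then #undefs the alias and redefines it to return a typesafe value:

/*
 * Stand-alone sketch of the #undef/#define override pattern.
 * All names below are hypothetical simplifications of the Xen ones.
 */
#include <stdio.h>
#include <stdint.h>

/* --- header side (analogue of asm-arm/mm.h) --- */
#define PAGE_SHIFT 12

/* Raw conversion: virtual address -> plain frame number. */
#define __virt_to_mfn(va)  ((unsigned long)(uintptr_t)(va) >> PAGE_SHIFT)
/* Default alias; most files keep the plain unsigned long form. */
#define virt_to_mfn(va)    __virt_to_mfn(va)

/* Typesafe MFN wrapper, analogous to Xen's mfn_t / _mfn() / mfn_x(). */
typedef struct { unsigned long m; } mfn_t;
static inline mfn_t _mfn(unsigned long m) { mfn_t mfn = { m }; return mfn; }
static inline unsigned long mfn_x(mfn_t mfn) { return mfn.m; }

/* --- one .c file (analogue of xen/arch/arm/mm.c) --- */
/* Override the macro from the header so it yields a typesafe mfn_t. */
#undef virt_to_mfn
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))

int main(void)
{
    int object;
    mfn_t mfn = virt_to_mfn(&object);           /* typesafe in this file  */
    unsigned long raw = __virt_to_mfn(&object); /* raw form still usable  */

    printf("typesafe: %lu, raw: %lu\n", mfn_x(mfn), raw);
    return 0;
}

With this in place, call sites inside that one file pass mfn_t values directly (as in the mfn_to_xen_entry() changes in the patch below), while every other file continues to see the plain unsigned long form.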
Patch

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index d3674e732e..1e9e342fd3 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -44,6 +44,10 @@ 
 
 struct domain *dom_xen, *dom_io, *dom_cow;
 
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef virt_to_mfn
+#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
+
 /* Static start-of-day pagetables that we use before the allocators
  * are up. These are used by all CPUs during bringup before switching
  * to the CPUs own pagetables.
@@ -479,7 +483,7 @@  unsigned long domain_page_map_to_mfn(const void *ptr)
     unsigned long offset = (va>>THIRD_SHIFT) & LPAE_ENTRY_MASK;
 
     if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
-        return virt_to_mfn(va);
+        return __virt_to_mfn(va);
 
     ASSERT(slot >= 0 && slot < DOMHEAP_ENTRIES);
     ASSERT(map[slot].pt.avail != 0);
@@ -764,7 +768,7 @@  int init_secondary_pagetables(int cpu)
      * domheap mapping pages. */
     for ( i = 0; i < DOMHEAP_SECOND_PAGES; i++ )
     {
-        pte = mfn_to_xen_entry(_mfn(virt_to_mfn(domheap+i*LPAE_ENTRIES)),
+        pte = mfn_to_xen_entry(virt_to_mfn(domheap+i*LPAE_ENTRIES),
                                WRITEALLOC);
         pte.pt.table = 1;
         write_pte(&first[first_table_offset(DOMHEAP_VIRT_START+i*FIRST_SIZE)], pte);
@@ -961,7 +965,7 @@  static int create_xen_table(lpae_t *entry)
     if ( p == NULL )
         return -ENOMEM;
     clear_page(p);
-    pte = mfn_to_xen_entry(_mfn(virt_to_mfn(p)), WRITEALLOC);
+    pte = mfn_to_xen_entry(virt_to_mfn(p), WRITEALLOC);
     pte.pt.table = 1;
     write_pte(entry, pte);
     return 0;
@@ -1215,7 +1219,7 @@  int xenmem_add_to_physmap_one(
     unsigned long idx,
     gfn_t gfn)
 {
-    unsigned long mfn = 0;
+    mfn_t mfn = INVALID_MFN;
     int rc;
     p2m_type_t t;
     struct page_info *page = NULL;
@@ -1301,7 +1305,7 @@  int xenmem_add_to_physmap_one(
             return -EINVAL;
         }
 
-        mfn = page_to_mfn(page);
+        mfn = _mfn(page_to_mfn(page));
         t = p2m_map_foreign;
 
         rcu_unlock_domain(od);
@@ -1320,7 +1324,7 @@  int xenmem_add_to_physmap_one(
     }
 
     /* Map at new location. */
-    rc = guest_physmap_add_entry(d, gfn, _mfn(mfn), 0, t);
+    rc = guest_physmap_add_entry(d, gfn, mfn, 0, t);
 
     /* If we fail to add the mapping, we need to drop the reference we
      * took earlier on foreign pages */
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 7a35063546..ef84b72474 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -256,7 +256,7 @@  static inline int gvirt_to_maddr(vaddr_t va, paddr_t *pa, unsigned int flags)
 #define __va(x)             (maddr_to_virt(x))
 
 /* Convert between Xen-heap virtual addresses and machine frame numbers. */
-#define virt_to_mfn(va)   (virt_to_maddr(va) >> PAGE_SHIFT)
+#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT)
 #define mfn_to_virt(mfn)  (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))
 
 /*
@@ -266,6 +266,7 @@  static inline int gvirt_to_maddr(vaddr_t va, paddr_t *pa, unsigned int flags)
  */
 #define mfn_to_page(mfn)    __mfn_to_page(mfn)
 #define page_to_mfn(pg)     __page_to_mfn(pg)
+#define virt_to_mfn(va)     __virt_to_mfn(va)
 
 /* Convert between Xen-heap virtual addresses and page-info structures. */
 static inline struct page_info *virt_to_page(const void *v)