--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4458,8 +4458,8 @@ int xenmem_add_to_physmap_one(
gfn_t gpfn)
{
struct page_info *page = NULL;
- unsigned long gfn = 0; /* gcc ... */
- unsigned long prev_mfn, old_gpfn;
+ unsigned long gfn = 0 /* gcc ... */, old_gpfn;
+ mfn_t prev_mfn;
int rc = 0;
mfn_t mfn = INVALID_MFN;
p2m_type_t p2mt;
@@ -4505,12 +4505,12 @@ int xenmem_add_to_physmap_one(
}
/* Remove previously mapped page if it was present. */
- prev_mfn = mfn_x(get_gfn(d, gfn_x(gpfn), &p2mt));
- if ( mfn_valid(_mfn(prev_mfn)) )
+ prev_mfn = get_gfn(d, gfn_x(gpfn), &p2mt);
+ if ( mfn_valid(prev_mfn) )
{
if ( is_xen_heap_mfn(prev_mfn) )
/* Xen heap frames are simply unhooked from this phys slot. */
- rc = guest_physmap_remove_page(d, gpfn, _mfn(prev_mfn), PAGE_ORDER_4K);
+ rc = guest_physmap_remove_page(d, gpfn, prev_mfn, PAGE_ORDER_4K);
else
/* Normal domain memory is freed, to avoid leaking memory. */
rc = guest_remove_page(d, gfn_x(gpfn));
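The change of prev_mfn from unsigned long to mfn_t is what lets the _mfn()/mfn_x() round-trips above disappear. For reference, here is a minimal sketch of the typesafe wrapper behind mfn_t, assuming the struct-based variant (the real definitions are generated by TYPE_SAFE() in xen/include/xen/typesafe.h and can fall back to a plain typedef in non-debug builds):

/* Sketch only; not the exact Xen definitions. */
typedef struct { unsigned long mfn; } mfn_t;

static inline mfn_t _mfn(unsigned long n)        /* raw value -> mfn_t */
{
    return (mfn_t){ n };
}

static inline unsigned long mfn_x(mfn_t mfn)     /* mfn_t -> raw value */
{
    return mfn.mfn;
}

Because the wrapper is a distinct struct type, handing a gfn or an untyped integer to a helper that expects an mfn_t is caught at compile time, which is the point of converting is_xen_heap_mfn() and its callers.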
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2970,7 +2970,7 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
prev_mfn = get_gfn(tdom, gpfn, &p2mt_prev);
if ( mfn_valid(prev_mfn) )
{
- if ( is_xen_heap_mfn(mfn_x(prev_mfn)) )
+ if ( is_xen_heap_mfn(prev_mfn) )
/* Xen heap frames are simply unhooked from this phys slot */
rc = guest_physmap_remove_page(tdom, _gfn(gpfn), prev_mfn, 0);
else
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -559,7 +559,7 @@ _sh_propagate(struct vcpu *v,
* caching attributes in the shadows to match what was asked for.
*/
if ( (level == 1) && is_hvm_domain(d) &&
- !is_xen_heap_mfn(mfn_x(target_mfn)) )
+ !is_xen_heap_mfn(target_mfn) )
{
int type;
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2104,9 +2104,9 @@ void init_xenheap_pages(paddr_t ps, paddr_t pe)
* Yuk! Ensure there is a one-page buffer between Xen and Dom zones, to
* prevent merging of power-of-two blocks across the zone boundary.
*/
- if ( ps && !is_xen_heap_mfn(paddr_to_pfn(ps)-1) )
+ if ( ps && !is_xen_heap_mfn(mfn_add(maddr_to_mfn(ps), -1)) )
ps += PAGE_SIZE;
- if ( !is_xen_heap_mfn(paddr_to_pfn(pe)) )
+ if ( !is_xen_heap_mfn(maddr_to_mfn(pe)) )
pe -= PAGE_SIZE;
memguard_guard_range(maddr_to_virt(ps), pe - ps);
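The init_xenheap_pages() change is intended to be purely mechanical: the typed helpers expand to the same arithmetic as the old open-coded paddr_to_pfn() calls. A rough sketch of the helpers involved, assuming definitions along the lines of xen/include/xen/mm.h and the per-arch headers, with mfn_t, _mfn() and mfn_x() as sketched above:

/* Sketch only; PAGE_SHIFT of 12 is an assumption for illustration. */
#define PAGE_SHIFT        12
#define paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
#define maddr_to_mfn(ma)  _mfn(paddr_to_pfn(ma))     /* typed frame number */

static inline mfn_t mfn_add(mfn_t mfn, unsigned long i)
{
    return _mfn(mfn_x(mfn) + i);
}

So mfn_add(maddr_to_mfn(ps), -1) names the same frame as the old paddr_to_pfn(ps) - 1, just without ever holding it as an untyped unsigned long.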
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -138,16 +138,16 @@ extern vaddr_t xenheap_virt_start;
#endif

#ifdef CONFIG_ARM_32
-#define is_xen_heap_page(page) is_xen_heap_mfn(mfn_x(page_to_mfn(page)))
+#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
#define is_xen_heap_mfn(mfn) ({ \
- unsigned long mfn_ = (mfn); \
+ unsigned long mfn_ = mfn_x(mfn); \
(mfn_ >= mfn_x(xenheap_mfn_start) && \
mfn_ < mfn_x(xenheap_mfn_end)); \
})
#else
#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
- (mfn_valid(_mfn(mfn)) && is_xen_heap_page(mfn_to_page(_mfn(mfn))))
+ (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
#endif

#define is_xen_fixed_mfn(mfn) \
@@ -246,7 +246,7 @@ static inline paddr_t __virt_to_maddr(vaddr_t va)
#ifdef CONFIG_ARM_32
static inline void *maddr_to_virt(paddr_t ma)
{
- ASSERT(is_xen_heap_mfn(ma >> PAGE_SHIFT));
+ ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma)));
ma -= mfn_to_maddr(xenheap_mfn_start);
return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
}
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -278,7 +278,7 @@ struct page_info
#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
- (__mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(_mfn(mfn))))
+ (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
#define is_xen_fixed_mfn(mfn) \
(((mfn_to_maddr(mfn)) >= __pa(&_stext)) && \
((mfn_to_maddr(mfn)) <= __pa(&__2M_rwdata_end)))