@@ -98,7 +98,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
l2_pgentry_t l2e, *l2t;
l1_pgentry_t l1e, *l1t;
unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
- mfn_t mfn = _mfn(cr3 >> PAGE_SHIFT);
+ mfn_t mfn = maddr_to_mfn(cr3);
DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id,
cr3, pgd3val);
@@ -2640,7 +2640,7 @@ static int sh_remove_shadow_via_pointer(struct domain *d, mfn_t smfn)
ASSERT(sh_type_has_up_pointer(d, sp->u.sh.type));
if (sp->up == 0) return 0;
- pmfn = _mfn(sp->up >> PAGE_SHIFT);
+ pmfn = maddr_to_mfn(sp->up);
ASSERT(mfn_valid(pmfn));
vaddr = map_domain_page(pmfn);
ASSERT(vaddr);
@@ -2425,7 +2425,7 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
sp = mfn_to_page(smfn);
if ( sp->u.sh.count != 1 || !sp->up )
return 0;
- smfn = _mfn(sp->up >> PAGE_SHIFT);
+ smfn = maddr_to_mfn(sp->up);
ASSERT(mfn_valid(smfn));
#if (SHADOW_PAGING_LEVELS == 4)
@@ -2434,7 +2434,7 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
ASSERT(sh_type_has_up_pointer(d, SH_type_l2_shadow));
if ( sp->u.sh.count != 1 || !sp->up )
return 0;
- smfn = _mfn(sp->up >> PAGE_SHIFT);
+ smfn = maddr_to_mfn(sp->up);
ASSERT(mfn_valid(smfn));
/* up to l4 */
@@ -2442,7 +2442,7 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
if ( sp->u.sh.count != 1
|| !sh_type_has_up_pointer(d, SH_type_l3_64_shadow) || !sp->up )
return 0;
- smfn = _mfn(sp->up >> PAGE_SHIFT);
+ smfn = maddr_to_mfn(sp->up);
ASSERT(mfn_valid(smfn));
#endif
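
Note on the shadow-code hunks above: they all rely on maddr_to_mfn() being
equivalent to the open-coded shift it replaces. A minimal sketch of that
equivalence, assuming the helper keeps its usual definition from
xen/include/xen/mm.h (the exact wrapper chain may differ between trees):

    /* Presumed definitions -- for illustration only, not part of this patch. */
    #define paddr_to_pfn(pa)   ((unsigned long)((pa) >> PAGE_SHIFT))
    #define maddr_to_mfn(ma)   _mfn(paddr_to_pfn(ma))

    /*
     * sp->up and cr3 are machine addresses; the shift discards any offset
     * or flag bits below PAGE_SHIFT, so both spellings yield the same
     * typesafe MFN:
     *
     *   maddr_to_mfn(sp->up) == _mfn(sp->up >> PAGE_SHIFT)
     */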
@@ -504,7 +504,7 @@ static void kimage_free_entry(kimage_entry_t entry)
{
struct page_info *page;
- page = mfn_to_page(entry >> PAGE_SHIFT);
+ page = maddr_to_page(entry);
free_domheap_page(page);
}
@@ -636,8 +636,8 @@ static struct page_info *kimage_alloc_page(struct kexec_image *image,
if ( old )
{
/* If so move it. */
- mfn_t old_mfn = _mfn(*old >> PAGE_SHIFT);
- mfn_t mfn = _mfn(addr >> PAGE_SHIFT);
+ mfn_t old_mfn = maddr_to_mfn(*old);
+ mfn_t mfn = maddr_to_mfn(addr);
copy_domain_page(mfn, old_mfn);
clear_domain_page(old_mfn);
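
The kexec hunks lean on the analogous page_info helper. A rough sketch of the
assumed equivalence (illustrative only; the real x86 expansion goes through
__mfn_to_page() rather than this exact wrapper):

    /* Presumed definition -- for illustration only, not part of this patch. */
    #define maddr_to_page(ma)   mfn_to_page(maddr_to_mfn(ma))

    /*
     * A kimage_entry_t carries a machine address with indirection flags in
     * its low bits; shifting by PAGE_SHIFT drops them, so both the old and
     * new spellings name the page_info of the frame the entry refers to:
     *
     *   maddr_to_page(entry) == <page_info for (entry >> PAGE_SHIFT)>
     */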