@@ -49,8 +49,6 @@ struct map_range_data
/* Override macros from asm/page.h to make them work with mfn_t */
#undef virt_to_mfn
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
//#define DEBUG_11_ALLOCATION
#ifdef DEBUG_11_ALLOCATION
@@ -286,7 +286,7 @@ static __init int kernel_decompress(struct bootmodule *mod)
iounmap(input);
return -ENOMEM;
}
- mfn = _mfn(page_to_mfn(pages));
+ mfn = page_to_mfn(pages);
output = __vmap(&mfn, 1 << kernel_order_out, 1, 1, PAGE_HYPERVISOR, VMAP_DEFAULT);
rc = perform_gunzip(output, input, size);
@@ -210,7 +210,7 @@ p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag,
if ( t != p2m_ram_rw )
goto err;
- page = mfn_to_page(mfn_x(mfn));
+ page = mfn_to_page(mfn);
if ( unlikely(!get_page(page, v->domain)) )
page = NULL;
@@ -477,7 +477,7 @@ void unmap_domain_page(const void *va)
local_irq_restore(flags);
}
-unsigned long domain_page_map_to_mfn(const void *ptr)
+mfn_t domain_page_map_to_mfn(const void *ptr)
{
unsigned long va = (unsigned long)ptr;
lpae_t *map = this_cpu(xen_dommap);
@@ -485,12 +485,12 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
unsigned long offset = (va>>THIRD_SHIFT) & LPAE_ENTRY_MASK;
if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
- return __virt_to_mfn(va);
+ return virt_to_mfn(va);
ASSERT(slot >= 0 && slot < DOMHEAP_ENTRIES);
ASSERT(map[slot].pt.avail != 0);
- return map[slot].pt.base + offset;
+ return _mfn(map[slot].pt.base + offset);
}
#endif
@@ -1287,7 +1287,7 @@ int xenmem_add_to_physmap_one(
return -EINVAL;
}
- mfn = _mfn(page_to_mfn(page));
+ mfn = page_to_mfn(page);
t = p2m_map_foreign;
rcu_unlock_domain(od);
@@ -37,12 +37,6 @@ static unsigned int __read_mostly max_vmid = MAX_VMID_8_BIT;
#define P2M_ROOT_PAGES (1<<P2M_ROOT_ORDER)
-/* Override macros from asm/mm.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
unsigned int __read_mostly p2m_ipa_bits;
/* Helpers to lookup the properties of each level */
@@ -90,8 +84,8 @@ void dump_p2m_lookup(struct domain *d, paddr_t addr)
printk("dom%d IPA 0x%"PRIpaddr"\n", d->domain_id, addr);
- printk("P2M @ %p mfn:0x%lx\n",
- p2m->root, __page_to_mfn(p2m->root));
+ printk("P2M @ %p mfn:%#"PRI_mfn"\n",
+ p2m->root, mfn_x(page_to_mfn(p2m->root)));
dump_pt_walk(page_to_maddr(p2m->root), addr,
P2M_ROOT_LEVEL, P2M_ROOT_PAGES);
@@ -653,7 +653,7 @@ static void pvpmu_finish(struct domain *d, xen_pmu_params_t *params)
{
struct vcpu *v;
struct vpmu_struct *vpmu;
- uint64_t mfn;
+ mfn_t mfn;
void *xenpmu_data;
if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
@@ -675,7 +675,7 @@ static void pvpmu_finish(struct domain *d, xen_pmu_params_t *params)
if ( xenpmu_data )
{
mfn = domain_page_map_to_mfn(xenpmu_data);
- ASSERT(mfn_valid(_mfn(mfn)));
+ ASSERT(mfn_valid(mfn));
unmap_domain_page_global(xenpmu_data);
put_page_and_type(mfn_to_page(mfn));
}
@@ -195,7 +195,7 @@ void dump_pageframe_info(struct domain *d)
}
}
printk(" DomPage %p: caf=%08lx, taf=%" PRtype_info "\n",
- _p(page_to_mfn(page)),
+ _p(mfn_x(page_to_mfn(page))),
page->count_info, page->u.inuse.type_info);
}
spin_unlock(&d->page_alloc_lock);
@@ -208,7 +208,7 @@ void dump_pageframe_info(struct domain *d)
page_list_for_each ( page, &d->xenpage_list )
{
printk(" XenPage %p: caf=%08lx, taf=%" PRtype_info "\n",
- _p(page_to_mfn(page)),
+ _p(mfn_x(page_to_mfn(page))),
page->count_info, page->u.inuse.type_info);
}
spin_unlock(&d->page_alloc_lock);
@@ -635,7 +635,8 @@ int arch_domain_soft_reset(struct domain *d)
struct page_info *page = virt_to_page(d->shared_info), *new_page;
int ret = 0;
struct domain *owner;
- unsigned long mfn, gfn;
+ mfn_t mfn;
+ unsigned long gfn;
p2m_type_t p2mt;
unsigned int i;
@@ -669,7 +670,7 @@ int arch_domain_soft_reset(struct domain *d)
ASSERT( owner == d );
mfn = page_to_mfn(page);
- gfn = mfn_to_gmfn(d, mfn);
+ gfn = mfn_to_gmfn(d, mfn_x(mfn));
/*
* gfn == INVALID_GFN indicates that the shared_info page was never mapped
@@ -678,7 +679,7 @@ int arch_domain_soft_reset(struct domain *d)
if ( gfn == gfn_x(INVALID_GFN) )
goto exit_put_page;
- if ( mfn_x(get_gfn_query(d, gfn, &p2mt)) != mfn )
+ if ( !mfn_eq(get_gfn_query(d, gfn, &p2mt), mfn) )
{
printk(XENLOG_G_ERR "Failed to get Dom%d's shared_info GFN (%lx)\n",
d->domain_id, gfn);
@@ -695,7 +696,7 @@ int arch_domain_soft_reset(struct domain *d)
goto exit_put_gfn;
}
- ret = guest_physmap_remove_page(d, _gfn(gfn), _mfn(mfn), PAGE_ORDER_4K);
+ ret = guest_physmap_remove_page(d, _gfn(gfn), mfn, PAGE_ORDER_4K);
if ( ret )
{
printk(XENLOG_G_ERR "Failed to remove Dom%d's shared_info frame %lx\n",
@@ -704,7 +705,7 @@ int arch_domain_soft_reset(struct domain *d)
goto exit_put_gfn;
}
- ret = guest_physmap_add_page(d, _gfn(gfn), _mfn(page_to_mfn(new_page)),
+ ret = guest_physmap_add_page(d, _gfn(gfn), page_to_mfn(new_page),
PAGE_ORDER_4K);
if ( ret )
{
@@ -1000,7 +1001,7 @@ int arch_set_info_guest(
{
if ( (page->u.inuse.type_info & PGT_type_mask) ==
PGT_l4_page_table )
- done = !fill_ro_mpt(_mfn(page_to_mfn(page)));
+ done = !fill_ro_mpt(page_to_mfn(page));
page_unlock(page);
}
@@ -1129,7 +1130,7 @@ int arch_set_info_guest(
l4_pgentry_t *l4tab;
l4tab = map_domain_page(pagetable_get_mfn(v->arch.guest_table));
- *l4tab = l4e_from_pfn(page_to_mfn(cr3_page),
+ *l4tab = l4e_from_mfn(page_to_mfn(cr3_page),
_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
unmap_domain_page(l4tab);
}
@@ -1998,7 +1999,7 @@ int domain_relinquish_resources(struct domain *d)
if ( d->arch.pirq_eoi_map != NULL )
{
unmap_domain_page_global(d->arch.pirq_eoi_map);
- put_page_and_type(mfn_to_page(d->arch.pirq_eoi_map_mfn));
+ put_page_and_type(mfn_to_page(_mfn(d->arch.pirq_eoi_map_mfn)));
d->arch.pirq_eoi_map = NULL;
d->arch.auto_unmask = 0;
}
@@ -331,13 +331,13 @@ void unmap_domain_page_global(const void *ptr)
}
/* Translate a map-domain-page'd address to the underlying MFN */
-unsigned long domain_page_map_to_mfn(const void *ptr)
+mfn_t domain_page_map_to_mfn(const void *ptr)
{
unsigned long va = (unsigned long)ptr;
const l1_pgentry_t *pl1e;
if ( va >= DIRECTMAP_VIRT_START )
- return virt_to_mfn(ptr);
+ return _mfn(virt_to_mfn(ptr));
if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
{
@@ -350,5 +350,5 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
pl1e = &__linear_l1_table[l1_linear_offset(va)];
}
- return l1e_get_pfn(*pl1e);
+ return l1e_get_mfn(*pl1e);
}
@@ -193,7 +193,7 @@ static int modified_memory(struct domain *d,
* These are most probably not page tables any more
* don't take a long time and don't die either.
*/
- sh_remove_shadows(d, _mfn(page_to_mfn(page)), 1, 0);
+ sh_remove_shadows(d, page_to_mfn(page), 1, 0);
put_page(page);
}
}
@@ -119,7 +119,7 @@ static int __init pvh_populate_memory_range(struct domain *d,
continue;
}
- rc = guest_physmap_add_page(d, _gfn(start), _mfn(page_to_mfn(page)),
+ rc = guest_physmap_add_page(d, _gfn(start), page_to_mfn(page),
order);
if ( rc != 0 )
{
@@ -269,7 +269,7 @@ static int __init pvh_setup_vmx_realmode_helpers(struct domain *d)
}
write_32bit_pse_identmap(ident_pt);
unmap_domain_page(ident_pt);
- put_page(mfn_to_page(mfn_x(mfn)));
+ put_page(mfn_to_page(mfn));
d->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] = gaddr;
if ( pvh_add_mem_range(d, gaddr, gaddr + PAGE_SIZE, E820_RESERVED) )
printk("Unable to set identity page tables as reserved in the memory map\n");
@@ -287,7 +287,7 @@ static void __init pvh_steal_low_ram(struct domain *d, unsigned long start,
for ( mfn = start; mfn < start + nr_pages; mfn++ )
{
- struct page_info *pg = mfn_to_page(mfn);
+ struct page_info *pg = mfn_to_page(_mfn(mfn));
int rc;
rc = unshare_xen_page_with_guest(pg, dom_io);
@@ -591,7 +591,7 @@ static void *hvmemul_map_linear_addr(
goto unhandleable;
}
- *mfn++ = _mfn(page_to_mfn(page));
+ *mfn++ = page_to_mfn(page);
if ( p2m_is_discard_write(p2mt) )
{
@@ -623,7 +623,7 @@ static void *hvmemul_map_linear_addr(
out:
/* Drop all held references. */
while ( mfn-- > hvmemul_ctxt->mfn )
- put_page(mfn_to_page(mfn_x(*mfn)));
+ put_page(mfn_to_page(*mfn));
return err;
}
@@ -649,7 +649,7 @@ static void hvmemul_unmap_linear_addr(
{
ASSERT(mfn_valid(*mfn));
paging_mark_dirty(currd, *mfn);
- put_page(mfn_to_page(mfn_x(*mfn)));
+ put_page(mfn_to_page(*mfn));
*mfn++ = _mfn(0); /* Clean slot for map()'s error checking. */
}
@@ -2247,7 +2247,7 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
v->arch.guest_table = pagetable_from_page(page);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
- v->arch.hvm_vcpu.guest_cr[3], page_to_mfn(page));
+ v->arch.hvm_vcpu.guest_cr[3], mfn_x(page_to_mfn(page)));
}
}
else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
@@ -2631,7 +2631,7 @@ void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent)
void hvm_unmap_guest_frame(void *p, bool_t permanent)
{
- unsigned long mfn;
+ mfn_t mfn;
struct page_info *page;
if ( !p )
@@ -2652,7 +2652,7 @@ void hvm_unmap_guest_frame(void *p, bool_t permanent)
list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
if ( track->page == page )
{
- paging_mark_dirty(d, _mfn(mfn));
+ paging_mark_dirty(d, mfn);
list_del(&track->list);
xfree(track);
break;
@@ -2669,7 +2669,7 @@ void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
spin_lock(&d->arch.hvm_domain.write_map.lock);
list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
- paging_mark_dirty(d, _mfn(page_to_mfn(track->page)));
+ paging_mark_dirty(d, page_to_mfn(track->page));
spin_unlock(&d->arch.hvm_domain.write_map.lock);
}
@@ -3243,8 +3243,8 @@ static enum hvm_translation_result __hvm_copy(
if ( xchg(&lastpage, gfn_x(gfn)) != gfn_x(gfn) )
dprintk(XENLOG_G_DEBUG,
- "%pv attempted write to read-only gfn %#lx (mfn=%#lx)\n",
- v, gfn_x(gfn), page_to_mfn(page));
+ "%pv attempted write to read-only gfn %#lx (mfn=%#"PRI_mfn")\n",
+ v, gfn_x(gfn), mfn_x(page_to_mfn(page)));
}
else
{
@@ -268,7 +268,7 @@ static void hvm_remove_ioreq_gfn(
struct domain *d, struct hvm_ioreq_page *iorp)
{
if ( guest_physmap_remove_page(d, _gfn(iorp->gfn),
- _mfn(page_to_mfn(iorp->page)), 0) )
+ page_to_mfn(iorp->page), 0) )
domain_crash(d);
clear_page(iorp->va);
}
@@ -281,7 +281,7 @@ static int hvm_add_ioreq_gfn(
clear_page(iorp->va);
rc = guest_physmap_add_page(d, _gfn(iorp->gfn),
- _mfn(page_to_mfn(iorp->page)), 0);
+ page_to_mfn(iorp->page), 0);
if ( rc == 0 )
paging_mark_pfn_dirty(d, _pfn(iorp->gfn));
@@ -590,7 +590,7 @@ void stdvga_init(struct domain *d)
if ( pg == NULL )
break;
s->vram_page[i] = pg;
- clear_domain_page(_mfn(page_to_mfn(pg)));
+ clear_domain_page(page_to_mfn(pg));
}
if ( i == ARRAY_SIZE(s->vram_page) )
@@ -1552,7 +1552,7 @@ static int svm_cpu_up_prepare(unsigned int cpu)
if ( !pg )
goto err;
- clear_domain_page(_mfn(page_to_mfn(pg)));
+ clear_domain_page(page_to_mfn(pg));
*this_hsa = page_to_maddr(pg);
}
@@ -1562,7 +1562,7 @@ static int svm_cpu_up_prepare(unsigned int cpu)
if ( !pg )
goto err;
- clear_domain_page(_mfn(page_to_mfn(pg)));
+ clear_domain_page(page_to_mfn(pg));
*this_vmcb = page_to_maddr(pg);
}
@@ -354,7 +354,7 @@ static void enable_hypercall_page(struct domain *d)
if ( page )
put_page(page);
gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
- gmfn, page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
+ gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
return;
}
@@ -414,7 +414,7 @@ static void initialize_vp_assist(struct vcpu *v)
fail:
gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", gmfn,
- page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
+ mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
}
static void teardown_vp_assist(struct vcpu *v)
@@ -492,7 +492,7 @@ static void update_reference_tsc(struct domain *d, bool_t initialize)
if ( page )
put_page(page);
gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
- gmfn, page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
+ gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
return;
}
@@ -1434,7 +1434,7 @@ int vmx_vcpu_enable_pml(struct vcpu *v)
vmx_vmcs_enter(v);
- __vmwrite(PML_ADDRESS, page_to_mfn(v->arch.hvm_vmx.pml_pg) << PAGE_SHIFT);
+ __vmwrite(PML_ADDRESS, page_to_maddr(v->arch.hvm_vmx.pml_pg));
__vmwrite(GUEST_PML_INDEX, NR_PML_ENTRIES - 1);
v->arch.hvm_vmx.secondary_exec_control |= SECONDARY_EXEC_ENABLE_PML;
@@ -2931,7 +2931,7 @@ gp_fault:
static int vmx_alloc_vlapic_mapping(struct domain *d)
{
struct page_info *pg;
- unsigned long mfn;
+ mfn_t mfn;
if ( !cpu_has_vmx_virtualize_apic_accesses )
return 0;
@@ -2940,10 +2940,10 @@ static int vmx_alloc_vlapic_mapping(struct domain *d)
if ( !pg )
return -ENOMEM;
mfn = page_to_mfn(pg);
- clear_domain_page(_mfn(mfn));
+ clear_domain_page(mfn);
share_xen_page_with_guest(pg, d, XENSHARE_writable);
- d->arch.hvm_domain.vmx.apic_access_mfn = mfn;
- set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), _mfn(mfn),
+ d->arch.hvm_domain.vmx.apic_access_mfn = mfn_x(mfn);
+ set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), mfn,
PAGE_ORDER_4K, p2m_get_hostp2m(d)->default_access);
return 0;
@@ -2954,7 +2954,7 @@ static void vmx_free_vlapic_mapping(struct domain *d)
unsigned long mfn = d->arch.hvm_domain.vmx.apic_access_mfn;
if ( mfn != 0 )
- free_shared_domheap_page(mfn_to_page(mfn));
+ free_shared_domheap_page(mfn_to_page(_mfn(mfn)));
}
static void vmx_install_vlapic_mapping(struct vcpu *v)
@@ -84,7 +84,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
}
v->arch.hvm_vmx.vmread_bitmap = vmread_bitmap;
- clear_domain_page(_mfn(page_to_mfn(vmread_bitmap)));
+ clear_domain_page(page_to_mfn(vmread_bitmap));
vmwrite_bitmap = alloc_domheap_page(NULL, 0);
if ( !vmwrite_bitmap )
@@ -1733,7 +1733,7 @@ int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
nvcpu->nv_vvmcx = vvmcx;
nvcpu->nv_vvmcxaddr = gpa;
v->arch.hvm_vmx.vmcs_shadow_maddr =
- pfn_to_paddr(domain_page_map_to_mfn(vvmcx));
+ mfn_to_maddr(domain_page_map_to_mfn(vvmcx));
}
else
{
@@ -1819,7 +1819,7 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs)
{
if ( writable )
clear_vvmcs_launched(&nvmx->launched_list,
- domain_page_map_to_mfn(vvmcs));
+ mfn_x(domain_page_map_to_mfn(vvmcs)));
else
rc = VMFAIL_VALID;
hvm_unmap_guest_frame(vvmcs, 0);
@@ -131,10 +131,6 @@
#include "pv/mm.h"
/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
#undef virt_to_mfn
#define virt_to_mfn(v) _mfn(__virt_to_mfn(v))
@@ -469,20 +469,20 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
if ( l3p )
{
unmap_domain_page(l3p);
- put_page(mfn_to_page(mfn_x(gw->l3mfn)));
+ put_page(mfn_to_page(gw->l3mfn));
}
#endif
#if GUEST_PAGING_LEVELS >= 3
if ( l2p )
{
unmap_domain_page(l2p);
- put_page(mfn_to_page(mfn_x(gw->l2mfn)));
+ put_page(mfn_to_page(gw->l2mfn));
}
#endif
if ( l1p )
{
unmap_domain_page(l1p);
- put_page(mfn_to_page(mfn_x(gw->l1mfn)));
+ put_page(mfn_to_page(gw->l1mfn));
}
return walk_ok;
@@ -83,7 +83,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
*pfec &= ~PFEC_page_present;
goto out_tweak_pfec;
}
- top_mfn = _mfn(page_to_mfn(top_page));
+ top_mfn = page_to_mfn(top_page);
/* Map the top-level table and call the tree-walker */
ASSERT(mfn_valid(top_mfn));
@@ -42,12 +42,6 @@
#include "private.h"
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef page_to_mfn
-#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
-
/************************************************/
/* HAP VRAM TRACKING SUPPORT */
/************************************************/
@@ -173,7 +173,7 @@ nept_walk_tables(struct vcpu *v, unsigned long l2ga, ept_walk_t *gw)
goto map_err;
gw->lxe[lvl] = lxp[ept_lvl_table_offset(l2ga, lvl)];
unmap_domain_page(lxp);
- put_page(mfn_to_page(mfn_x(lxmfn)));
+ put_page(mfn_to_page(lxmfn));
if ( nept_non_present_check(gw->lxe[lvl]) )
goto non_present;
@@ -152,11 +152,6 @@ static inline shr_handle_t get_next_handle(void)
#define mem_sharing_enabled(d) \
(is_hvm_domain(d) && (d)->arch.hvm_domain.mem_sharing_enabled)
-#undef mfn_to_page
-#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef page_to_mfn
-#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
-
static atomic_t nr_saved_mfns = ATOMIC_INIT(0);
static atomic_t nr_shared_mfns = ATOMIC_INIT(0);
@@ -74,13 +74,13 @@ static int atomic_write_ept_entry(ept_entry_t *entryptr, ept_entry_t new,
goto out;
rc = -ESRCH;
- fdom = page_get_owner(mfn_to_page(new.mfn));
+ fdom = page_get_owner(mfn_to_page(_mfn(new.mfn)));
if ( fdom == NULL )
goto out;
/* get refcount on the page */
rc = -EBUSY;
- if ( !get_page(mfn_to_page(new.mfn), fdom) )
+ if ( !get_page(mfn_to_page(_mfn(new.mfn)), fdom) )
goto out;
}
}
@@ -91,7 +91,7 @@ static int atomic_write_ept_entry(ept_entry_t *entryptr, ept_entry_t new,
write_atomic(&entryptr->epte, new.epte);
if ( unlikely(oldmfn != mfn_x(INVALID_MFN)) )
- put_page(mfn_to_page(oldmfn));
+ put_page(mfn_to_page(_mfn(oldmfn)));
rc = 0;
@@ -270,7 +270,7 @@ static void ept_free_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry, int l
}
p2m_tlb_flush_sync(p2m);
- p2m_free_ptp(p2m, mfn_to_page(ept_entry->mfn));
+ p2m_free_ptp(p2m, mfn_to_page(_mfn(ept_entry->mfn)));
}
static bool_t ept_split_super_page(struct p2m_domain *p2m,
@@ -29,12 +29,6 @@
#include "mm-locks.h"
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef page_to_mfn
-#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
-
#define superpage_aligned(_x) (((_x)&(SUPERPAGE_PAGES-1))==0)
/* Enforce lock ordering when grabbing the "external" page_alloc lock */
@@ -47,12 +47,6 @@ bool_t __initdata opt_hap_1gb = 1, __initdata opt_hap_2mb = 1;
boolean_param("hap_1gb", opt_hap_1gb);
boolean_param("hap_2mb", opt_hap_2mb);
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef page_to_mfn
-#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
-
DEFINE_PERCPU_RWLOCK_GLOBAL(p2m_percpu_rwlock);
/* Init the datastructures for later use by the p2m code */
@@ -47,12 +47,6 @@
/* Per-CPU variable for enforcing the lock ordering */
DEFINE_PER_CPU(int, mm_lock_level);
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef page_to_mfn
-#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
-
/************************************************/
/* LOG DIRTY SUPPORT */
/************************************************/
@@ -315,7 +315,7 @@ static inline int page_is_out_of_sync(struct page_info *p)
static inline int mfn_is_out_of_sync(mfn_t gmfn)
{
- return page_is_out_of_sync(mfn_to_page(mfn_x(gmfn)));
+ return page_is_out_of_sync(mfn_to_page(gmfn));
}
static inline int page_oos_may_write(struct page_info *p)
@@ -326,7 +326,7 @@ static inline int page_oos_may_write(struct page_info *p)
static inline int mfn_oos_may_write(mfn_t gmfn)
{
- return page_oos_may_write(mfn_to_page(mfn_x(gmfn)));
+ return page_oos_may_write(mfn_to_page(gmfn));
}
#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */
@@ -465,18 +465,6 @@ void sh_reset_l3_up_pointers(struct vcpu *v);
* MFN/page-info handling
*/
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
-#undef page_to_mfn
-#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))
-
-/* Override pagetable_t <-> struct page_info conversions to work with mfn_t */
-#undef pagetable_get_page
-#define pagetable_get_page(x) mfn_to_page(pagetable_get_mfn(x))
-#undef pagetable_from_page
-#define pagetable_from_page(pg) pagetable_from_mfn(page_to_mfn(pg))
-
#define backpointer(sp) _mfn(pdx_to_pfn((unsigned long)(sp)->v.sh.back))
static inline unsigned long __backpointer(const struct page_info *sp)
{
@@ -430,7 +430,7 @@ static void dump_numa(unsigned char key)
spin_lock(&d->page_alloc_lock);
page_list_for_each(page, &d->page_list)
{
- i = phys_to_nid((paddr_t)page_to_mfn(page) << PAGE_SHIFT);
+ i = phys_to_nid(page_to_maddr(page));
page_num_node[i]++;
}
spin_unlock(&d->page_alloc_lock);
@@ -239,7 +239,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
}
if ( cmpxchg(&currd->arch.pirq_eoi_map_mfn,
- 0, page_to_mfn(page)) != 0 )
+ 0, mfn_x(page_to_mfn(page))) != 0 )
{
put_page_and_type(page);
ret = -EBUSY;
@@ -31,12 +31,6 @@
#include <public/callback.h>
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
static int register_guest_nmi_callback(unsigned long address)
{
struct vcpu *curr = current;
@@ -25,12 +25,6 @@
#include <asm/p2m.h>
#include <asm/pv/mm.h>
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
/*
* Flush the LDT, dropping any typerefs. Returns a boolean indicating whether
* mappings have been removed (i.e. a TLB flush is needed).
@@ -64,7 +64,7 @@ static __init void mark_pv_pt_pages_rdonly(struct domain *d,
for ( count = 0; count < nr_pt_pages; count++ )
{
l1e_remove_flags(*pl1e, _PAGE_RW);
- page = mfn_to_page(l1e_get_pfn(*pl1e));
+ page = mfn_to_page(l1e_get_mfn(*pl1e));
/* Read-only mapping + PGC_allocated + page-table page. */
page->count_info = PGC_allocated | 3;
@@ -496,7 +496,7 @@ int __init dom0_construct_pv(struct domain *d,
page = alloc_domheap_pages(d, order, 0);
if ( page == NULL )
panic("Not enough RAM for domain 0 allocation");
- alloc_spfn = page_to_mfn(page);
+ alloc_spfn = mfn_x(page_to_mfn(page));
alloc_epfn = alloc_spfn + d->tot_pages;
if ( initrd_len )
@@ -524,12 +524,12 @@ int __init dom0_construct_pv(struct domain *d,
mpt_alloc = (paddr_t)initrd->mod_start << PAGE_SHIFT;
init_domheap_pages(mpt_alloc,
mpt_alloc + PAGE_ALIGN(initrd_len));
- initrd->mod_start = initrd_mfn = page_to_mfn(page);
+ initrd->mod_start = initrd_mfn = mfn_x(page_to_mfn(page));
}
else
{
while ( count-- )
- if ( assign_pages(d, mfn_to_page(mfn++), 0, 0) )
+ if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 0, 0) )
BUG();
}
initrd->mod_end = 0;
@@ -661,7 +661,7 @@ int __init dom0_construct_pv(struct domain *d,
L1_PROT : COMPAT_L1_PROT));
l1tab++;
- page = mfn_to_page(mfn);
+ page = mfn_to_page(_mfn(mfn));
if ( !page->u.inuse.type_info &&
!get_page_and_type(page, d, PGT_writable_page) )
BUG();
@@ -801,7 +801,7 @@ int __init dom0_construct_pv(struct domain *d,
si->nr_p2m_frames = d->tot_pages - count;
page_list_for_each ( page, &d->page_list )
{
- mfn = page_to_mfn(page);
+ mfn = mfn_x(page_to_mfn(page));
BUG_ON(SHARED_M2P(get_gpfn_from_mfn(mfn)));
if ( get_gpfn_from_mfn(mfn) >= count )
{
@@ -826,7 +826,7 @@ int __init dom0_construct_pv(struct domain *d,
panic("Not enough RAM for DOM0 reservation");
while ( pfn < d->tot_pages )
{
- mfn = page_to_mfn(page);
+ mfn = mfn_x(page_to_mfn(page));
#ifndef NDEBUG
#define pfn (nr_pages - 1 - (pfn - (alloc_epfn - alloc_spfn)))
#endif
@@ -11,12 +11,6 @@
#include <asm/pv/domain.h>
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
static void noreturn continue_nonidle_domain(struct vcpu *v)
{
check_wakeup_from_wait();
@@ -41,12 +41,6 @@
#include "emulate.h"
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
static int read_gate_descriptor(unsigned int gate_sel,
const struct vcpu *v,
unsigned int *sel,
@@ -43,16 +43,6 @@
#include "emulate.h"
#include "mm.h"
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
-/***********************
- * I/O emulation support
- */
-
struct priv_op_ctxt {
struct x86_emulate_ctxt ctxt;
struct {
@@ -27,12 +27,6 @@
#include "mm.h"
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
static unsigned int grant_to_pte_flags(unsigned int grant_flags,
unsigned int cache_flags)
{
@@ -33,12 +33,6 @@
#include "emulate.h"
#include "mm.h"
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
/*********************
* Writable Pagetables
*/
@@ -37,8 +37,6 @@
#include <compat/grant_table.h>
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
#undef virt_to_mfn
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
@@ -848,7 +846,7 @@ static unsigned long batch_memory_op(unsigned int cmd, unsigned int order,
set_xen_guest_handle(xmr.extent_start, pfns);
page_list_for_each ( pg, list )
{
- pfns[xmr.nr_extents++] = page_to_mfn(pg);
+ pfns[xmr.nr_extents++] = mfn_x(page_to_mfn(pg));
if ( xmr.nr_extents == ARRAY_SIZE(pfns) || !page_list_next(pg, list) )
{
long nr = xen_hypercall_memory_op(cmd, &xmr);
@@ -48,12 +48,6 @@
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
#define setup_trampoline() (bootsym_phys(trampoline_realmode_entry))
unsigned long __read_mostly trampoline_phys;
@@ -184,7 +184,7 @@ static void update_pagetable_mac(vmac_ctx_t *ctx)
for ( mfn = 0; mfn < max_page; mfn++ )
{
- struct page_info *page = mfn_to_page(mfn);
+ struct page_info *page = mfn_to_page(_mfn(mfn));
if ( !mfn_valid(_mfn(mfn)) )
continue;
@@ -276,7 +276,7 @@ static void tboot_gen_xenheap_integrity(const uint8_t key[TB_KEY_SIZE],
vmac_set_key((uint8_t *)key, &ctx);
for ( mfn = 0; mfn < max_page; mfn++ )
{
- struct page_info *page = __mfn_to_page(mfn);
+ struct page_info *page = mfn_to_page(_mfn(mfn));
if ( !mfn_valid(_mfn(mfn)) )
continue;
@@ -835,8 +835,8 @@ int wrmsr_hypervisor_regs(uint32_t idx, uint64_t val)
}
gdprintk(XENLOG_WARNING,
- "Bad GMFN %lx (MFN %lx) to MSR %08x\n",
- gmfn, page ? page_to_mfn(page) : -1UL, base);
+ "Bad GMFN %lx (MFN %#"PRI_mfn") to MSR %08x\n",
+ gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN), base);
return 0;
}
@@ -40,12 +40,6 @@ asm(".file \"" __FILE__ "\"");
#include <asm/mem_sharing.h>
#include <public/memory.h>
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-
unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
l2_pgentry_t *compat_idle_pg_table_l2;
@@ -1220,7 +1220,7 @@ int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
}
v->vcpu_info = new_info;
- v->vcpu_info_mfn = _mfn(page_to_mfn(page));
+ v->vcpu_info_mfn = page_to_mfn(page);
/* Set new vcpu_info pointer /before/ setting pending flags. */
smp_wmb();
@@ -1253,7 +1253,7 @@ void unmap_vcpu_info(struct vcpu *v)
vcpu_info_reset(v); /* NB: Clobbers v->vcpu_info_mfn */
- put_page_and_type(mfn_to_page(mfn_x(mfn)));
+ put_page_and_type(mfn_to_page(mfn));
}
int default_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
@@ -40,12 +40,6 @@
#include <xsm/xsm.h>
#include <asm/flushtlb.h>
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-
/* Per-domain grant information. */
struct grant_table {
/*
@@ -23,12 +23,6 @@
#include <asm/page.h>
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
/*
* When kexec transitions to the new kernel there is a one-to-one
* mapping between physical and virtual addresses. On processors
@@ -33,12 +33,6 @@
#include <asm/guest.h>
#endif
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-
struct memop_args {
/* INPUT */
struct domain *domain; /* Domain to be affected. */
@@ -151,12 +151,6 @@
#define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
#endif
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
-
/*
* Comma-separated list of hexadecimal page numbers containing bad bytes.
* e.g. 'badpage=0x3f45,0x8a321'.
@@ -243,7 +243,7 @@ static void tmem_persistent_pool_page_put(void *page_va)
struct page_info *pi;
ASSERT(IS_PAGE_ALIGNED(page_va));
- pi = mfn_to_page(virt_to_mfn(page_va));
+ pi = mfn_to_page(_mfn(virt_to_mfn(page_va)));
ASSERT(IS_VALID_PAGE(pi));
__tmem_free_page_thispool(pi);
}
@@ -14,10 +14,6 @@
#include <xen/cpu.h>
#include <xen/init.h>
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
bool __read_mostly opt_tmem;
boolean_param("tmem", opt_tmem);
@@ -243,7 +243,7 @@ static int alloc_trace_bufs(unsigned int pages)
/* Now share the trace pages */
for ( i = 0; i < pages; i++ )
{
- pg = mfn_to_page(t_info_mfn_list[offset + i]);
+ pg = mfn_to_page(_mfn(t_info_mfn_list[offset + i]));
share_xen_page_with_privileged_guests(pg, XENSHARE_writable);
}
}
@@ -274,7 +274,7 @@ out_dealloc:
uint32_t mfn = t_info_mfn_list[offset + i];
if ( !mfn )
break;
- ASSERT(!(mfn_to_page(mfn)->count_info & PGC_allocated));
+ ASSERT(!(mfn_to_page(_mfn(mfn))->count_info & PGC_allocated));
free_xenheap_pages(mfn_to_virt(mfn), 0);
}
}
@@ -9,10 +9,6 @@
#include <xen/vmap.h>
#include <asm/page.h>
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef page_to_mfn
-#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))
-
static DEFINE_SPINLOCK(vm_lock);
static void *__read_mostly vm_base[VMAP_REGION_NR];
#define vm_bitmap(x) ((unsigned long *)vm_base[x])
@@ -274,7 +270,7 @@ static void *vmalloc_type(size_t size, enum vmap_region type)
error:
while ( i-- )
- free_domheap_page(mfn_to_page(mfn_x(mfn[i])));
+ free_domheap_page(mfn_to_page(mfn[i]));
xfree(mfn);
return NULL;
}
@@ -22,8 +22,6 @@
/* Override macros from asm/page.h to make them work with mfn_t */
#undef virt_to_mfn
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-#undef mfn_to_page
-#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
/* Limit amount of pages used for shared buffer (per domain) */
#define MAX_OPROF_SHARED_PAGES 32
@@ -451,7 +451,7 @@ static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
BUG_ON( table == NULL || level < IOMMU_PAGING_MODE_LEVEL_1 ||
level > IOMMU_PAGING_MODE_LEVEL_6 );
- next_table_mfn = page_to_mfn(table);
+ next_table_mfn = mfn_x(page_to_mfn(table));
if ( level == IOMMU_PAGING_MODE_LEVEL_1 )
{
@@ -493,7 +493,7 @@ static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
return 1;
}
- next_table_mfn = page_to_mfn(table);
+ next_table_mfn = mfn_x(page_to_mfn(table));
set_iommu_pde_present((u32*)pde, next_table_mfn, next_level,
!!IOMMUF_writable, !!IOMMUF_readable);
@@ -520,7 +520,7 @@ static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
unmap_domain_page(next_table_vaddr);
return 1;
}
- next_table_mfn = page_to_mfn(table);
+ next_table_mfn = mfn_x(page_to_mfn(table));
set_iommu_pde_present((u32*)pde, next_table_mfn, next_level,
!!IOMMUF_writable, !!IOMMUF_readable);
}
@@ -577,7 +577,7 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
}
new_root_vaddr = __map_domain_page(new_root);
- old_root_mfn = page_to_mfn(old_root);
+ old_root_mfn = mfn_x(page_to_mfn(old_root));
set_iommu_pde_present(new_root_vaddr, old_root_mfn, level,
!!IOMMUF_writable, !!IOMMUF_readable);
level++;
@@ -712,7 +712,7 @@ int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
}
/* Deallocate lower level page table */
- free_amd_iommu_pgtable(mfn_to_page(pt_mfn[merge_level - 1]));
+ free_amd_iommu_pgtable(mfn_to_page(_mfn(pt_mfn[merge_level - 1])));
}
out:
@@ -802,7 +802,7 @@ void amd_iommu_share_p2m(struct domain *d)
mfn_t pgd_mfn;
pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
- p2m_table = mfn_to_page(mfn_x(pgd_mfn));
+ p2m_table = mfn_to_page(pgd_mfn);
if ( hd->arch.root_table != p2m_table )
{
@@ -184,7 +184,7 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
page_list_for_each ( page, &d->page_list )
{
- unsigned long mfn = page_to_mfn(page);
+ unsigned long mfn = mfn_x(page_to_mfn(page));
unsigned long gfn = mfn_to_gmfn(d, mfn);
unsigned int mapping = IOMMUF_readable;
int ret;
@@ -58,7 +58,7 @@ int arch_iommu_populate_page_table(struct domain *d)
if ( is_hvm_domain(d) ||
(page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
{
- unsigned long mfn = page_to_mfn(page);
+ unsigned long mfn = mfn_x(page_to_mfn(page));
unsigned long gfn = mfn_to_gmfn(d, mfn);
if ( gfn != gfn_x(INVALID_GFN) )
@@ -138,7 +138,7 @@ extern vaddr_t xenheap_virt_start;
#endif
#ifdef CONFIG_ARM_32
-#define is_xen_heap_page(page) is_xen_heap_mfn(__page_to_mfn(page))
+#define is_xen_heap_page(page) is_xen_heap_mfn(mfn_x(page_to_mfn(page)))
#define is_xen_heap_mfn(mfn) ({ \
unsigned long mfn_ = (mfn); \
(mfn_ >= mfn_x(xenheap_mfn_start) && \
@@ -147,7 +147,7 @@ extern vaddr_t xenheap_virt_start;
#else
#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
- (mfn_valid(_mfn(mfn)) && is_xen_heap_page(__mfn_to_page(mfn)))
+ (mfn_valid(_mfn(mfn)) && is_xen_heap_page(mfn_to_page(_mfn(mfn))))
#endif
#define is_xen_fixed_mfn(mfn) \
@@ -220,12 +220,14 @@ static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
})
/* Convert between machine frame numbers and page-info structures. */
-#define __mfn_to_page(mfn) (frame_table + (pfn_to_pdx(mfn) - frametable_base_pdx))
-#define __page_to_mfn(pg) pdx_to_pfn((unsigned long)((pg) - frame_table) + frametable_base_pdx)
+#define mfn_to_page(mfn) \
+ (frame_table + (mfn_to_pdx(mfn) - frametable_base_pdx))
+#define page_to_mfn(pg) \
+ pdx_to_mfn((unsigned long)((pg) - frame_table) + frametable_base_pdx)
/* Convert between machine addresses and page-info structures. */
-#define maddr_to_page(ma) __mfn_to_page((ma) >> PAGE_SHIFT)
-#define page_to_maddr(pg) ((paddr_t)__page_to_mfn(pg) << PAGE_SHIFT)
+#define maddr_to_page(ma) mfn_to_page(maddr_to_mfn(ma))
+#define page_to_maddr(pg) (mfn_to_maddr(page_to_mfn(pg)))
/* Convert between frame number and address formats. */
#define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
@@ -235,7 +237,7 @@ static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
#define gaddr_to_gfn(ga) _gfn(paddr_to_pfn(ga))
#define mfn_to_maddr(mfn) pfn_to_paddr(mfn_x(mfn))
#define maddr_to_mfn(ma) _mfn(paddr_to_pfn(ma))
-#define vmap_to_mfn(va) paddr_to_pfn(virt_to_maddr((vaddr_t)va))
+#define vmap_to_mfn(va) maddr_to_mfn(virt_to_maddr((vaddr_t)va))
#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va))
/* Page-align address and convert to frame number format */
@@ -293,8 +295,6 @@ static inline uint64_t gvirt_to_maddr(vaddr_t va, paddr_t *pa,
* These are overriden in various source files while underscored version
* remain intact.
*/
-#define mfn_to_page(mfn) __mfn_to_page(mfn)
-#define page_to_mfn(pg) __page_to_mfn(pg)
#define virt_to_mfn(va) __virt_to_mfn(va)
#define mfn_to_virt(mfn) __mfn_to_virt(mfn)
@@ -314,7 +314,7 @@ static inline struct page_info *virt_to_page(const void *v)
static inline void *page_to_virt(const struct page_info *pg)
{
- return mfn_to_virt(page_to_mfn(pg));
+ return mfn_to_virt(mfn_x(page_to_mfn(pg)));
}
struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va,
@@ -278,7 +278,7 @@ static inline struct page_info *get_page_from_gfn(
{
struct page_info *page;
p2m_type_t p2mt;
- unsigned long mfn = mfn_x(p2m_lookup(d, _gfn(gfn), &p2mt));
+ mfn_t mfn = p2m_lookup(d, _gfn(gfn), &p2mt);
if (t)
*t = p2mt;
@@ -286,7 +286,7 @@ static inline struct page_info *get_page_from_gfn(
if ( !p2m_is_any_ram(p2mt) )
return NULL;
- if ( !mfn_valid(_mfn(mfn)) )
+ if ( !mfn_valid(mfn) )
return NULL;
page = mfn_to_page(mfn);
@@ -271,7 +271,7 @@ struct page_info
#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
- (__mfn_valid(mfn) && is_xen_heap_page(__mfn_to_page(mfn)))
+ (__mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(_mfn(mfn))))
#define is_xen_fixed_mfn(mfn) \
((((mfn) << PAGE_SHIFT) >= __pa(&_stext)) && \
(((mfn) << PAGE_SHIFT) <= __pa(&__2M_rwdata_end)))
@@ -384,7 +384,7 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner);
static inline struct page_info *get_page_from_mfn(mfn_t mfn, struct domain *d)
{
- struct page_info *page = __mfn_to_page(mfn_x(mfn));
+ struct page_info *page = mfn_to_page(mfn);
if ( unlikely(!mfn_valid(mfn)) || unlikely(!get_page(page, d)) )
{
@@ -479,7 +479,7 @@ extern paddr_t mem_hotplug;
#define compat_machine_to_phys_mapping ((unsigned int *)RDWR_COMPAT_MPT_VIRT_START)
#define _set_gpfn_from_mfn(mfn, pfn) ({ \
- struct domain *d = page_get_owner(__mfn_to_page(mfn)); \
+ struct domain *d = page_get_owner(mfn_to_page(_mfn(mfn))); \
unsigned long entry = (d && (d == dom_cow)) ? \
SHARED_M2P_ENTRY : (pfn); \
((void)((mfn) >= (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) / 4 || \
@@ -488,7 +488,7 @@ static inline struct page_info *get_page_from_gfn(
/* Non-translated guests see 1-1 RAM / MMIO mappings everywhere */
if ( t )
*t = likely(d != dom_io) ? p2m_ram_rw : p2m_mmio_direct;
- page = __mfn_to_page(gfn);
+ page = mfn_to_page(_mfn(gfn));
return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL;
}
@@ -88,10 +88,10 @@
((paddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))
/* Get pointer to info structure of page mapped by pte (struct page_info *). */
-#define l1e_get_page(x) (__mfn_to_page(l1e_get_pfn(x)))
-#define l2e_get_page(x) (__mfn_to_page(l2e_get_pfn(x)))
-#define l3e_get_page(x) (__mfn_to_page(l3e_get_pfn(x)))
-#define l4e_get_page(x) (__mfn_to_page(l4e_get_pfn(x)))
+#define l1e_get_page(x) mfn_to_page(l1e_get_mfn(x))
+#define l2e_get_page(x) mfn_to_page(l2e_get_mfn(x))
+#define l3e_get_page(x) mfn_to_page(l3e_get_mfn(x))
+#define l4e_get_page(x) mfn_to_page(l4e_get_mfn(x))
/* Get pte access flags (unsigned int). */
#define l1e_get_flags(x) (get_pte_flags((x).l1))
@@ -157,10 +157,10 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
#define l4e_from_intpte(intpte) ((l4_pgentry_t) { (intpte_t)(intpte) })
/* Construct a pte from a page pointer and access flags. */
-#define l1e_from_page(page, flags) l1e_from_pfn(__page_to_mfn(page), (flags))
-#define l2e_from_page(page, flags) l2e_from_pfn(__page_to_mfn(page), (flags))
-#define l3e_from_page(page, flags) l3e_from_pfn(__page_to_mfn(page), (flags))
-#define l4e_from_page(page, flags) l4e_from_pfn(__page_to_mfn(page), (flags))
+#define l1e_from_page(page, flags) l1e_from_mfn(page_to_mfn(page), flags)
+#define l2e_from_page(page, flags) l2e_from_mfn(page_to_mfn(page), flags)
+#define l3e_from_page(page, flags) l3e_from_mfn(page_to_mfn(page), flags)
+#define l4e_from_page(page, flags) l4e_from_mfn(page_to_mfn(page), flags)
/* Add extra flags to an existing pte. */
#define l1e_add_flags(x, flags) ((x).l1 |= put_pte_flags(flags))
@@ -215,13 +215,13 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
/* Page-table type. */
typedef struct { u64 pfn; } pagetable_t;
#define pagetable_get_paddr(x) ((paddr_t)(x).pfn << PAGE_SHIFT)
-#define pagetable_get_page(x) __mfn_to_page((x).pfn)
+#define pagetable_get_page(x) mfn_to_page(pagetable_get_mfn(x))
#define pagetable_get_pfn(x) ((x).pfn)
#define pagetable_get_mfn(x) _mfn(((x).pfn))
#define pagetable_is_null(x) ((x).pfn == 0)
#define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
#define pagetable_from_mfn(mfn) ((pagetable_t) { mfn_x(mfn) })
-#define pagetable_from_page(pg) pagetable_from_pfn(__page_to_mfn(pg))
+#define pagetable_from_page(pg) pagetable_from_mfn(page_to_mfn(pg))
#define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
#define pagetable_null() pagetable_from_pfn(0)
@@ -240,12 +240,12 @@ void copy_page_sse2(void *, const void *);
#define __mfn_to_virt(mfn) (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))
/* Convert between machine frame numbers and page-info structures. */
-#define __mfn_to_page(mfn) (frame_table + pfn_to_pdx(mfn))
-#define __page_to_mfn(pg) pdx_to_pfn((unsigned long)((pg) - frame_table))
+#define mfn_to_page(mfn) (frame_table + mfn_to_pdx(mfn))
+#define page_to_mfn(pg) pdx_to_mfn((unsigned long)((pg) - frame_table))
/* Convert between machine addresses and page-info structures. */
-#define __maddr_to_page(ma) __mfn_to_page((ma) >> PAGE_SHIFT)
-#define __page_to_maddr(pg) ((paddr_t)__page_to_mfn(pg) << PAGE_SHIFT)
+#define __maddr_to_page(ma) mfn_to_page(maddr_to_mfn(ma))
+#define __page_to_maddr(pg) mfn_to_maddr(page_to_mfn(pg))
/* Convert between frame number and address formats. */
#define __pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
@@ -264,8 +264,6 @@ void copy_page_sse2(void *, const void *);
#define mfn_to_virt(mfn) __mfn_to_virt(mfn)
#define virt_to_maddr(va) __virt_to_maddr((unsigned long)(va))
#define maddr_to_virt(ma) __maddr_to_virt((unsigned long)(ma))
-#define mfn_to_page(mfn) __mfn_to_page(mfn)
-#define page_to_mfn(pg) __page_to_mfn(pg)
#define maddr_to_page(ma) __maddr_to_page(ma)
#define page_to_maddr(pg) __page_to_maddr(pg)
#define virt_to_page(va) __virt_to_page(va)
@@ -273,7 +271,7 @@ void copy_page_sse2(void *, const void *);
#define pfn_to_paddr(pfn) __pfn_to_paddr(pfn)
#define paddr_to_pfn(pa) __paddr_to_pfn(pa)
#define paddr_to_pdx(pa) pfn_to_pdx(paddr_to_pfn(pa))
-#define vmap_to_mfn(va) l1e_get_pfn(*virt_to_xen_l1e((unsigned long)(va)))
+#define vmap_to_mfn(va) _mfn(l1e_get_pfn(*virt_to_xen_l1e((unsigned long)(va))))
#define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va))
#endif /* !defined(__ASSEMBLY__) */
@@ -34,7 +34,7 @@ void unmap_domain_page(const void *va);
/*
* Given a VA from map_domain_page(), return its underlying MFN.
*/
-unsigned long domain_page_map_to_mfn(const void *va);
+mfn_t domain_page_map_to_mfn(const void *va);
/*
* Similar to the above calls, except the mapping is accessible in all
@@ -44,11 +44,11 @@ unsigned long domain_page_map_to_mfn(const void *va);
void *map_domain_page_global(mfn_t mfn);
void unmap_domain_page_global(const void *va);
-#define __map_domain_page(pg) map_domain_page(_mfn(__page_to_mfn(pg)))
+#define __map_domain_page(pg) map_domain_page(page_to_mfn(pg))
static inline void *__map_domain_page_global(const struct page_info *pg)
{
- return map_domain_page_global(_mfn(__page_to_mfn(pg)));
+ return map_domain_page_global(page_to_mfn(pg));
}
#else /* !CONFIG_DOMAIN_PAGE */
@@ -56,7 +56,7 @@ static inline void *__map_domain_page_global(const struct page_info *pg)
#define map_domain_page(mfn) __mfn_to_virt(mfn_x(mfn))
#define __map_domain_page(pg) page_to_virt(pg)
#define unmap_domain_page(va) ((void)(va))
-#define domain_page_map_to_mfn(va) virt_to_mfn((unsigned long)(va))
+#define domain_page_map_to_mfn(va) _mfn(virt_to_mfn((unsigned long)(va)))
static inline void *map_domain_page_global(mfn_t mfn)
{
@@ -277,13 +277,8 @@ struct page_list_head
# define PAGE_LIST_NULL ((typeof(((struct page_info){}).list.next))~0)
# if !defined(pdx_to_page) && !defined(page_to_pdx)
-# if defined(__page_to_mfn) || defined(__mfn_to_page)
-# define page_to_pdx __page_to_mfn
-# define pdx_to_page __mfn_to_page
-# else
# define page_to_pdx page_to_mfn
# define pdx_to_page mfn_to_page
-# endif
# endif
# define PAGE_LIST_HEAD_INIT(name) { NULL, NULL }
@@ -25,7 +25,7 @@
typedef uint32_t pagesize_t; /* like size_t, must handle largest PAGE_SIZE */
#define IS_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
-#define IS_VALID_PAGE(_pi) mfn_valid(_mfn(page_to_mfn(_pi)))
+#define IS_VALID_PAGE(_pi) mfn_valid(page_to_mfn(_pi))
extern struct page_list_head tmem_page_list;
extern spinlock_t tmem_page_list_lock;
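
Illustrative sketch only, not part of the patch above: the hunks all converge on one pattern, mfn_to_page()/page_to_mfn() taking and returning the typesafe mfn_t directly so callers drop the per-file override macros and the scattered _mfn()/mfn_x() wrapping. The real definitions come from TYPE_SAFE() and the per-arch headers touched above; the simplified, hypothetical names below (frame_table size, pdx math elided) only demonstrate the idea.

/* Minimal standalone C sketch of the typesafe-MFN pattern, assumptions as
 * noted in the lead-in; not the actual Xen definitions. */
#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned long mfn; } mfn_t;      /* opaque frame number  */

static inline mfn_t _mfn(unsigned long m)    { return (mfn_t){ m };   }
static inline unsigned long mfn_x(mfn_t m)   { return m.mfn;          }
static inline bool mfn_eq(mfn_t a, mfn_t b)  { return a.mfn == b.mfn; }

/* Hypothetical stand-ins for struct page_info and frame_table. */
struct page_info { int dummy; };
static struct page_info frame_table[16];

static inline struct page_info *mfn_to_page(mfn_t mfn)
{
    return frame_table + mfn_x(mfn);              /* pdx math elided */
}

static inline mfn_t page_to_mfn(const struct page_info *pg)
{
    return _mfn((unsigned long)(pg - frame_table));
}

int main(void)
{
    mfn_t mfn = _mfn(3);
    struct page_info *pg = mfn_to_page(mfn);      /* no mfn_x() needed */

    /* Round-trips stay typesafe; raw-integer escapes are now explicit. */
    printf("round trip ok: %d\n", mfn_eq(page_to_mfn(pg), mfn));
    return 0;
}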