Message ID | 20180403153251.19595-9-julien.grall@arm.com |
---|---|
State | Accepted |
Commit | c748d32597076ecb66a27ed63525fc5f3d5addda |
Series | xen: Convert page_to_mfn and mfn_to_page to use typesafe MFN |
On Tue, 3 Apr 2018, Julien Grall wrote:
> The function populate_pt_range is used to populate the page tables in
> advance, but it will not do the actual mapping. So passing the MFN as a
> parameter is pointless. Note that the only caller passes 0...
>
> At the same time, replace 0 with INVALID_MFN. While this does not matter,
> as the entry will be marked as not valid and populated, INVALID_MFN
> helps the reader to know the MFN is invalid.
>
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Reviewed-by: Wei Liu <wei.liu2@citrix.com>
> Reviewed-by: George Dunlap <george.dunlap@citrix.com>

Acked-by: Stefano Stabellini <sstabellini@kernel.org>

> ---
>
> Cc: Stefano Stabellini <sstabellini@kernel.org>
> Cc: Julien Grall <julien.grall@arm.com>
> Cc: Ian Jackson <ian.jackson@eu.citrix.com>
> Cc: Jan Beulich <jbeulich@suse.com>
> Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> Cc: Tim Deegan <tim@xen.org>
>
> Changes in v6:
>     - Add George's and Wei's reviewed-by
>     - Add Andrew's acked-by
>
> Changes in v5:
>     - Update the commit message to explain why 0 -> INVALID_MFN.
>
> Changes in v4:
>     - Patch added.
> ---
>  xen/arch/arm/mm.c    | 5 ++---
>  xen/arch/x86/mm.c    | 5 ++---
>  xen/common/vmap.c    | 2 +-
>  xen/include/xen/mm.h | 3 +--
>  4 files changed, 6 insertions(+), 9 deletions(-)
>
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 1126e246c0..436df6936b 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -1072,10 +1072,9 @@ int map_pages_to_xen(unsigned long virt,
>      return create_xen_entries(INSERT, virt, _mfn(mfn), nr_mfns, flags);
>  }
>
> -int populate_pt_range(unsigned long virt, unsigned long mfn,
> -                      unsigned long nr_mfns)
> +int populate_pt_range(unsigned long virt, unsigned long nr_mfns)
>  {
> -    return create_xen_entries(RESERVE, virt, _mfn(mfn), nr_mfns, 0);
> +    return create_xen_entries(RESERVE, virt, INVALID_MFN, nr_mfns, 0);
>  }
>
>  int destroy_xen_mappings(unsigned long v, unsigned long e)
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 605f4377fa..6d5f40482e 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -5007,10 +5007,9 @@ int map_pages_to_xen(
>      return 0;
>  }
>
> -int populate_pt_range(unsigned long virt, unsigned long mfn,
> -                      unsigned long nr_mfns)
> +int populate_pt_range(unsigned long virt, unsigned long nr_mfns)
>  {
> -    return map_pages_to_xen(virt, mfn, nr_mfns, MAP_SMALL_PAGES);
> +    return map_pages_to_xen(virt, mfn_x(INVALID_MFN), nr_mfns, MAP_SMALL_PAGES);
>  }
>
>  /*
> diff --git a/xen/common/vmap.c b/xen/common/vmap.c
> index 0b23f8fb97..11785ffb0a 100644
> --- a/xen/common/vmap.c
> +++ b/xen/common/vmap.c
> @@ -42,7 +42,7 @@ void __init vm_init_type(enum vmap_region type, void *start, void *end)
>      bitmap_fill(vm_bitmap(type), vm_low[type]);
>
>      /* Populate page tables for the bitmap if necessary. */
> -    populate_pt_range(va, 0, vm_low[type] - nr);
> +    populate_pt_range(va, vm_low[type] - nr);
>  }
>
>  static void *vm_alloc(unsigned int nr, unsigned int align,
> diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
> index 142aa73354..538478fa24 100644
> --- a/xen/include/xen/mm.h
> +++ b/xen/include/xen/mm.h
> @@ -175,8 +175,7 @@ int destroy_xen_mappings(unsigned long v, unsigned long e);
>   * Create only non-leaf page table entries for the
>   * page range in Xen virtual address space.
>   */
> -int populate_pt_range(unsigned long virt, unsigned long mfn,
> -                      unsigned long nr_mfns);
> +int populate_pt_range(unsigned long virt, unsigned long nr_mfns);
>  /* Claim handling */
>  unsigned long domain_adjust_tot_pages(struct domain *d, long pages);
>  int domain_set_outstanding_pages(struct domain *d, unsigned long pages);
> --
> 2.11.0
>
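For readers who want the gist without reading the hunks twice: the sketch below is a small, self-contained C model (not Xen code) of why the MFN argument was dead weight. A RESERVE-style call only allocates the intermediate page-table levels and leaves the leaf entries invalid, so whatever frame number the caller passes is never consumed. The mfn_t/_mfn()/mfn_x()/INVALID_MFN helpers are simplified stand-ins for Xen's typesafe MFN wrappers, and create_entries()/pt[] are invented purely for the illustration.

```c
/*
 * Illustrative sketch only -- NOT part of the patch and not Xen code.
 * It models, as a freestanding program, why populate_pt_range() needs no
 * MFN argument: a RESERVE operation only allocates intermediate
 * (non-leaf) page-table levels and leaves the leaf entries invalid.
 */
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for Xen's typesafe MFN wrappers. */
typedef struct { unsigned long m; } mfn_t;
static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }
static inline unsigned long mfn_x(mfn_t m) { return m.m; }
#define INVALID_MFN _mfn(~0UL)            /* all-ones sentinel */

enum xenmap_operation { INSERT, RESERVE };

/* Toy "page table": one leaf entry per virtual page in a small window. */
static struct { bool level_allocated; bool valid; unsigned long mfn; } pt[16];

static int create_entries(enum xenmap_operation op, unsigned long virt,
                          mfn_t mfn, unsigned long nr)
{
    for ( unsigned long i = 0; i < nr; i++ )
    {
        pt[virt + i].level_allocated = true;   /* non-leaf levels exist */
        if ( op == INSERT )
        {
            pt[virt + i].valid = true;         /* real mapping */
            pt[virt + i].mfn = mfn_x(mfn) + i;
        }
        /* RESERVE: leaf stays invalid, so 'mfn' is never consumed. */
    }
    return 0;
}

/* New-style signature from the patch: no MFN parameter at all. */
static int populate_pt_range(unsigned long virt, unsigned long nr)
{
    return create_entries(RESERVE, virt, INVALID_MFN, nr);
}

int main(void)
{
    populate_pt_range(4, 3);                   /* reserve 3 "pages" */
    printf("entry 5: allocated=%d valid=%d\n",
           pt[5].level_allocated, pt[5].valid); /* allocated=1 valid=0 */
    return 0;
}
```

Built with any C99 compiler, the program reports the reserved entries as allocated but not valid, which mirrors the state the real populate_pt_range() is documented to leave behind ("create only non-leaf page table entries").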
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 1126e246c0..436df6936b 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1072,10 +1072,9 @@ int map_pages_to_xen(unsigned long virt,
     return create_xen_entries(INSERT, virt, _mfn(mfn), nr_mfns, flags);
 }
 
-int populate_pt_range(unsigned long virt, unsigned long mfn,
-                      unsigned long nr_mfns)
+int populate_pt_range(unsigned long virt, unsigned long nr_mfns)
 {
-    return create_xen_entries(RESERVE, virt, _mfn(mfn), nr_mfns, 0);
+    return create_xen_entries(RESERVE, virt, INVALID_MFN, nr_mfns, 0);
 }
 
 int destroy_xen_mappings(unsigned long v, unsigned long e)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 605f4377fa..6d5f40482e 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5007,10 +5007,9 @@ int map_pages_to_xen(
     return 0;
 }
 
-int populate_pt_range(unsigned long virt, unsigned long mfn,
-                      unsigned long nr_mfns)
+int populate_pt_range(unsigned long virt, unsigned long nr_mfns)
 {
-    return map_pages_to_xen(virt, mfn, nr_mfns, MAP_SMALL_PAGES);
+    return map_pages_to_xen(virt, mfn_x(INVALID_MFN), nr_mfns, MAP_SMALL_PAGES);
 }
 
 /*
diff --git a/xen/common/vmap.c b/xen/common/vmap.c
index 0b23f8fb97..11785ffb0a 100644
--- a/xen/common/vmap.c
+++ b/xen/common/vmap.c
@@ -42,7 +42,7 @@ void __init vm_init_type(enum vmap_region type, void *start, void *end)
     bitmap_fill(vm_bitmap(type), vm_low[type]);
 
     /* Populate page tables for the bitmap if necessary. */
-    populate_pt_range(va, 0, vm_low[type] - nr);
+    populate_pt_range(va, vm_low[type] - nr);
 }
 
 static void *vm_alloc(unsigned int nr, unsigned int align,
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 142aa73354..538478fa24 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -175,8 +175,7 @@ int destroy_xen_mappings(unsigned long v, unsigned long e);
  * Create only non-leaf page table entries for the
  * page range in Xen virtual address space.
  */
-int populate_pt_range(unsigned long virt, unsigned long mfn,
-                      unsigned long nr_mfns);
+int populate_pt_range(unsigned long virt, unsigned long nr_mfns);
 /* Claim handling */
 unsigned long domain_adjust_tot_pages(struct domain *d, long pages);
 int domain_set_outstanding_pages(struct domain *d, unsigned long pages);
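A short note on the 0 -> INVALID_MFN half of the change: in the Xen tree INVALID_MFN is, as far as I can tell, the all-ones sentinel (roughly _mfn(~0UL)), whereas 0 is an ordinary frame number that could in principle be mapped. Passing the sentinel therefore makes it obvious at the call site that no frame is intended, which is the readability point made in the commit message; the x86 wrapper only unwraps it with mfn_x() because map_pages_to_xen() still takes a raw unsigned long at this point in the series.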