@@ -251,7 +251,8 @@ static inline bool memblock_is_nomap(str
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
- unsigned long *out_end_pfn, int *out_nid);
+ unsigned long *out_end_pfn, int *out_nid,
+ struct memblock_type *type);
/**
* for_each_mem_pfn_range - early memory pfn range iterator
@@ -263,9 +264,17 @@ void __next_mem_pfn_range(int *idx, int
*
* Walks over configured memory ranges.
*/
-#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
- for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
- i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
+#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
+ for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid, \
+ &memblock.memory); \
+ i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid, \
+ &memblock.memory))
+
+#define for_each_res_pfn_range(i, nid, p_start, p_end, p_nid) \
+ for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid, \
+ &memblock.reserved); \
+ i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid, \
+ &memblock.reserved))
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
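As a usage sketch (not part of the patch itself), the new for_each_res_pfn_range() is called exactly like for_each_mem_pfn_range(); it just walks memblock.reserved instead of memblock.memory. MAX_NUMNODES is assumed to remain the "match any node" wildcard accepted by these iterators:

	int i, nid;
	unsigned long start_pfn, end_pfn;

	/* Walk every memblock.reserved range on any node. */
	for_each_res_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		/* [start_pfn, end_pfn) is one reserved PFN range on node nid. */
	}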
@@ -64,7 +64,8 @@ void __dump_page(struct page *page, cons
* dump_page() when detected.
*/
if (page_poisoned) {
- pr_warn("page:%px is uninitialized and poisoned", page);
+ pr_warn("page:%px pfn:%ld is uninitialized and poisoned",
+ page, page_to_pfn(page));
goto hex_only;
}
@@ -1198,9 +1198,9 @@ void __init_memblock __next_mem_range_re
*/
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
unsigned long *out_start_pfn,
- unsigned long *out_end_pfn, int *out_nid)
+ unsigned long *out_end_pfn, int *out_nid,
+ struct memblock_type *type)
{
- struct memblock_type *type = &memblock.memory;
struct memblock_region *r;
int r_nid;
@@ -1458,6 +1458,7 @@ static void __meminit init_reserved_page
{
pg_data_t *pgdat;
int nid, zid;
+ bool found = false;
if (!early_page_uninitialised(pfn))
return;
@@ -1468,10 +1469,15 @@ static void __meminit init_reserved_page
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
struct zone *zone = &pgdat->node_zones[zid];
- if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
+ if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) {
+ found = true;
break;
+ }
}
- __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
+ if (likely(found))
+ __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
+ else
+ WARN_ON_ONCE(1);
}
#else
static inline void init_reserved_page(unsigned long pfn)
@@ -6227,7 +6233,7 @@ void __init __weak memmap_init(unsigned
unsigned long zone,
unsigned long range_start_pfn)
{
- unsigned long start_pfn, end_pfn, next_pfn = 0;
+ unsigned long start_pfn, end_pfn, prev_pfn = 0;
unsigned long range_end_pfn = range_start_pfn + size;
u64 pgcnt = 0;
int i;
@@ -6235,7 +6241,7 @@ void __init __weak memmap_init(unsigned
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
- next_pfn = clamp(next_pfn, range_start_pfn, range_end_pfn);
+ prev_pfn = clamp(prev_pfn, range_start_pfn, range_end_pfn);
if (end_pfn > start_pfn) {
size = end_pfn - start_pfn;
@@ -6243,10 +6249,10 @@ void __init __weak memmap_init(unsigned
MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
}
- if (next_pfn < start_pfn)
- pgcnt += init_unavailable_range(next_pfn, start_pfn,
+ if (prev_pfn < start_pfn)
+ pgcnt += init_unavailable_range(prev_pfn, start_pfn,
zone, nid);
- next_pfn = end_pfn;
+ prev_pfn = end_pfn;
}
/*
@@ -6256,12 +6262,31 @@ void __init __weak memmap_init(unsigned
* considered initialized. Make sure that memmap has a well defined
* state.
*/
- if (next_pfn < range_end_pfn)
- pgcnt += init_unavailable_range(next_pfn, range_end_pfn,
+ if (prev_pfn < range_end_pfn)
+ pgcnt += init_unavailable_range(prev_pfn, range_end_pfn,
zone, nid);
+ /*
+ * memblock.reserved isn't enforced to overlap with
+ * memblock.memory, so also initialize the struct pages for
+ * memblock.reserved ranges in case they don't overlap.
+ *
+ * If a struct page associated with a memblock.reserved range
+ * doesn't overlap with any zone range, it is left
+ * uninitialized, ideally with PagePoison, which makes the
+ * error easier to detect.
+ */
+ for_each_res_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+ start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
+ end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
+
+ if (end_pfn > start_pfn)
+ pgcnt += init_unavailable_range(start_pfn, end_pfn,
+ zone, nid);
+ }
+
if (pgcnt)
- pr_info("%s: Zeroed struct page in unavailable ranges: %lld\n",
+ pr_info("%s: pages in unavailable ranges: %lld\n",
zone_names[zone], pgcnt);
}
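Both the hole pass and the new memblock.reserved pass funnel into init_unavailable_range(). As a rough, hedged sketch of what that helper is assumed to do in this tree (simplified, not the verbatim implementation; the real function may skip whole pageblocks whose memmap isn't valid), each covered PFN gets its struct page initialized against the given zone/node and marked reserved, which is where the pgcnt total printed above comes from:

/* Simplified sketch, assuming the 4-argument form used by the hunk above. */
static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn,
					 int zone, int node)
{
	unsigned long pfn;
	u64 pgcnt = 0;

	for (pfn = spfn; pfn < epfn; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		/* Give the struct page a valid zone/node ... */
		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
		/* ... and keep it reserved so it is never freed to the buddy. */
		__SetPageReserved(pfn_to_page(pfn));
		pgcnt++;
	}
	return pgcnt;
}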
@@ -6499,6 +6524,10 @@ void __init get_pfn_range_for_nid(unsign
*start_pfn = min(*start_pfn, this_start_pfn);
*end_pfn = max(*end_pfn, this_end_pfn);
}
+ for_each_res_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
+ *start_pfn = min(*start_pfn, this_start_pfn);
+ *end_pfn = max(*end_pfn, this_end_pfn);
+ }
if (*start_pfn == -1UL)
*start_pfn = 0;
@@ -7126,7 +7155,13 @@ unsigned long __init node_map_pfn_alignm
*/
unsigned long __init find_min_pfn_with_active_regions(void)
{
- return PHYS_PFN(memblock_start_of_DRAM());
+ /*
+ * Reserved regions must be included so that their struct
+ * pages can be part of a zone and obtain a valid zoneid
+ * before __SetPageReserved() is called on them.
+ */
+ return min(PHYS_PFN(memblock_start_of_DRAM()),
+ PHYS_PFN(memblock.reserved.regions[0].base));
}
/*
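The min() above relies on memblock keeping its region arrays sorted by base address, so memblock.reserved.regions[0] is the lowest reserved range. An equivalent spelled-out form, for illustration only (the helper name and the empty-type guard are additions not present in the patch):

static unsigned long __init min_pfn_including_reserved(void)
{
	phys_addr_t start = memblock_start_of_DRAM();

	/* regions[] is kept sorted, so regions[0].base is the lowest base. */
	if (memblock.reserved.regions[0].size)
		start = min(start, memblock.reserved.regions[0].base);

	return PHYS_PFN(start);
}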