
[1/4] mm: introduce debug_pagealloc_map_pages() helper

Message ID 20201025101555.3057-2-rppt@kernel.org
State New
Series [1/4] mm: introduce debug_pagealloc_map_pages() helper

Commit Message

Mike Rapoport Oct. 25, 2020, 10:15 a.m. UTC
From: Mike Rapoport <rppt@linux.ibm.com>

When CONFIG_DEBUG_PAGEALLOC is enabled, pages are unmapped from the kernel
direct mapping after free_pages(). The pages then need to be mapped back
before they can be used. These mapping operations use
__kernel_map_pages() guarded with debug_pagealloc_enabled().
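
For reference, the existing call sites open-code that guard; the lines below
are lifted from the hunks removed further down and are quoted here only as
context:

	if (debug_pagealloc_enabled_static())
		kernel_map_pages(page, 1 << order, 0);	/* unmap on free */

	if (debug_pagealloc_enabled_static())
		kernel_map_pages(page, 1 << order, 1);	/* map back on alloc */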

The only place that calls __kernel_map_pages() without checking whether
DEBUG_PAGEALLOC is enabled is the hibernation code that presumes
availability of this function when ARCH_HAS_SET_DIRECT_MAP is set.
Still, on arm64, __kernel_map_pages() will bail out when DEBUG_PAGEALLOC is
not enabled, while set_direct_map_invalid_noflush() may still render some
pages not present in the direct map, so the hibernation code won't be able
to save such pages.
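
A rough sketch of the arm64 behaviour described above (simplified, and not
the actual arch code) looks like this:

	/* simplified sketch, not the real arm64 implementation */
	void __kernel_map_pages(struct page *page, int numpages, int enable)
	{
		if (!debug_pagealloc_enabled())
			return;	/* hibernation's map-back request is ignored */
		set_memory_valid((unsigned long)page_address(page),
				 numpages, enable);
	}

With DEBUG_PAGEALLOC disabled, the early return swallows the hibernation
code's request to map such a page back before copying it.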

To make the interaction between page allocation debugging and hibernation
more robust, the dependency on DEBUG_PAGEALLOC or ARCH_HAS_SET_DIRECT_MAP
has to be made more explicit.

Start by combining the guard condition and the call to
__kernel_map_pages() into a single debug_pagealloc_map_pages() function,
to emphasize that __kernel_map_pages() should not be called without
DEBUG_PAGEALLOC, and use this new function to map/unmap pages when page
allocation debugging is enabled.
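
The resulting helper (added to include/linux/mm.h in the hunk below) simply
bundles the guard with the call:

	static inline void debug_pagealloc_map_pages(struct page *page,
						     int numpages, int enable)
	{
		if (debug_pagealloc_enabled_static())
			__kernel_map_pages(page, numpages, enable);
	}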

As the only remaining user of kernel_map_pages() is the hibernation code,
code, move that function into kernel/power/snapshot.c closer to its caller.

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
---
 include/linux/mm.h      | 16 +++++++---------
 kernel/power/snapshot.c | 11 +++++++++++
 mm/memory_hotplug.c     |  3 +--
 mm/page_alloc.c         |  6 ++----
 mm/slab.c               |  8 +++-----
 5 files changed, 24 insertions(+), 20 deletions(-)

Comments

David Hildenbrand Oct. 26, 2020, 11:05 a.m. UTC | #1
On 25.10.20 11:15, Mike Rapoport wrote:
> From: Mike Rapoport <rppt@linux.ibm.com>
> 
> When CONFIG_DEBUG_PAGEALLOC is enabled, it unmaps pages from the
> kernel direct mapping after free_pages(). The pages than need to be
> mapped back before they could be used. Theese mapping operations use 
> __kernel_map_pages() guarded with with debug_pagealloc_enabled().
> 
> The only place that calls __kernel_map_pages() without checking
> whether DEBUG_PAGEALLOC is enabled is the hibernation code that
> presumes availability of this function when ARCH_HAS_SET_DIRECT_MAP
> is set. Still, on arm64, __kernel_map_pages() will bail out when
> DEBUG_PAGEALLOC is not enabled but set_direct_map_invalid_noflush()
> may render some pages not present in the direct map and hibernation
> code won't be able to save such pages.
> 
> To make page allocation debugging and hibernation interaction more
> robust, the dependency on DEBUG_PAGEALLOC or ARCH_HAS_SET_DIRECT_MAP
> has to be made more explicit.
> 
> Start with combining the guard condition and the call to 
> __kernel_map_pages() into a single debug_pagealloc_map_pages()
> function to emphasize that __kernel_map_pages() should not be called
> without DEBUG_PAGEALLOC and use this new function to map/unmap pages
> when page allocation debug is enabled.
> 
> As the only remaining user of kernel_map_pages() is the hibernation
> code, mode that function into kernel/power/snapshot.c closer to a
> caller.

s/mode/move/

> 
> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> ---
>  include/linux/mm.h      | 16 +++++++---------
>  kernel/power/snapshot.c | 11 +++++++++++
>  mm/memory_hotplug.c     |  3 +--
>  mm/page_alloc.c         |  6 ++----
>  mm/slab.c               |  8 +++-----
>  5 files changed, 24 insertions(+), 20 deletions(-)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index ef360fe70aaf..14e397f3752c 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2927,21 +2927,19 @@ static inline bool debug_pagealloc_enabled_static(void)
>  #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
>  extern void __kernel_map_pages(struct page *page, int numpages, int enable);
>  
> -/*
> - * When called in DEBUG_PAGEALLOC context, the call should most likely be
> - * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
> - */
> -static inline void
> -kernel_map_pages(struct page *page, int numpages, int enable)
> +static inline void debug_pagealloc_map_pages(struct page *page,
> +					     int numpages, int enable)
>  {
> -	__kernel_map_pages(page, numpages, enable);
> +	if (debug_pagealloc_enabled_static())
> +		__kernel_map_pages(page, numpages, enable);
>  }
> +
>  #ifdef CONFIG_HIBERNATION
>  extern bool kernel_page_present(struct page *page);
>  #endif	/* CONFIG_HIBERNATION */
>  #else	/* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
> -static inline void
> -kernel_map_pages(struct page *page, int numpages, int enable) {}
> +static inline void debug_pagealloc_map_pages(struct page *page,
> +					     int numpages, int enable) {}
>  #ifdef CONFIG_HIBERNATION
>  static inline bool kernel_page_present(struct page *page) { return true; }
>  #endif	/* CONFIG_HIBERNATION */
> diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
> index 46b1804c1ddf..fa499466f645 100644
> --- a/kernel/power/snapshot.c
> +++ b/kernel/power/snapshot.c
> @@ -76,6 +76,17 @@ static inline void hibernate_restore_protect_page(void *page_address) {}
>  static inline void hibernate_restore_unprotect_page(void *page_address) {}
>  #endif /* CONFIG_STRICT_KERNEL_RWX  && CONFIG_ARCH_HAS_SET_MEMORY */
>  
> +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
> +static inline void
> +kernel_map_pages(struct page *page, int numpages, int enable)
> +{
> +	__kernel_map_pages(page, numpages, enable);
> +}
> +#else
> +static inline void
> +kernel_map_pages(struct page *page, int numpages, int enable) {}
> +#endif
> +

That change should go into a separate patch.


For the debug_pagealloc_map_pages() parts

Reviewed-by: David Hildenbrand <david@redhat.com>
Mike Rapoport Oct. 26, 2020, 11:54 a.m. UTC | #2
On Mon, Oct 26, 2020 at 12:05:13PM +0100, David Hildenbrand wrote:
> On 25.10.20 11:15, Mike Rapoport wrote:
> > From: Mike Rapoport <rppt@linux.ibm.com>
> > 
> > When CONFIG_DEBUG_PAGEALLOC is enabled, it unmaps pages from the
> > kernel direct mapping after free_pages(). The pages than need to be
> > mapped back before they could be used. Theese mapping operations use 
> > __kernel_map_pages() guarded with with debug_pagealloc_enabled().
> > 
> > The only place that calls __kernel_map_pages() without checking
> > whether DEBUG_PAGEALLOC is enabled is the hibernation code that
> > presumes availability of this function when ARCH_HAS_SET_DIRECT_MAP
> > is set. Still, on arm64, __kernel_map_pages() will bail out when
> > DEBUG_PAGEALLOC is not enabled but set_direct_map_invalid_noflush()
> > may render some pages not present in the direct map and hibernation
> > code won't be able to save such pages.
> > 
> > To make page allocation debugging and hibernation interaction more
> > robust, the dependency on DEBUG_PAGEALLOC or ARCH_HAS_SET_DIRECT_MAP
> > has to be made more explicit.
> > 
> > Start with combining the guard condition and the call to 
> > __kernel_map_pages() into a single debug_pagealloc_map_pages()
> > function to emphasize that __kernel_map_pages() should not be called
> > without DEBUG_PAGEALLOC and use this new function to map/unmap pages
> > when page allocation debug is enabled.
> > 
> > As the only remaining user of kernel_map_pages() is the hibernation
> > code, mode that function into kernel/power/snapshot.c closer to a
> > caller.
> 
> s/mode/move/
> 
> > 
> > Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> > ---
> >  include/linux/mm.h      | 16 +++++++---------
> >  kernel/power/snapshot.c | 11 +++++++++++
> >  mm/memory_hotplug.c     |  3 +--
> >  mm/page_alloc.c         |  6 ++----
> >  mm/slab.c               |  8 +++-----
> >  5 files changed, 24 insertions(+), 20 deletions(-)
> > 
> > diff --git a/include/linux/mm.h b/include/linux/mm.h
> > index ef360fe70aaf..14e397f3752c 100644
> > --- a/include/linux/mm.h
> > +++ b/include/linux/mm.h
> > @@ -2927,21 +2927,19 @@ static inline bool debug_pagealloc_enabled_static(void)
> >  #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
> >  extern void __kernel_map_pages(struct page *page, int numpages, int enable);
> >  
> > -/*
> > - * When called in DEBUG_PAGEALLOC context, the call should most likely be
> > - * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
> > - */
> > -static inline void
> > -kernel_map_pages(struct page *page, int numpages, int enable)
> > +static inline void debug_pagealloc_map_pages(struct page *page,
> > +					     int numpages, int enable)
> >  {
> > -	__kernel_map_pages(page, numpages, enable);
> > +	if (debug_pagealloc_enabled_static())
> > +		__kernel_map_pages(page, numpages, enable);
> >  }
> > +
> >  #ifdef CONFIG_HIBERNATION
> >  extern bool kernel_page_present(struct page *page);
> >  #endif	/* CONFIG_HIBERNATION */
> >  #else	/* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
> > -static inline void
> > -kernel_map_pages(struct page *page, int numpages, int enable) {}
> > +static inline void debug_pagealloc_map_pages(struct page *page,
> > +					     int numpages, int enable) {}
> >  #ifdef CONFIG_HIBERNATION
> >  static inline bool kernel_page_present(struct page *page) { return true; }
> >  #endif	/* CONFIG_HIBERNATION */
> > diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
> > index 46b1804c1ddf..fa499466f645 100644
> > --- a/kernel/power/snapshot.c
> > +++ b/kernel/power/snapshot.c
> > @@ -76,6 +76,17 @@ static inline void hibernate_restore_protect_page(void *page_address) {}
> >  static inline void hibernate_restore_unprotect_page(void *page_address) {}
> >  #endif /* CONFIG_STRICT_KERNEL_RWX  && CONFIG_ARCH_HAS_SET_MEMORY */
> >  
> > +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
> > +static inline void
> > +kernel_map_pages(struct page *page, int numpages, int enable)
> > +{
> > +	__kernel_map_pages(page, numpages, enable);
> > +}
> > +#else
> > +static inline void
> > +kernel_map_pages(struct page *page, int numpages, int enable) {}
> > +#endif
> > +
> 
> That change should go into a separate patch.
 
Hmm, I believe you refer to moving kernel_map_pages() to snapshot.c,
right?

> For the debug_pagealloc_map_pages() parts
> 
> Reviewed-by: David Hildenbrand <david@redhat.com>
 
Thanks!

> -- 
> Thanks,
> 
> David / dhildenb
>
David Hildenbrand Oct. 26, 2020, 11:55 a.m. UTC | #3
On 26.10.20 12:54, Mike Rapoport wrote:
> On Mon, Oct 26, 2020 at 12:05:13PM +0100, David Hildenbrand wrote:
>> On 25.10.20 11:15, Mike Rapoport wrote:
>>> From: Mike Rapoport <rppt@linux.ibm.com>
>>>
>>> When CONFIG_DEBUG_PAGEALLOC is enabled, it unmaps pages from the
>>> kernel direct mapping after free_pages(). The pages than need to be
>>> mapped back before they could be used. Theese mapping operations use
>>> __kernel_map_pages() guarded with with debug_pagealloc_enabled().
>>>
>>> The only place that calls __kernel_map_pages() without checking
>>> whether DEBUG_PAGEALLOC is enabled is the hibernation code that
>>> presumes availability of this function when ARCH_HAS_SET_DIRECT_MAP
>>> is set. Still, on arm64, __kernel_map_pages() will bail out when
>>> DEBUG_PAGEALLOC is not enabled but set_direct_map_invalid_noflush()
>>> may render some pages not present in the direct map and hibernation
>>> code won't be able to save such pages.
>>>
>>> To make page allocation debugging and hibernation interaction more
>>> robust, the dependency on DEBUG_PAGEALLOC or ARCH_HAS_SET_DIRECT_MAP
>>> has to be made more explicit.
>>>
>>> Start with combining the guard condition and the call to
>>> __kernel_map_pages() into a single debug_pagealloc_map_pages()
>>> function to emphasize that __kernel_map_pages() should not be called
>>> without DEBUG_PAGEALLOC and use this new function to map/unmap pages
>>> when page allocation debug is enabled.
>>>
>>> As the only remaining user of kernel_map_pages() is the hibernation
>>> code, mode that function into kernel/power/snapshot.c closer to a
>>> caller.
>>
>> s/mode/move/
>>
>>>
>>> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
>>> ---
>>>  include/linux/mm.h      | 16 +++++++---------
>>>  kernel/power/snapshot.c | 11 +++++++++++
>>>  mm/memory_hotplug.c     |  3 +--
>>>  mm/page_alloc.c         |  6 ++----
>>>  mm/slab.c               |  8 +++-----
>>>  5 files changed, 24 insertions(+), 20 deletions(-)
>>>
>>> diff --git a/include/linux/mm.h b/include/linux/mm.h
>>> index ef360fe70aaf..14e397f3752c 100644
>>> --- a/include/linux/mm.h
>>> +++ b/include/linux/mm.h
>>> @@ -2927,21 +2927,19 @@ static inline bool debug_pagealloc_enabled_static(void)
>>>  #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
>>>  extern void __kernel_map_pages(struct page *page, int numpages, int enable);
>>>  
>>> -/*
>>> - * When called in DEBUG_PAGEALLOC context, the call should most likely be
>>> - * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
>>> - */
>>> -static inline void
>>> -kernel_map_pages(struct page *page, int numpages, int enable)
>>> +static inline void debug_pagealloc_map_pages(struct page *page,
>>> +					     int numpages, int enable)
>>>  {
>>> -	__kernel_map_pages(page, numpages, enable);
>>> +	if (debug_pagealloc_enabled_static())
>>> +		__kernel_map_pages(page, numpages, enable);
>>>  }
>>> +
>>>  #ifdef CONFIG_HIBERNATION
>>>  extern bool kernel_page_present(struct page *page);
>>>  #endif	/* CONFIG_HIBERNATION */
>>>  #else	/* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
>>> -static inline void
>>> -kernel_map_pages(struct page *page, int numpages, int enable) {}
>>> +static inline void debug_pagealloc_map_pages(struct page *page,
>>> +					     int numpages, int enable) {}
>>>  #ifdef CONFIG_HIBERNATION
>>>  static inline bool kernel_page_present(struct page *page) { return true; }
>>>  #endif	/* CONFIG_HIBERNATION */
>>> diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
>>> index 46b1804c1ddf..fa499466f645 100644
>>> --- a/kernel/power/snapshot.c
>>> +++ b/kernel/power/snapshot.c
>>> @@ -76,6 +76,17 @@ static inline void hibernate_restore_protect_page(void *page_address) {}
>>>  static inline void hibernate_restore_unprotect_page(void *page_address) {}
>>>  #endif /* CONFIG_STRICT_KERNEL_RWX  && CONFIG_ARCH_HAS_SET_MEMORY */
>>>  
>>> +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
>>> +static inline void
>>> +kernel_map_pages(struct page *page, int numpages, int enable)
>>> +{
>>> +	__kernel_map_pages(page, numpages, enable);
>>> +}
>>> +#else
>>> +static inline void
>>> +kernel_map_pages(struct page *page, int numpages, int enable) {}
>>> +#endif
>>> +
>>
>> That change should go into a separate patch.
> 
> Hmm, I beleive you refer to moving kernel_map_pages() to snapshot.c,
> right?


Sorry, yes!


-- 
Thanks,

David / dhildenb

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index ef360fe70aaf..14e397f3752c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2927,21 +2927,19 @@  static inline bool debug_pagealloc_enabled_static(void)
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
 
-/*
- * When called in DEBUG_PAGEALLOC context, the call should most likely be
- * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
- */
-static inline void
-kernel_map_pages(struct page *page, int numpages, int enable)
+static inline void debug_pagealloc_map_pages(struct page *page,
+					     int numpages, int enable)
 {
-	__kernel_map_pages(page, numpages, enable);
+	if (debug_pagealloc_enabled_static())
+		__kernel_map_pages(page, numpages, enable);
 }
+
 #ifdef CONFIG_HIBERNATION
 extern bool kernel_page_present(struct page *page);
 #endif	/* CONFIG_HIBERNATION */
 #else	/* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
-static inline void
-kernel_map_pages(struct page *page, int numpages, int enable) {}
+static inline void debug_pagealloc_map_pages(struct page *page,
+					     int numpages, int enable) {}
 #ifdef CONFIG_HIBERNATION
 static inline bool kernel_page_present(struct page *page) { return true; }
 #endif	/* CONFIG_HIBERNATION */
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 46b1804c1ddf..fa499466f645 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -76,6 +76,17 @@  static inline void hibernate_restore_protect_page(void *page_address) {}
 static inline void hibernate_restore_unprotect_page(void *page_address) {}
 #endif /* CONFIG_STRICT_KERNEL_RWX  && CONFIG_ARCH_HAS_SET_MEMORY */
 
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
+static inline void
+kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	__kernel_map_pages(page, numpages, enable);
+}
+#else
+static inline void
+kernel_map_pages(struct page *page, int numpages, int enable) {}
+#endif
+
 static int swsusp_page_is_free(struct page *);
 static void swsusp_set_page_forbidden(struct page *);
 static void swsusp_unset_page_forbidden(struct page *);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b44d4c7ba73b..e2b6043a4428 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -614,8 +614,7 @@  void generic_online_page(struct page *page, unsigned int order)
 	 * so we should map it first. This is better than introducing a special
 	 * case in page freeing fast path.
 	 */
-	if (debug_pagealloc_enabled_static())
-		kernel_map_pages(page, 1 << order, 1);
+	debug_pagealloc_map_pages(page, 1 << order, 1);
 	__free_pages_core(page, order);
 	totalram_pages_add(1UL << order);
 #ifdef CONFIG_HIGHMEM
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 23f5066bd4a5..9a66a1ff9193 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1272,8 +1272,7 @@  static __always_inline bool free_pages_prepare(struct page *page,
 	 */
 	arch_free_page(page, order);
 
-	if (debug_pagealloc_enabled_static())
-		kernel_map_pages(page, 1 << order, 0);
+	debug_pagealloc_map_pages(page, 1 << order, 0);
 
 	kasan_free_nondeferred_pages(page, order);
 
@@ -2270,8 +2269,7 @@  inline void post_alloc_hook(struct page *page, unsigned int order,
 	set_page_refcounted(page);
 
 	arch_alloc_page(page, order);
-	if (debug_pagealloc_enabled_static())
-		kernel_map_pages(page, 1 << order, 1);
+	debug_pagealloc_map_pages(page, 1 << order, 1);
 	kasan_alloc_pages(page, order);
 	kernel_poison_pages(page, 1 << order, 1);
 	set_page_owner(page, order, gfp_flags);
diff --git a/mm/slab.c b/mm/slab.c
index b1113561b98b..340db0ce74c4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1431,10 +1431,8 @@  static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
 {
-	if (!is_debug_pagealloc_cache(cachep))
-		return;
-
-	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
+	debug_pagealloc_map_pages(virt_to_page(objp),
+				  cachep->size / PAGE_SIZE, map);
 }
 
 #else
@@ -2062,7 +2060,7 @@  int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
 
 #if DEBUG
 	/*
-	 * If we're going to use the generic kernel_map_pages()
+	 * If we're going to use the generic debug_pagealloc_map_pages()
 	 * poisoning, then it's going to smash the contents of
 	 * the redzone and userword anyhow, so switch them off.
 	 */