diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
@@ -133,8 +133,8 @@ static __always_inline void __flush_icache_all(void)
int set_memory_valid(unsigned long addr, int numpages, int enable);
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page, int numpages);
+int set_direct_map_default_noflush(struct page *page, int numpages);
bool kernel_page_present(struct page *page);
#include <asm-generic/cacheflush.h>
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
@@ -148,34 +148,36 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
__pgprot(PTE_VALID));
}
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(struct page *page, int numpages)
{
struct page_change_data data = {
.set_mask = __pgprot(0),
.clear_mask = __pgprot(PTE_VALID),
};
+ unsigned long size = PAGE_SIZE * numpages;
if (!debug_pagealloc_enabled() && !rodata_full)
return 0;
return apply_to_page_range(&init_mm,
(unsigned long)page_address(page),
- PAGE_SIZE, change_page_range, &data);
+ size, change_page_range, &data);
}
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(struct page *page, int numpages)
{
struct page_change_data data = {
.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
.clear_mask = __pgprot(PTE_RDONLY),
};
+ unsigned long size = PAGE_SIZE * numpages;
if (!debug_pagealloc_enabled() && !rodata_full)
return 0;
return apply_to_page_range(&init_mm,
(unsigned long)page_address(page),
- PAGE_SIZE, change_page_range, &data);
+ size, change_page_range, &data);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
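
Two details of the arm64 conversion are worth calling out. The only functional change is that the range handed to apply_to_page_range() now covers numpages pages rather than one. Note also that, per the !debug_pagealloc_enabled() && !rodata_full guards, both functions return 0 without touching the linear map unless rodata_full or debug_pagealloc is active, so a zero return does not by itself mean the pages became inaccessible. And as the _noflush suffix signals, TLB maintenance remains the caller's job; see the round-trip sketch at the end of this section.
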
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
@@ -32,8 +32,8 @@ void protect_kernel_linear_mapping_text_rodata(void);
static inline void protect_kernel_linear_mapping_text_rodata(void) {}
#endif
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page, int numpages);
+int set_direct_map_default_noflush(struct page *page, int numpages);
bool kernel_page_present(struct page *page);
#endif /* __ASSEMBLY__ */
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
@@ -156,11 +156,11 @@ int set_memory_nx(unsigned long addr, int numpages)
return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(struct page *page, int numpages)
{
int ret;
unsigned long start = (unsigned long)page_address(page);
- unsigned long end = start + PAGE_SIZE;
+ unsigned long end = start + PAGE_SIZE * numpages;
struct pageattr_masks masks = {
.set_mask = __pgprot(0),
.clear_mask = __pgprot(_PAGE_PRESENT)
@@ -173,11 +173,11 @@ int set_direct_map_invalid_noflush(struct page *page)
return ret;
}
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(struct page *page, int numpages)
{
int ret;
unsigned long start = (unsigned long)page_address(page);
- unsigned long end = start + PAGE_SIZE;
+ unsigned long end = start + PAGE_SIZE * numpages;
struct pageattr_masks masks = {
.set_mask = PAGE_KERNEL,
.clear_mask = __pgprot(0)
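
The riscv conversion is equally mechanical: only the end address of the walked range is widened, and the PTE masks are untouched. The practical gain is that one batched call performs a single page-table walk where the old one-page signature forced a walk per page. A minimal sketch of the equivalence, assuming page is the first of numpages pages that are contiguous in the direct map (the wrapper name is hypothetical, not from the patch):

	#include <linux/mm.h>
	#include <linux/set_memory.h>

	/* Hypothetical wrapper: invalidate a contiguous run of pages in the
	 * direct map with one page-table walk. */
	static int example_invalidate_run(struct page *page, int numpages)
	{
		/*
		 * Equivalent in effect to the per-page loop the old
		 * one-page interface required:
		 *
		 *	for (i = 0; i < numpages; i++)
		 *		err = set_direct_map_invalid_noflush(
		 *				nth_page(page, i), 1);
		 */
		return set_direct_map_invalid_noflush(page, numpages);
	}
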
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
@@ -80,8 +80,8 @@ int set_pages_wb(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page, int numpages);
+int set_direct_map_default_noflush(struct page *page, int numpages);
bool kernel_page_present(struct page *page);
extern int kernel_set_to_readonly;
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
@@ -2192,14 +2192,14 @@ static int __set_pages_np(struct page *page, int numpages)
return __change_page_attr_set_clr(&cpa, 0);
}
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(struct page *page, int numpages)
{
- return __set_pages_np(page, 1);
+ return __set_pages_np(page, numpages);
}
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(struct page *page, int numpages)
{
- return __set_pages_p(page, 1);
+ return __set_pages_p(page, numpages);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
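
x86 is the simplest conversion, since __set_pages_np() and __set_pages_p() already took a page count and the old wrappers merely hard-coded 1. Because invalidation clears _PAGE_PRESENT, kernel_page_present() (declared in the header hunk above) can observe the effect; a hypothetical sanity check, purely illustrative:

	#include <linux/bug.h>
	#include <linux/mm.h>
	#include <linux/set_memory.h>

	/* Hypothetical self-check: after a batched invalidation, none of the
	 * affected pages should still be present in the direct map. */
	static void example_check_invalidated(struct page *page, int numpages)
	{
		int i;

		if (set_direct_map_invalid_noflush(page, numpages))
			return;

		for (i = 0; i < numpages; i++)
			WARN_ON(kernel_page_present(nth_page(page, i)));
	}
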
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
@@ -15,11 +15,11 @@ static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
#ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
-static inline int set_direct_map_invalid_noflush(struct page *page)
+static inline int set_direct_map_invalid_noflush(struct page *page, int numpages)
{
return 0;
}
-static inline int set_direct_map_default_noflush(struct page *page)
+static inline int set_direct_map_default_noflush(struct page *page, int numpages)
{
return 0;
}
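
When CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set, both stubs report success while doing nothing, much like the guarded arm64 paths above. A caller that depends on the pages genuinely disappearing from the direct map therefore has to gate on the config option rather than on the return value. A minimal sketch, with a hypothetical function name:

	#include <linux/errno.h>
	#include <linux/kconfig.h>
	#include <linux/set_memory.h>

	/* Hypothetical guard: bail out when the architecture cannot actually
	 * manipulate the direct map, since the stubs above would "succeed"
	 * without unmapping anything. */
	static int example_require_direct_map(void)
	{
		if (!IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP))
			return -EOPNOTSUPP;
		return 0;
	}
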
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
@@ -86,7 +86,7 @@ static inline void hibernate_restore_unprotect_page(void *page_address) {}
static inline void hibernate_map_page(struct page *page)
{
if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
- int ret = set_direct_map_default_noflush(page);
+ int ret = set_direct_map_default_noflush(page, 1);
if (ret)
pr_warn_once("Failed to remap page\n");
@@ -99,7 +99,7 @@ static inline void hibernate_unmap_page(struct page *page)
{
if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
unsigned long addr = (unsigned long)page_address(page);
- int ret = set_direct_map_invalid_noflush(page);
+ int ret = set_direct_map_invalid_noflush(page, 1);
if (ret)
pr_warn_once("Failed to remap page\n");
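
Hibernation maps and unmaps one page at a time, so its call sites just pass an explicit 1 and behave exactly as before; the addr computed in hibernate_unmap_page() feeds a flush_tlb_kernel_range(addr, addr + PAGE_SIZE) call just past the quoted context, honoring the _noflush contract.
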
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
@@ -2469,14 +2469,15 @@ struct vm_struct *remove_vm_area(const void *addr)
}
static inline void set_area_direct_map(const struct vm_struct *area,
- int (*set_direct_map)(struct page *page))
+ int (*set_direct_map)(struct page *page,
+ int numpages))
{
int i;
/* HUGE_VMALLOC passes small pages to set_direct_map */
for (i = 0; i < area->nr_pages; i++)
if (page_address(area->pages[i]))
- set_direct_map(area->pages[i]);
+ set_direct_map(area->pages[i], 1);
}
/* Handle removing and resetting vm mappings related to the vm_struct. */
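
In set_area_direct_map() the count deliberately stays 1: with HUGE_VMALLOC, area->pages[] holds individual small pages that need not be contiguous in the direct map, so batching would be unsafe there. Callers that do hold a physically contiguous run get the full benefit, composing the batched calls with explicit TLB maintenance. A hypothetical round trip, assuming page begins numpages contiguous pages (the function name and comments are illustrative, not from the patch):

	#include <linux/mm.h>
	#include <linux/set_memory.h>
	#include <asm/tlbflush.h>

	/* Hypothetical round trip: pull a contiguous run of pages out of the
	 * direct map, then restore the default mapping later. */
	static int example_round_trip(struct page *page, int numpages)
	{
		unsigned long addr = (unsigned long)page_address(page);
		int err;

		err = set_direct_map_invalid_noflush(page, numpages);
		if (err)
			return err;

		/* _noflush: flushing the stale translations is our job. */
		flush_tlb_kernel_range(addr, addr + numpages * PAGE_SIZE);

		/* ... the pages are now unreachable via the direct map ... */

		/*
		 * Restoring validity typically needs no flush of its own,
		 * as invalid entries are not cached in the TLB.
		 */
		return set_direct_map_default_noflush(page, numpages);
	}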