From c2c07512bbd80d670dc80c5a534f597636489705 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 25 Feb 2016 12:59:27 +1100 Subject: [PATCH] mm/page_poison.c: enable PAGE_POISONING as a separate option Page poisoning is currently set up as a feature if architectures don't have architecture debug page_alloc to allow unmapping of pages. It has uses apart from that though. Clearing of the pages on free provides an increase in security as it helps to limit the risk of information leaks. Allow page poisoning to be enabled as a separate option independent of any other debug feature. Because of how hibernation is implemented, the checks on alloc cannot occur if hibernation is enabled. This option can also be set on !HIBERNATION. Credit to Mathias Krause and grsecurity for original work Signed-off-by: Laura Abbott Cc: "Kirill A. Shutemov" Cc: Vlastimil Babka Cc: Michal Hocko Cc: Kees Cook Cc: Mathias Krause Cc: Dave Hansen Cc: Jianyu Zhan Signed-off-by: Andrew Morton --- include/linux/mm.h | 3 +++ mm/Kconfig.debug | 22 +++++++++++++++++++++- mm/debug-pagealloc.c | 8 +------- mm/page_alloc.c | 2 ++ mm/page_poison.c | 14 ++++++++++++++ 5 files changed, 41 insertions(+), 8 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 539c409baf3445..9eb4e21931ed81 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2179,10 +2179,13 @@ extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, extern void poison_pages(struct page *page, int n); extern void unpoison_pages(struct page *page, int n); extern bool page_poisoning_enabled(void); +extern void kernel_poison_pages(struct page *page, int numpages, int enable); #else static inline void poison_pages(struct page *page, int n) { } static inline void unpoison_pages(struct page *page, int n) { } static inline bool page_poisoning_enabled(void) { return false; } +static inline void kernel_poison_pages(struct page *page, int numpages, + int enable) { } #endif #ifdef 
CONFIG_DEBUG_PAGEALLOC diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug index a0c136af9c9144..f89d1c99f97f67 100644 --- a/mm/Kconfig.debug +++ b/mm/Kconfig.debug @@ -41,4 +41,24 @@ config DEBUG_PAGEALLOC_ENABLE_DEFAULT can be overridden by debug_pagealloc=off|on. config PAGE_POISONING - bool + bool "Poison pages after freeing" + select PAGE_EXTENSION + select PAGE_POISONING_NO_SANITY if HIBERNATION + ---help--- + Fill the pages with poison patterns after free_pages() and verify + the patterns before alloc_pages. The filling of the memory helps + reduce the risk of information leaks from freed data. This does + have a potential performance impact. + + If unsure, say N + +config PAGE_POISONING_NO_SANITY + depends on PAGE_POISONING + bool "Only poison, don't sanity check" + ---help--- + Skip the sanity checking on alloc, only fill the pages with + poison on free. This reduces some of the overhead of the + poisoning feature. + + If you are only interested in sanitization, say Y. Otherwise + say N. 
diff --git a/mm/debug-pagealloc.c b/mm/debug-pagealloc.c index 3cc4c1dbd2d898..0928d1390a6d98 100644 --- a/mm/debug-pagealloc.c +++ b/mm/debug-pagealloc.c @@ -8,11 +8,5 @@ void __kernel_map_pages(struct page *page, int numpages, int enable) { - if (!page_poisoning_enabled()) - return; - - if (enable) - unpoison_pages(page, numpages); - else - poison_pages(page, numpages); + kernel_poison_pages(page, numpages, enable); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 36a0a792f4f873..8987902186b842 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1007,6 +1007,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order) PAGE_SIZE << order); } arch_free_page(page, order); + kernel_poison_pages(page, 1 << order, 0); kernel_map_pages(page, 1 << order, 0); return true; @@ -1401,6 +1402,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, set_page_refcounted(page); arch_alloc_page(page, order); + kernel_poison_pages(page, 1 << order, 1); kernel_map_pages(page, 1 << order, 1); kasan_alloc_pages(page, order); diff --git a/mm/page_poison.c b/mm/page_poison.c index 92ead727b8f095..312b131d58756e 100644 --- a/mm/page_poison.c +++ b/mm/page_poison.c @@ -101,6 +101,9 @@ static void check_poison_mem(unsigned char *mem, size_t bytes) unsigned char *start; unsigned char *end; + if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY)) + return; + start = memchr_inv(mem, PAGE_POISON, bytes); if (!start) return; @@ -142,3 +145,14 @@ void unpoison_pages(struct page *page, int n) for (i = 0; i < n; i++) unpoison_page(page + i); } + +void kernel_poison_pages(struct page *page, int numpages, int enable) +{ + if (!page_poisoning_enabled()) + return; + + if (enable) + unpoison_pages(page, numpages); + else + poison_pages(page, numpages); +}