0007.patch
From 977266822fd37e7d7097b8e1c914db04976d8288 Mon Sep 17 00:00:00 2001
From: Daniel Micay <danielmicay@gmail.com>
Date: Fri, 20 Jan 2017 16:51:25 -0500
Subject: [PATCH] add page sanitization / verification

---
 include/linux/highmem.h | 22 ++++++++++++++++++++++
 mm/page_alloc.c         | 12 ++++++++++--
 2 files changed, 32 insertions(+), 2 deletions(-)

diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 35cd6c33079..56543d48059 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -7,6 +7,7 @@
 #include <linux/mm.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
+#include <linux/string.h>
 
 #include <asm/cacheflush.h>
 
@@ -211,6 +212,27 @@ static inline void clear_highpage(struct page *page)
 	kunmap_atomic(kaddr);
 }
 
+static inline void sanitize_highpage(struct page *page)
+{
+	void *kaddr;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	kaddr = kmap_atomic(page);
+	clear_page(kaddr);
+	kunmap_atomic(kaddr);
+	local_irq_restore(flags);
+}
+
+static inline void sanitize_highpage_verify(struct page *page)
+{
+	void *kaddr;
+
+	kaddr = kmap_atomic(page);
+	BUG_ON(memchr_inv(kaddr, 0, PAGE_SIZE));
+	kunmap_atomic(kaddr);
+}
+
 static inline void zero_user_segments(struct page *page,
 	unsigned start1, unsigned end1,
 	unsigned start2, unsigned end2)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5a5e5a19c7c..bfb89cde2c9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -718,6 +718,8 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 	int i;
 	int bad = 0;
 
+	unsigned long index = 1UL << order;
+
 	trace_mm_page_free(page, order);
 	kmemcheck_free_shadow(page, order);
 
@@ -740,6 +742,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 		debug_check_no_obj_freed(page_address(page),
 					   PAGE_SIZE << order);
 	}
+
+	for (; index; --index)
+		sanitize_highpage(page + index - 1);
+
 	arch_free_page(page, order);
 	kernel_map_pages(page, 1 << order, 0);
 
@@ -896,6 +902,8 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 {
 	int i;
 
+	unsigned long index = 1UL << order;
+
 	for (i = 0; i < (1 << order); i++) {
 		struct page *p = page + i;
 		if (unlikely(check_new_page(p)))
@@ -908,8 +916,8 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 	arch_alloc_page(page, order);
 	kernel_map_pages(page, 1 << order, 1);
 
-	if (gfp_flags & __GFP_ZERO)
-		prep_zero_page(page, order, gfp_flags);
+	for (; index; --index)
+		sanitize_highpage_verify(page + index - 1);
 
 	if (order && (gfp_flags & __GFP_COMP))
 		prep_compound_page(page, order);
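
In outline, the patch works as follows: on the free path, free_pages_prepare() walks all 1UL << order pages of the allocation (index counts down, touching page + index - 1 at each step) and zeroes each one via the new sanitize_highpage() helper. On the allocation path, prep_new_page() drops the conditional __GFP_ZERO / prep_zero_page() zeroing, now redundant since every freed page is already zero, and instead calls sanitize_highpage_verify(), whose BUG_ON(memchr_inv(kaddr, 0, PAGE_SIZE)) fires if a freed page was dirtied in the meantime, i.e. on a write-after-free. Below is a minimal userspace sketch of that sanitize-on-free / verify-on-alloc invariant; it is an illustration only, not kernel code: kmap_atomic(), clear_page() and memchr_inv() are modeled with libc calls, and the helper names are loosely adapted from the patch.

/* sketch.c - userspace model of sanitize-on-free + verify-on-alloc */
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

static void sanitize_page(unsigned char *page)
{
	/* free path: scrub the page so old contents cannot leak
	 * (clear_page() under kmap_atomic() in the patch) */
	memset(page, 0, PAGE_SIZE);
}

static void sanitize_page_verify(const unsigned char *page)
{
	/* alloc path: a freed page must still be all zero; any nonzero
	 * byte means a write-after-free
	 * (BUG_ON(memchr_inv(kaddr, 0, PAGE_SIZE)) in the patch) */
	for (size_t i = 0; i < PAGE_SIZE; i++)
		assert(page[i] == 0);
}

int main(void)
{
	unsigned char *page = malloc(PAGE_SIZE);

	assert(page);
	memset(page, 0xAA, PAGE_SIZE);	/* page holds stale data while in use */
	sanitize_page(page);		/* what free_pages_prepare() now does */
	sanitize_page_verify(page);	/* what prep_new_page() now checks */
	free(page);
	return 0;
}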