Dmitryc patches4 #244

Closed
5 changes: 2 additions & 3 deletions Documentation/kasan.txt
@@ -12,8 +12,7 @@ KASAN uses compile-time instrumentation for checking every memory access,
 therefore you will need a GCC version 4.9.2 or later. GCC 5.0 or later is
 required for detection of out-of-bounds accesses to stack or global variables.
 
-Currently KASAN is supported only for x86_64 architecture and requires the
-kernel to be built with the SLUB allocator.
+Currently KASAN is supported only for x86_64 architecture.
 
 1. Usage
 ========
@@ -27,7 +26,7 @@ inline are compiler instrumentation types. The former produces smaller binary
 the latter is 1.1 - 2 times faster. Inline instrumentation requires a GCC
 version 5.0 or later.
 
-Currently KASAN works only with the SLUB memory allocator.
+KASAN works with both SLUB and SLAB memory allocators.
 For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.
 
 To disable instrumentation for specific files or directories, add a line
1 change: 1 addition & 0 deletions arch/x86/kernel/Makefile
@@ -19,6 +19,7 @@ endif
 KASAN_SANITIZE_head$(BITS).o := n
 KASAN_SANITIZE_dumpstack.o := n
 KASAN_SANITIZE_dumpstack_$(BITS).o := n
+KASAN_SANITIZE_stacktrace.o := n
 
 CFLAGS_irq.o := -I$(src)/../include/asm/trace
65 changes: 48 additions & 17 deletions include/linux/kasan.h
@@ -7,6 +7,8 @@ struct kmem_cache;
 struct page;
 struct vm_struct;
 
+typedef size_t cache_size_t;
+
 #ifdef CONFIG_KASAN
 
 #define KASAN_SHADOW_SCALE_SHIFT 3
@@ -44,20 +46,34 @@ static inline void kasan_disable_current(void)
 void kasan_unpoison_shadow(const void *address, size_t size);
 
 void kasan_alloc_pages(struct page *page, unsigned int order);
-void kasan_free_pages(struct page *page, unsigned int order);
+void kasan_poison_free_pages(struct page *page, unsigned int order);
 
+void kasan_cache_create(struct kmem_cache *cache, cache_size_t *size,
+			unsigned long *flags);
+void kasan_cache_shrink(struct kmem_cache *cache);
+void kasan_cache_destroy(struct kmem_cache *cache);
+
 void kasan_poison_slab(struct page *page);
 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
 void kasan_poison_object_data(struct kmem_cache *cache, void *object);
 
-void kasan_kmalloc_large(const void *ptr, size_t size);
-void kasan_kfree_large(const void *ptr);
-void kasan_kfree(void *ptr);
-void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
-void kasan_krealloc(const void *object, size_t new_size);
+void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
+void kasan_poison_kfree_large(const void *ptr);
+void kasan_poison_kfree(void *ptr);
+void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
+		   gfp_t flags);
+void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
 
-void kasan_slab_alloc(struct kmem_cache *s, void *object);
-void kasan_slab_free(struct kmem_cache *s, void *object);
+void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
+/* kasan_slab_free() returns true if the object has been put into quarantine.
+ */
+bool kasan_slab_free(struct kmem_cache *s, void *object);
+void kasan_poison_slab_free(struct kmem_cache *s, void *object);
+
+struct kasan_cache {
+	int alloc_meta_offset;
+	int free_meta_offset;
+};
 
 int kasan_module_alloc(void *addr, size_t size);
 void kasan_free_shadow(const struct vm_struct *vm);
@@ -70,23 +86,38 @@ static inline void kasan_enable_current(void) {}
 static inline void kasan_disable_current(void) {}
 
 static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+static inline void kasan_poison_free_pages(struct page *page,
+					   unsigned int order) {}
 
+static inline void kasan_cache_create(struct kmem_cache *cache,
+				      cache_size_t *size,
+				      unsigned long *flags) {}
+static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
+static inline void kasan_cache_destroy(struct kmem_cache *cache) {}
+
 static inline void kasan_poison_slab(struct page *page) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 				void *object) {}
 static inline void kasan_poison_object_data(struct kmem_cache *cache,
 				void *object) {}
 
-static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
-static inline void kasan_kfree_large(const void *ptr) {}
-static inline void kasan_kfree(void *ptr) {}
+static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
+static inline void kasan_poison_kfree_large(const void *ptr) {}
+static inline void kasan_poison_kfree(void *ptr) {}
 static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
-				size_t size) {}
-static inline void kasan_krealloc(const void *object, size_t new_size) {}
-
-static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
-static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
+				size_t size, gfp_t flags) {}
+static inline void kasan_krealloc(const void *object, size_t new_size,
+				  gfp_t flags) {}
+
+static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
+				    gfp_t flags) {}
+/* kasan_slab_free() returns true if the object has been put into quarantine.
+ */
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
+{
+	return false;
+}
+static inline void kasan_poison_slab_free(struct kmem_cache *s, void *object) {}
 
 static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
 static inline void kasan_free_shadow(const struct vm_struct *vm) {}
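Note on the changed kasan_slab_free() contract above: it now returns true when KASAN has put the object into its quarantine instead of releasing it. A minimal sketch of how an allocator-side free path is expected to consume that return value (the hook below is illustrative, not the actual SLAB/SLUB code in this series):

/* Illustrative consumer of the new bool return value. If kasan_slab_free()
 * returns true, the object is now in the KASAN quarantine and must not be
 * handed back to the cache freelist yet; it is flushed later, e.g. via
 * kasan_cache_shrink() or under memory pressure.
 */
static void example_slab_free_hook(struct kmem_cache *s, void *object)
{
	if (kasan_slab_free(s, object))
		return;		/* quarantined; the real free happens later */
	/* ...normal freelist handling goes here... */
}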
6 changes: 6 additions & 0 deletions include/linux/slab.h
@@ -87,6 +87,12 @@
 # define SLAB_FAILSLAB	0x00000000UL
 #endif
 
+#ifdef CONFIG_KASAN
+#define SLAB_KASAN	0x08000000UL
+#else
+#define SLAB_KASAN	0x00000000UL
+#endif
+
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
 #define SLAB_TEMPORARY	SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
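The new SLAB_KASAN flag is set internally when KASAN instruments a cache; the implementation is not part of this hunk. As a loudly-labeled assumption, a kasan_cache_create() along these lines would reserve per-object metadata and record the offsets in the kasan_cache struct added to include/linux/kasan.h:

/* Hypothetical sketch only: the metadata struct names and layouts below are
 * assumptions, not code from this series (the real logic lives in
 * mm/kasan/kasan.c).
 */
struct kasan_alloc_meta { u32 state; u32 alloc_stack; };	/* assumed */
struct kasan_free_meta { u32 free_stack; };			/* assumed */

void kasan_cache_create(struct kmem_cache *cache, cache_size_t *size,
			unsigned long *flags)
{
	*flags |= SLAB_KASAN;
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);
	cache->kasan_info.free_meta_offset = *size;
	*size += sizeof(struct kasan_free_meta);
}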
13 changes: 13 additions & 0 deletions include/linux/slab_def.h
@@ -72,8 +72,21 @@ struct kmem_cache {
 #ifdef CONFIG_MEMCG_KMEM
 	struct memcg_cache_params memcg_params;
 #endif
+#ifdef CONFIG_KASAN
+	struct kasan_cache kasan_info;
+#endif
 
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
+static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+				void *x) {
+	void *object = x - (x - page->s_mem) % cache->size;
+	void *last_object = page->s_mem + (cache->num - 1) * cache->size;
+	if (unlikely(object > last_object))
+		return last_object;
+	else
+		return object;
+}
+
 #endif /* _LINUX_SLAB_DEF_H */
11 changes: 11 additions & 0 deletions include/linux/slub_def.h
@@ -129,4 +129,15 @@ static inline void *virt_to_obj(struct kmem_cache *s,
 void object_err(struct kmem_cache *s, struct page *page,
 		u8 *object, char *reason);
 
+static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+				void *x) {
+	void *object = x - (x - page_address(page)) % cache->size;
+	void *last_object = page_address(page) +
+		(page->objects - 1) * cache->size;
+	if (unlikely(object > last_object))
+		return last_object;
+	else
+		return object;
+}
+
 #endif /* _LINUX_SLUB_DEF_H */
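Both nearest_obj() helpers do the same thing with different slab base addresses (page->s_mem for SLAB, page_address(page) for SLUB): round an arbitrary pointer down to the start of the object containing it, clamped to the last object in the slab. A self-contained userspace sketch of the arithmetic, with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uintptr_t base = 0x1000;		/* start of the slab's object area */
	size_t obj_size = 137;			/* cache->size */
	unsigned int num = 29;			/* objects per slab */
	uintptr_t x = base + 3 * obj_size + 50;	/* 50 bytes into object #3 */

	uintptr_t object = x - (x - base) % obj_size;
	uintptr_t last_object = base + (num - 1) * obj_size;

	if (object > last_object)	/* clamp, as the kernel helpers do */
		object = last_object;
	printf("object starts at %#lx, index %zu\n",
	       (unsigned long)object,
	       (size_t)((object - base) / obj_size));	/* -> index 3 */
	return 0;
}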
4 changes: 3 additions & 1 deletion lib/Kconfig.kasan
@@ -5,7 +5,7 @@ if HAVE_ARCH_KASAN
 
 config KASAN
 	bool "KASan: runtime memory debugger"
-	depends on SLUB_DEBUG
+	depends on SLUB_DEBUG || (SLAB && !DEBUG_SLAB)
 	select CONSTRUCTORS
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
@@ -16,6 +16,8 @@ config KASAN
 	  This feature consumes about 1/8 of available memory and brings about
 	  ~x3 performance slowdown.
 	  For better error detection enable CONFIG_STACKTRACE.
+	  Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB
+	  (the resulting kernel does not boot).
 
 choice
 	prompt "Instrumentation type"
59 changes: 58 additions & 1 deletion lib/test_kasan.c
@@ -65,11 +65,34 @@ static noinline void __init kmalloc_node_oob_right(void)
 	kfree(ptr);
 }
 
-static noinline void __init kmalloc_large_oob_right(void)
+#ifdef CONFIG_SLUB
+static noinline void __init kmalloc_pagealloc_oob_right(void)
+{
+	char *ptr;
+	/* Allocate a chunk that does not fit into a slab to trigger the page
+	 * allocator fallback.
+	 */
+	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
+
+	pr_info("kmalloc page allocator allocation: out-of-bounds to right\n");
+	ptr = kmalloc(size, GFP_KERNEL);
+	if (!ptr) {
+		pr_err("Allocation failed\n");
+		return;
+	}
+
+	ptr[size] = 0;
+	kfree(ptr);
+}
+#endif
+
+static noinline void __init kmalloc_large_oob_right(void)
 {
 	char *ptr;
 	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
+	/* Allocate a chunk that is large enough, but still fits into a slab
+	 * and does not trigger the page allocator fallback in SLUB.
+	 */
 
 	pr_info("kmalloc large allocation: out-of-bounds to right\n");
 	ptr = kmalloc(size, GFP_KERNEL);
 	if (!ptr) {
@@ -271,6 +294,8 @@ static noinline void __init kmalloc_uaf2(void)
 	}
 
 	ptr1[40] = 'x';
+	if (ptr1 == ptr2)
+		pr_err("Could not detect use-after-free: ptr1 == ptr2\n");
 	kfree(ptr2);
 }
 
@@ -319,11 +344,40 @@ static noinline void __init kasan_stack_oob(void)
 	*(volatile char *)p;
 }
 
+#ifdef CONFIG_SLAB
+static noinline void __init kasan_quarantine_cache(void)
+{
+	struct kmem_cache *cache = kmem_cache_create(
+			"test", 137, 8, GFP_KERNEL, NULL);
+	int i;
+
+	for (i = 0; i < 100; i++) {
+		void *p = kmem_cache_alloc(cache, GFP_KERNEL);
+
+		kmem_cache_free(cache, p);
+		p = kmalloc(sizeof(u64), GFP_KERNEL);
+		kfree(p);
+	}
+	kmem_cache_shrink(cache);
+	for (i = 0; i < 100; i++) {
+		u64 *p = kmem_cache_alloc(cache, GFP_KERNEL);
+
+		kmem_cache_free(cache, p);
+		p = kmalloc(sizeof(u64), GFP_KERNEL);
+		kfree(p);
+	}
+	kmem_cache_destroy(cache);
+}
+#endif
+
 static int __init kmalloc_tests_init(void)
 {
 	kmalloc_oob_right();
 	kmalloc_oob_left();
 	kmalloc_node_oob_right();
+#ifdef CONFIG_SLUB
+	kmalloc_pagealloc_oob_right();
+#endif
 	kmalloc_large_oob_right();
 	kmalloc_oob_krealloc_more();
 	kmalloc_oob_krealloc_less();
@@ -339,6 +393,9 @@ static int __init kmalloc_tests_init(void)
 	kmem_cache_oob();
 	kasan_stack_oob();
 	kasan_global_oob();
+#ifdef CONFIG_SLAB
+	kasan_quarantine_cache();
+#endif
 	return -EAGAIN;
 }
 
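The kasan_quarantine_cache() test above churns allocations so that freed objects pass through the new quarantine, with kmem_cache_shrink() forcing a flush partway through. The bug class the quarantine targets, as an illustrative sketch (not part of the patch): because a freed object is held back from the freelist rather than recycled immediately, a late write lands in still-poisoned memory and KASAN can report it as a use-after-free.

/* Illustrative only; not from this series. */
static noinline void __init quarantine_uaf_example(void)
{
	char *p = kmalloc(64, GFP_KERNEL);

	if (!p)
		return;
	kfree(p);	/* goes to quarantine, not back to the cache */
	p[0] = 'x';	/* KASAN report: use-after-free */
}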
1 change: 1 addition & 0 deletions mm/Makefile
@@ -3,6 +3,7 @@
 #
 
 KASAN_SANITIZE_slab_common.o := n
+KASAN_SANITIZE_slab.o := n
 KASAN_SANITIZE_slub.o := n
 
 mmu-y := nommu.o
3 changes: 3 additions & 0 deletions mm/kasan/Makefile
@@ -6,3 +6,6 @@ CFLAGS_REMOVE_kasan.o = -pg
 CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
 obj-y := kasan.o report.o kasan_init.o
+ifdef CONFIG_SLAB
+obj-y += stackdepot.o quarantine.o
+endif