
Commit 80a9201

ramosian-glider authored and torvalds committed
mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
For KASAN builds:
 - switch SLUB allocator to using stackdepot instead of storing the
   allocation/deallocation stacks in the objects;
 - change the freelist hook so that parts of the freelist can be put
   into the quarantine.

[[email protected]: fixes]
Link: http://lkml.kernel.org/r/[email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Alexander Potapenko <[email protected]>
Cc: Andrey Konovalov <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Steven Rostedt (Red Hat) <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Kostya Serebryany <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Kuthonuzo Luruo <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Parent: c146a2b

10 files changed: 93 additions, 56 deletions
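Before the per-file diffs below: the stackdepot switch means an instrumented object no longer embeds whole allocation/deallocation stack traces. It stores a compact depot_stack_handle_t, and identical traces are deduplicated in a global pool. A simplified sketch of the save path, modeled on the lib/stackdepot.c API of this era (the commit's real kasan.c helper additionally filters IRQ frames and trims the trailing ULONG_MAX sentinel):

#include <linux/gfp.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

#define KASAN_STACK_DEPTH 64

/* Capture the current stack once and return a compact handle.
 * Identical stacks share one depot entry, so per-object metadata
 * shrinks from a full trace to a single 4-byte handle. */
static depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
	};

	save_stack_trace(&trace);
	return depot_save_stack(&trace, flags);
}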

include/linux/kasan.h

Lines changed: 2 additions & 0 deletions

@@ -77,6 +77,7 @@ void kasan_free_shadow(const struct vm_struct *vm);
 
 size_t ksize(const void *);
 static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
+size_t kasan_metadata_size(struct kmem_cache *cache);
 
 #else /* CONFIG_KASAN */
 
@@ -121,6 +122,7 @@ static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
 static inline void kasan_free_shadow(const struct vm_struct *vm) {}
 
 static inline void kasan_unpoison_slab(const void *ptr) { }
+static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
 
 #endif /* CONFIG_KASAN */
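kasan_metadata_size() tells the allocator how many bytes of KASAN bookkeeping trail the object payload, and the !CONFIG_KASAN stub keeps call sites unconditional. A hypothetical caller, shown only to illustrate the intended use (the helper below is made up, not from this commit):

/* Hypothetical: where does a cache's non-KASAN debug trailer start?
 * Everything up to this offset is object payload plus any KASAN
 * alloc/free metadata; kasan_metadata_size() is 0 when KASAN is off. */
static size_t debug_area_offset(struct kmem_cache *s)
{
	return s->object_size + kasan_metadata_size(s);
}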

include/linux/slab_def.h

Lines changed: 2 additions & 1 deletion

@@ -88,7 +88,8 @@ struct kmem_cache {
 };
 
 static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
-				void *x) {
+				void *x)
+{
 	void *object = x - (x - page->s_mem) % cache->size;
 	void *last_object = page->s_mem + (cache->num - 1) * cache->size;
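The nearest_obj() helper touched above rounds an interior pointer down to the start of its containing slab object. A standalone userspace sketch of the same arithmetic, with made-up addresses:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace mirror of nearest_obj()'s pointer arithmetic. */
static uintptr_t nearest_obj_addr(uintptr_t s_mem, size_t size, uintptr_t x)
{
	return x - (x - s_mem) % size;	/* round down to object start */
}

int main(void)
{
	/* Slab memory at 0x1000, 0x40-byte objects, interior pointer 0x10a8:
	 * (0x10a8 - 0x1000) % 0x40 = 0x28, so the object begins at 0x1080. */
	printf("%#lx\n", (unsigned long)nearest_obj_addr(0x1000, 0x40, 0x10a8));
	return 0;
}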

include/linux/slub_def.h

Lines changed: 4 additions & 0 deletions

@@ -104,6 +104,10 @@ struct kmem_cache {
 	unsigned int *random_seq;
 #endif
 
+#ifdef CONFIG_KASAN
+	struct kasan_cache kasan_info;
+#endif
+
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
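With this hunk SLUB's struct kmem_cache carries the same offset descriptor SLAB has used since the quarantine was introduced. For reference, struct kasan_cache is a two-field descriptor (quoting the same-era include/linux/kasan.h from memory, so treat as indicative rather than part of the diff above):

struct kasan_cache {
	int alloc_meta_offset;	/* offset of struct kasan_alloc_meta */
	int free_meta_offset;	/* offset of struct kasan_free_meta */
};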

lib/Kconfig.kasan

Lines changed: 2 additions & 2 deletions

@@ -5,9 +5,9 @@ if HAVE_ARCH_KASAN
 
 config KASAN
 	bool "KASan: runtime memory debugger"
-	depends on SLUB_DEBUG || (SLAB && !DEBUG_SLAB)
+	depends on SLUB || (SLAB && !DEBUG_SLAB)
 	select CONSTRUCTORS
-	select STACKDEPOT if SLAB
+	select STACKDEPOT
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.

mm/kasan/Makefile

Lines changed: 1 addition & 2 deletions

@@ -7,5 +7,4 @@ CFLAGS_REMOVE_kasan.o = -pg
 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
 CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
-obj-y := kasan.o report.o kasan_init.o
-obj-$(CONFIG_SLAB) += quarantine.o
+obj-y := kasan.o report.o kasan_init.o quarantine.o

mm/kasan/kasan.c

Lines changed: 32 additions & 31 deletions

@@ -351,7 +351,6 @@ void kasan_free_pages(struct page *page, unsigned int order)
 			KASAN_FREE_PAGE);
 }
 
-#ifdef CONFIG_SLAB
 /*
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
@@ -373,16 +372,8 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 			unsigned long *flags)
 {
 	int redzone_adjust;
-	/* Make sure the adjusted size is still less than
-	 * KMALLOC_MAX_CACHE_SIZE.
-	 * TODO: this check is only useful for SLAB, but not SLUB. We'll need
-	 * to skip it for SLUB when it starts using kasan_cache_create().
-	 */
-	if (*size > KMALLOC_MAX_CACHE_SIZE -
-	    sizeof(struct kasan_alloc_meta) -
-	    sizeof(struct kasan_free_meta))
-		return;
-	*flags |= SLAB_KASAN;
+	int orig_size = *size;
+
 	/* Add alloc meta. */
 	cache->kasan_info.alloc_meta_offset = *size;
 	*size += sizeof(struct kasan_alloc_meta);
@@ -395,14 +386,26 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 	}
 	redzone_adjust = optimal_redzone(cache->object_size) -
 		(*size - cache->object_size);
+
 	if (redzone_adjust > 0)
 		*size += redzone_adjust;
-	*size = min(KMALLOC_MAX_CACHE_SIZE,
-		    max(*size,
-			cache->object_size +
-			optimal_redzone(cache->object_size)));
+
+	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
+					optimal_redzone(cache->object_size)));
+
+	/*
+	 * If the metadata doesn't fit, don't enable KASAN at all.
+	 */
+	if (*size <= cache->kasan_info.alloc_meta_offset ||
+			*size <= cache->kasan_info.free_meta_offset) {
+		cache->kasan_info.alloc_meta_offset = 0;
+		cache->kasan_info.free_meta_offset = 0;
+		*size = orig_size;
+		return;
+	}
+
+	*flags |= SLAB_KASAN;
 }
-#endif
 
 void kasan_cache_shrink(struct kmem_cache *cache)
 {
@@ -414,6 +417,14 @@ void kasan_cache_destroy(struct kmem_cache *cache)
 	quarantine_remove_cache(cache);
 }
 
+size_t kasan_metadata_size(struct kmem_cache *cache)
+{
+	return (cache->kasan_info.alloc_meta_offset ?
+		sizeof(struct kasan_alloc_meta) : 0) +
+		(cache->kasan_info.free_meta_offset ?
+		sizeof(struct kasan_free_meta) : 0);
+}
+
 void kasan_poison_slab(struct page *page)
 {
 	kasan_poison_shadow(page_address(page),
@@ -431,16 +442,13 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 	kasan_poison_shadow(object,
 			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
 			KASAN_KMALLOC_REDZONE);
-#ifdef CONFIG_SLAB
 	if (cache->flags & SLAB_KASAN) {
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
 		alloc_info->state = KASAN_STATE_INIT;
 	}
-#endif
 }
 
-#ifdef CONFIG_SLAB
 static inline int in_irqentry_text(unsigned long ptr)
 {
 	return (ptr >= (unsigned long)&__irqentry_text_start &&
@@ -501,7 +509,6 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
 	return (void *)object + cache->kasan_info.free_meta_offset;
 }
-#endif
 
 void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 {
@@ -522,16 +529,16 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
 
 bool kasan_slab_free(struct kmem_cache *cache, void *object)
 {
-#ifdef CONFIG_SLAB
 	/* RCU slabs could be legally used after free within the RCU period */
 	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
 		return false;
 
 	if (likely(cache->flags & SLAB_KASAN)) {
-		struct kasan_alloc_meta *alloc_info =
-			get_alloc_info(cache, object);
-		struct kasan_free_meta *free_info =
-			get_free_info(cache, object);
+		struct kasan_alloc_meta *alloc_info;
+		struct kasan_free_meta *free_info;
+
+		alloc_info = get_alloc_info(cache, object);
+		free_info = get_free_info(cache, object);
 
 		switch (alloc_info->state) {
 		case KASAN_STATE_ALLOC:
@@ -550,10 +557,6 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
 		}
 	}
 	return false;
-#else
-	kasan_poison_slab_free(cache, object);
-	return false;
-#endif
 }
 
 void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
@@ -576,7 +579,6 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	kasan_unpoison_shadow(object, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_KMALLOC_REDZONE);
-#ifdef CONFIG_SLAB
 	if (cache->flags & SLAB_KASAN) {
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
@@ -585,7 +587,6 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 		alloc_info->alloc_size = size;
 		set_track(&alloc_info->track, flags);
 	}
-#endif
 }
 EXPORT_SYMBOL(kasan_kmalloc);
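The rewritten kasan_cache_create() above appends alloc metadata (always) and free metadata (when needed) after the object, tops the total up to the adaptive redzone, and backs out entirely if the offsets no longer fit. A userspace mirror of that sizing logic with illustrative constants (the real sizeof values and redzone table depend on the kernel config, and the real code adds free metadata only when the freed object itself cannot hold it):

#include <stddef.h>
#include <stdio.h>

#define ALLOC_META 32	/* stand-in for sizeof(struct kasan_alloc_meta) */
#define FREE_META  32	/* stand-in for sizeof(struct kasan_free_meta) */

/* Simplified stand-in for the adaptive redzone table. */
static size_t optimal_redzone(size_t object_size)
{
	return object_size <= 64 ? 16 : object_size <= 128 ? 32 : 64;
}

int main(void)
{
	size_t object_size = 128, size = object_size;
	size_t alloc_off = size;	/* alloc meta sits right after the object */

	size += ALLOC_META;
	size_t free_off = size;		/* free meta follows the alloc meta */
	size += FREE_META;

	long adjust = (long)optimal_redzone(object_size) -
		      (long)(size - object_size);
	if (adjust > 0)
		size += adjust;		/* top up to the full redzone */

	/* prints: alloc_meta@128 free_meta@160 total=192 */
	printf("alloc_meta@%zu free_meta@%zu total=%zu\n",
	       alloc_off, free_off, size);
	return 0;
}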

mm/kasan/kasan.h

Lines changed: 1 addition & 2 deletions

@@ -95,7 +95,6 @@ struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
 struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 					const void *object);
 
-
 static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
 {
 	return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
@@ -110,7 +109,7 @@ static inline bool kasan_report_enabled(void)
 void kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
 
-#ifdef CONFIG_SLAB
+#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
 void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
 void quarantine_reduce(void);
 void quarantine_remove_cache(struct kmem_cache *cache);
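Making these declarations visible for both allocators is what lets SLUB defer frees through the quarantine: a quarantined free poisons the object and parks it instead of returning it to the allocator, so use-after-free accesses keep trapping on poisoned shadow. A hedged sketch of that flow (illustrative only; the commit's real logic lives in kasan_slab_free() and the mm/slub.c freelist hook, which is not shown on this page):

/* Illustrative sketch: defer a free through the KASAN quarantine. */
static bool quarantine_on_free(struct kmem_cache *cache, void *object)
{
	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
	struct kasan_free_meta *free_info = get_free_info(cache, object);

	alloc_info->state = KASAN_STATE_QUARANTINE;
	kasan_poison_slab_free(cache, object);	/* shadow now says "freed" */
	quarantine_put(free_info, cache);	/* park; freed for real later */
	return true;	/* caller must not release the object now */
}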

mm/kasan/report.c

Lines changed: 3 additions & 5 deletions

@@ -116,7 +116,6 @@ static inline bool init_task_stack_addr(const void *addr)
 			sizeof(init_thread_union.stack));
 }
 
-#ifdef CONFIG_SLAB
 static void print_track(struct kasan_track *track)
 {
 	pr_err("PID = %u\n", track->pid);
@@ -130,8 +129,8 @@ static void print_track(struct kasan_track *track)
 	}
 }
 
-static void object_err(struct kmem_cache *cache, struct page *page,
-			void *object, char *unused_reason)
+static void kasan_object_err(struct kmem_cache *cache, struct page *page,
+				void *object, char *unused_reason)
 {
 	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
 	struct kasan_free_meta *free_info;
@@ -162,7 +161,6 @@ static void object_err(struct kmem_cache *cache, struct page *page,
 		break;
 	}
 }
-#endif
 
 static void print_address_description(struct kasan_access_info *info)
 {
@@ -177,7 +175,7 @@ static void print_address_description(struct kasan_access_info *info)
 		struct kmem_cache *cache = page->slab_cache;
 		object = nearest_obj(cache, page,
 				(void *)info->access_addr);
-		object_err(cache, page, object,
+		kasan_object_err(cache, page, object,
 			"kasan: bad access detected");
 		return;
 	}

mm/slab.h

Lines changed: 2 additions & 0 deletions

@@ -369,6 +369,8 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
 		return s->object_size;
 # endif
+	if (s->flags & SLAB_KASAN)
+		return s->object_size;
 	/*
 	 * If we have the need to store the freelist pointer
 	 * back there or track user information then we can
