
Commit 9ce6739

ftang1 authored and tehcaster committed
mm/slub: only zero requested size of buffer for kzalloc when debug enabled
kzalloc/kmalloc will round up the request size to a fixed size (mostly a power of 2), so the allocated memory can be larger than requested. Currently the kzalloc family of APIs zeroes all of the allocated memory.

To detect out-of-bounds usage of the extra allocated memory, zero only the requested part, so that a redzone sanity check can be added to the extra space later.

For kzalloc users who call ksize() later and make use of this extra space, be aware that the space is no longer zeroed when debug is enabled. (Thanks to Kees Cook's effort to sanitize all ksize() use cases [1], this won't be a big issue.)

[1] https://lore.kernel.org/all/[email protected]/#r

Signed-off-by: Feng Tang <[email protected]>
Acked-by: Hyeonggon Yoo <[email protected]>
Reviewed-by: Andrey Konovalov <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
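For context on the first paragraph: kmalloc serves requests from fixed-size caches, mostly powers of two, so the cache's object_size can exceed the caller's orig_size. Below is a minimal userspace sketch of that rounding; size_class_for() is a hypothetical helper standing in for the kernel's kmalloc_index() lookup, and the real kernel also has 96- and 192-byte caches that this sketch ignores.

#include <stdio.h>

/*
 * Hypothetical model of kmalloc size-class rounding: requests are
 * served from fixed-size caches, mostly powers of two. This is only
 * an illustration of why object_size >= orig_size.
 */
static size_t size_class_for(size_t request)
{
	size_t class = 8;	/* smallest kmalloc cache is typically 8 bytes */

	while (class < request)
		class *= 2;
	return class;
}

int main(void)
{
	size_t requests[] = { 13, 24, 100, 500 };

	for (size_t i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
		size_t req = requests[i];
		size_t got = size_class_for(req);

		/* the tail [req, got) is what this patch stops zeroing */
		printf("requested %3zu -> allocated %3zu (%zu extra bytes)\n",
		       req, got, got - req);
	}
	return 0;
}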
1 parent c18c20f · commit 9ce6739

File tree: 3 files changed (+27, -8 lines)

mm/slab.c

Lines changed: 4 additions & 3 deletions
@@ -3254,7 +3254,8 @@ slab_alloc_node(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
 	init = slab_want_init_on_alloc(flags, cachep);
 
 out:
-	slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init);
+	slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init,
+			     cachep->object_size);
 	return objp;
 }
 
@@ -3507,13 +3508,13 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	 * Done outside of the IRQ disabled section.
 	 */
 	slab_post_alloc_hook(s, objcg, flags, size, p,
-				slab_want_init_on_alloc(flags, s));
+			slab_want_init_on_alloc(flags, s), s->object_size);
 	/* FIXME: Trace call missing. Christoph would like a bulk variant */
 	return size;
 error:
 	local_irq_enable();
 	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
-	slab_post_alloc_hook(s, objcg, flags, i, p, false);
+	slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size);
 	kmem_cache_free_bulk(s, i, p);
 	return 0;
 }

mm/slab.h

Lines changed: 16 additions & 2 deletions
@@ -720,12 +720,26 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
 
 static inline void slab_post_alloc_hook(struct kmem_cache *s,
 					struct obj_cgroup *objcg, gfp_t flags,
-					size_t size, void **p, bool init)
+					size_t size, void **p, bool init,
+					unsigned int orig_size)
 {
+	unsigned int zero_size = s->object_size;
 	size_t i;
 
 	flags &= gfp_allowed_mask;
 
+	/*
+	 * For kmalloc object, the allocated memory size(object_size) is likely
+	 * larger than the requested size(orig_size). If redzone check is
+	 * enabled for the extra space, don't zero it, as it will be redzoned
+	 * soon. The redzone operation for this extra space could be seen as a
+	 * replacement of current poisoning under certain debug option, and
+	 * won't break other sanity checks.
+	 */
+	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
+	    (s->flags & SLAB_KMALLOC))
+		zero_size = orig_size;
+
 	/*
 	 * As memory initialization might be integrated into KASAN,
 	 * kasan_slab_alloc and initialization memset must be
@@ -736,7 +750,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 	for (i = 0; i < size; i++) {
 		p[i] = kasan_slab_alloc(s, p[i], flags, init);
 		if (p[i] && init && !kasan_has_integrated_init())
-			memset(p[i], 0, s->object_size);
+			memset(p[i], 0, zero_size);
 		kmemleak_alloc_recursive(p[i], s->object_size, 1,
 					 s->flags, flags);
 		kmsan_slab_alloc(s, p[i], flags);
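To make the hook change above concrete, here is a hedged userspace model of the zeroing decision: post_alloc_init() and redzone_intact() are invented names and REDZONE_BYTE a stand-in pattern. With debug on, only orig_size bytes are zeroed and the tail becomes a checkable redzone, mirroring the zero_size logic in slab_post_alloc_hook().

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define REDZONE_BYTE 0xcc	/* stand-in for the kernel's redzone pattern */

/*
 * Userspace model of the slab_post_alloc_hook() change: object_size is
 * the size-class size, orig_size the caller's request. With debug off,
 * the whole object is zeroed (old behavior); with debug on, only the
 * requested bytes are zeroed and the tail becomes a redzone.
 */
static void post_alloc_init(unsigned char *obj, size_t object_size,
			    size_t orig_size, bool debug)
{
	size_t zero_size = debug ? orig_size : object_size;

	memset(obj, 0, zero_size);
	if (debug)
		memset(obj + orig_size, REDZONE_BYTE, object_size - orig_size);
}

/* On free, a corrupted redzone reveals an out-of-bounds write. */
static bool redzone_intact(const unsigned char *obj, size_t object_size,
			   size_t orig_size)
{
	for (size_t i = orig_size; i < object_size; i++)
		if (obj[i] != REDZONE_BYTE)
			return false;
	return true;
}

int main(void)
{
	size_t object_size = 32, orig_size = 20;
	unsigned char *obj = malloc(object_size);

	if (!obj)
		return 1;

	post_alloc_init(obj, object_size, orig_size, true);
	assert(redzone_intact(obj, object_size, orig_size));

	obj[orig_size] = 0;	/* simulated out-of-bounds write... */
	assert(!redzone_intact(obj, object_size, orig_size));	/* ...caught */

	free(obj);
	return 0;
}

In the real kernel the verification happens at debug-check time (roughly what SLUB's check_object() does); the assertions here merely stand in for that.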

mm/slub.c

Lines changed: 7 additions & 3 deletions
@@ -3395,7 +3395,11 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_l
 	init = slab_want_init_on_alloc(gfpflags, s);
 
 out:
-	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init);
+	/*
+	 * When init equals 'true', like for kzalloc() family, only
+	 * @orig_size bytes might be zeroed instead of s->object_size
+	 */
+	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init, orig_size);
 
 	return object;
 }
@@ -3852,11 +3856,11 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	 * Done outside of the IRQ disabled fastpath loop.
 	 */
 	slab_post_alloc_hook(s, objcg, flags, size, p,
-			slab_want_init_on_alloc(flags, s));
+			slab_want_init_on_alloc(flags, s), s->object_size);
 	return i;
 error:
 	slub_put_cpu_ptr(s->cpu_slab);
-	slab_post_alloc_hook(s, objcg, flags, i, p, false);
+	slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size);
 	kmem_cache_free_bulk(s, i, p);
 	return 0;
 }
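The ksize() caveat from the commit message can be sketched with glibc's analogous malloc_usable_size(): code that treats everything up to the usable size as zeroed is exactly the pattern that breaks once only the requested bytes are cleared. This is a userspace analogy, not the kernel API.

#include <malloc.h>	/* malloc_usable_size(), glibc-specific */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t req = 20;
	char *p = calloc(1, req);

	if (!p)
		return 1;

	/*
	 * Analogous to ksize(): the usable size may exceed the request.
	 * Only the first 'req' bytes are guaranteed zero; relying on the
	 * tail [req, usable) being zero is the pattern this kernel patch
	 * invalidates when slub debugging is enabled.
	 */
	size_t usable = malloc_usable_size(p);

	printf("requested %zu, usable %zu\n", req, usable);
	printf("first %zu bytes zeroed; remaining %zu not guaranteed\n",
	       req, usable - req);

	free(p);
	return 0;
}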
