@@ -351,7 +351,6 @@ void kasan_free_pages(struct page *page, unsigned int order)
 				KASAN_FREE_PAGE);
 }
 
-#ifdef CONFIG_SLAB
 /*
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
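
The body of optimal_redzone() sits in the gap before the next hunk. As a sketch of the policy that comment describes (the step thresholds below mirror the userspace ASan tables, but treat the exact values as illustrative rather than quoted from this patch):

/* Sketch: each size class leaves room for its redzone inside the next
 * bucket, so bigger objects get proportionally bigger redzones and
 * far-out-of-bounds accesses still land in poisoned shadow.
 */
static size_t optimal_redzone(size_t object_size)
{
	return object_size <= 64        - 16   ? 16 :
	       object_size <= 128       - 32   ? 32 :
	       object_size <= 512       - 64   ? 64 :
	       object_size <= 4096      - 128  ? 128 :
	       object_size <= (1 << 14) - 256  ? 256 :
	       object_size <= (1 << 15) - 512  ? 512 :
	       object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
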
@@ -373,16 +372,8 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 			unsigned long *flags)
 {
 	int redzone_adjust;
-	/* Make sure the adjusted size is still less than
-	 * KMALLOC_MAX_CACHE_SIZE.
-	 * TODO: this check is only useful for SLAB, but not SLUB. We'll need
-	 * to skip it for SLUB when it starts using kasan_cache_create().
-	 */
-	if (*size > KMALLOC_MAX_CACHE_SIZE -
-	    sizeof(struct kasan_alloc_meta) -
-	    sizeof(struct kasan_free_meta))
-		return;
-	*flags |= SLAB_KASAN;
+	int orig_size = *size;
+
 	/* Add alloc meta. */
 	cache->kasan_info.alloc_meta_offset = *size;
 	*size += sizeof(struct kasan_alloc_meta);
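
The offset bookkeeping above and in the next hunk produces a per-object layout along these lines (an illustrative sketch, not text from the patch; when the freed object is large enough to hold struct kasan_free_meta in place, free_meta_offset stays 0 and no extra slot is appended):

/*
 * [ object payload | kasan_alloc_meta | (kasan_free_meta) | redzone ]
 * ^ object_size      ^ alloc_meta_offset ^ free_meta_offset
 *
 * get_alloc_info()/get_free_info() simply add these offsets to the
 * object pointer.
 */
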
@@ -395,14 +386,26 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 	}
 	redzone_adjust = optimal_redzone(cache->object_size) -
 		(*size - cache->object_size);
+
 	if (redzone_adjust > 0)
 		*size += redzone_adjust;
-	*size = min(KMALLOC_MAX_CACHE_SIZE,
-		    max(*size,
-			cache->object_size +
-			optimal_redzone(cache->object_size)));
+
+	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
+					optimal_redzone(cache->object_size)));
+
+	/*
+	 * If the metadata doesn't fit, don't enable KASAN at all.
+	 */
+	if (*size <= cache->kasan_info.alloc_meta_offset ||
+	    *size <= cache->kasan_info.free_meta_offset) {
+		cache->kasan_info.alloc_meta_offset = 0;
+		cache->kasan_info.free_meta_offset = 0;
+		*size = orig_size;
+		return;
+	}
+
+	*flags |= SLAB_KASAN;
 }
-#endif
 
 void kasan_cache_shrink(struct kmem_cache *cache)
 {
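
The new bailout matters for caches whose objects already approach KMALLOC_MAX_SIZE: after the min() clamp, *size can land at or below a metadata offset, meaning the metadata would sit past the end of the allocation. A hypothetical walk-through (the constants are illustrative, not taken from the patch):

/*
 * Hypothetical numbers, assuming KMALLOC_MAX_SIZE == 1 << 22 and a
 * 32-byte kasan_alloc_meta:
 *
 *   cache->object_size   = 1 << 22      (object alone fills the cap)
 *   alloc_meta_offset    = 1 << 22
 *   *size before clamp   = (1 << 22) + 32 + redzone
 *   *size after clamp    = 1 << 22      <= alloc_meta_offset
 *
 * Both offsets are reset, *size reverts to orig_size, SLAB_KASAN is
 * never set, and the cache runs without per-object KASAN metadata.
 */
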
@@ -414,6 +417,14 @@ void kasan_cache_destroy(struct kmem_cache *cache)
 	quarantine_remove_cache(cache);
 }
 
+size_t kasan_metadata_size(struct kmem_cache *cache)
+{
+	return (cache->kasan_info.alloc_meta_offset ?
+		sizeof(struct kasan_alloc_meta) : 0) +
+		(cache->kasan_info.free_meta_offset ?
+		sizeof(struct kasan_free_meta) : 0);
+}
+
 void kasan_poison_slab(struct page *page)
 {
 	kasan_poison_shadow(page_address(page),
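
kasan_metadata_size() lets the slab allocator account for the KASAN bytes appended to each object. A hypothetical caller (report_kasan_overhead() is illustrative, not part of the patch):

/* Hypothetical helper: log how much of each object is KASAN metadata. */
static void report_kasan_overhead(struct kmem_cache *s)
{
	size_t meta = kasan_metadata_size(s);

	if (meta)
		pr_info("%s: %zu KASAN metadata bytes per object\n",
			s->name, meta);
}
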
@@ -431,16 +442,13 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 	kasan_poison_shadow(object,
 			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
 			KASAN_KMALLOC_REDZONE);
-#ifdef CONFIG_SLAB
 	if (cache->flags & SLAB_KASAN) {
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
 		alloc_info->state = KASAN_STATE_INIT;
 	}
-#endif
 }
 
-#ifdef CONFIG_SLAB
 static inline int in_irqentry_text(unsigned long ptr)
 {
 	return (ptr >= (unsigned long)&__irqentry_text_start &&
@@ -501,7 +509,6 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
 	return (void *)object + cache->kasan_info.free_meta_offset;
 }
-#endif
 
 void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 {
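
get_free_info() above has an alloc-side twin just before this hunk; a sketch, assuming the offset scheme set up in kasan_cache_create():

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	/* Keep the metadata small; it is paid for on every object. */
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}
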
@@ -522,16 +529,16 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
 
 bool kasan_slab_free(struct kmem_cache *cache, void *object)
 {
-#ifdef CONFIG_SLAB
 	/* RCU slabs could be legally used after free within the RCU period */
 	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
 		return false;
 
 	if (likely(cache->flags & SLAB_KASAN)) {
-		struct kasan_alloc_meta *alloc_info =
-			get_alloc_info(cache, object);
-		struct kasan_free_meta *free_info =
-			get_free_info(cache, object);
+		struct kasan_alloc_meta *alloc_info;
+		struct kasan_free_meta *free_info;
+
+		alloc_info = get_alloc_info(cache, object);
+		free_info = get_free_info(cache, object);
 
 		switch (alloc_info->state) {
 		case KASAN_STATE_ALLOC:
@@ -550,10 +557,6 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
 		}
 	}
 	return false;
-#else
-	kasan_poison_slab_free(cache, object);
-	return false;
-#endif
 }
 
 void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
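
The switch in kasan_slab_free() above drives a per-object state machine; the states live in mm/kasan/kasan.h. A sketch of the lifecycle (the state names match the cases used here; the transition comments are a paraphrase, not patch text):

enum kasan_state {
	KASAN_STATE_INIT,	/* fresh object, poisoned, never handed out */
	KASAN_STATE_ALLOC,	/* live: set in kasan_kmalloc() */
	KASAN_STATE_QUARANTINE,	/* freed but parked, not yet reusable */
	KASAN_STATE_FREE	/* evicted from quarantine, really freed */
};

/* Freeing an object that is already QUARANTINE or FREE is exactly what
 * a double-free looks like, which is what the switch reports on.
 */
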
@@ -576,7 +579,6 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	kasan_unpoison_shadow(object, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 			KASAN_KMALLOC_REDZONE);
-#ifdef CONFIG_SLAB
 	if (cache->flags & SLAB_KASAN) {
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
@@ -585,7 +587,6 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 		alloc_info->alloc_size = size;
 		set_track(&alloc_info->track, flags);
 	}
-#endif
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
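For the kasan_kmalloc() redzone poisoning above, redzone_start and redzone_end come from rounding the requested size and the full object size up to the shadow granule. A worked example, assuming KASAN_SHADOW_SCALE_SIZE == 8 and purely illustrative values:

/*
 * Requested size: 20; cache->object_size: 64; object at 0x...a40.
 *
 *   redzone_start = round_up(0x...a40 + 20, 8) = 0x...a58
 *   redzone_end   = round_up(0x...a40 + 64, 8) = 0x...a80
 *
 * [object, object + 20) is unpoisoned for the caller; bytes from
 * redzone_start up to redzone_end are marked KASAN_KMALLOC_REDZONE,
 * so any access past the requested 20 bytes (beyond the final partial
 * granule) triggers a report.
 */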