Skip to content

Commit 5474d33

Browse files
ftang1 authored and tehcaster committed
mm/slub: Improve redzone check and zeroing for krealloc()
For current krealloc(), one problem is its caller doesn't pass the old request size, say the object is 64 bytes kmalloc one, but caller may only requested 48 bytes. Then when krealloc() shrinks or grows in the same object, or allocate a new bigger object, it lacks this 'original size' information to do accurate data preserving or zeroing (when __GFP_ZERO is set). Thus with slub debug redzone and object tracking enabled, parts of the object after krealloc() might contain redzone data instead of zeroes, which is violating the __GFP_ZERO guarantees. Good thing is in this case, kmalloc caches do have this 'orig_size' feature. So solve the problem by utilize 'org_size' to do accurate data zeroing and preserving. [Thanks to syzbot and V, Narasimhan for discovering kfence and big kmalloc related issues in early patch version] Suggested-by: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Feng Tang <feng.tang@intel.com> Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
1 parent 9ef8568 commit 5474d33

File tree

1 file changed

+58
-22
lines changed

1 file changed

+58
-22
lines changed

mm/slub.c

Lines changed: 58 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -4718,34 +4718,66 @@ static __always_inline __realloc_size(2) void *
47184718
__do_krealloc(const void *p, size_t new_size, gfp_t flags)
47194719
{
47204720
void *ret;
4721-
size_t ks;
4721+
size_t ks = 0;
4722+
int orig_size = 0;
4723+
struct kmem_cache *s = NULL;
47224724

4723-
/* Check for double-free before calling ksize. */
4724-
if (likely(!ZERO_OR_NULL_PTR(p))) {
4725-
if (!kasan_check_byte(p))
4726-
return NULL;
4727-
ks = ksize(p);
4728-
} else
4729-
ks = 0;
4725+
if (unlikely(ZERO_OR_NULL_PTR(p)))
4726+
goto alloc_new;
47304727

4731-
/* If the object still fits, repoison it precisely. */
4732-
if (ks >= new_size) {
4733-
/* Zero out spare memory. */
4734-
if (want_init_on_alloc(flags)) {
4735-
kasan_disable_current();
4736-
memset(kasan_reset_tag(p) + new_size, 0, ks - new_size);
4737-
kasan_enable_current();
4728+
/* Check for double-free. */
4729+
if (!kasan_check_byte(p))
4730+
return NULL;
4731+
4732+
if (is_kfence_address(p)) {
4733+
ks = orig_size = kfence_ksize(p);
4734+
} else {
4735+
struct folio *folio;
4736+
4737+
folio = virt_to_folio(p);
4738+
if (unlikely(!folio_test_slab(folio))) {
4739+
/* Big kmalloc object */
4740+
WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE);
4741+
WARN_ON(p != folio_address(folio));
4742+
ks = folio_size(folio);
4743+
} else {
4744+
s = folio_slab(folio)->slab_cache;
4745+
orig_size = get_orig_size(s, (void *)p);
4746+
ks = s->object_size;
47384747
}
4748+
}
47394749

4740-
p = kasan_krealloc((void *)p, new_size, flags);
4741-
return (void *)p;
4750+
/* If the old object doesn't fit, allocate a bigger one */
4751+
if (new_size > ks)
4752+
goto alloc_new;
4753+
4754+
/* Zero out spare memory. */
4755+
if (want_init_on_alloc(flags)) {
4756+
kasan_disable_current();
4757+
if (orig_size && orig_size < new_size)
4758+
memset(kasan_reset_tag(p) + orig_size, 0, new_size - orig_size);
4759+
else
4760+
memset(kasan_reset_tag(p) + new_size, 0, ks - new_size);
4761+
kasan_enable_current();
47424762
}
47434763

4764+
/* Setup kmalloc redzone when needed */
4765+
if (s && slub_debug_orig_size(s)) {
4766+
set_orig_size(s, (void *)p, new_size);
4767+
if (s->flags & SLAB_RED_ZONE && new_size < ks)
4768+
memset_no_sanitize_memory(kasan_reset_tag(p) + new_size,
4769+
SLUB_RED_ACTIVE, ks - new_size);
4770+
}
4771+
4772+
p = kasan_krealloc(p, new_size, flags);
4773+
return (void *)p;
4774+
4775+
alloc_new:
47444776
ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
47454777
if (ret && p) {
47464778
/* Disable KASAN checks as the object's redzone is accessed. */
47474779
kasan_disable_current();
4748-
memcpy(ret, kasan_reset_tag(p), ks);
4780+
memcpy(ret, kasan_reset_tag(p), orig_size ?: ks);
47494781
kasan_enable_current();
47504782
}
47514783

@@ -4766,16 +4798,20 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
47664798
* memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
47674799
* __GFP_ZERO is not fully honored by this API.
47684800
*
4769-
* This is the case, since krealloc() only knows about the bucket size of an
4770-
* allocation (but not the exact size it was allocated with) and hence
4771-
* implements the following semantics for shrinking and growing buffers with
4772-
* __GFP_ZERO.
4801+
* When slub_debug_orig_size() is off, krealloc() only knows about the bucket
4802+
* size of an allocation (but not the exact size it was allocated with) and
4803+
* hence implements the following semantics for shrinking and growing buffers
4804+
* with __GFP_ZERO.
47734805
*
47744806
* new bucket
47754807
* 0 size size
47764808
* |--------|----------------|
47774809
* | keep | zero |
47784810
*
4811+
* Otherwise, the original allocation size 'orig_size' could be used to
4812+
* precisely clear the requested size, and the new size will also be stored
4813+
* as the new 'orig_size'.
4814+
*
47794815
* In any case, the contents of the object pointed to are preserved up to the
47804816
* lesser of the new and old sizes.
47814817
*

0 commit comments

Comments
 (0)