2.24-2.35 Heap-Related Source Code Reading Notes

Part two of my source-code reading series! The scope is the malloc.c source of glibc 2.24 through 2.35 (only the parts that changed between versions are analyzed).
P.S. I got a bit lazy and only analyzed the changes I consider impactful (●ˇ∀ˇ●)

2.24

  • The definition of MALLOC_ALIGNMENT changed (a quick sanity check of the two quantities involved appears after this list):
    #ifndef MALLOC_ALIGNMENT
    # define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
                               ? __alignof__ (long double) : 2 * SIZE_SZ)
    #endif
  • Added the macro DUMPED_MAIN_ARENA_CHUNK(p), which validates mmapped chunks (undumping support):
    /* These variables are used for undumping support.  Chunked are marked
       as using mmap, but we leave them alone if they fall into this
       range.  NB: The chunk size for these chunks only includes the
       initial size field (of SIZE_SZ bytes), there is no trailing size
       field (unlike with regular mmapped chunks).  */
    static mchunkptr dumped_main_arena_start; /* Inclusive.  */
    static mchunkptr dumped_main_arena_end;   /* Exclusive.  */

    /* True if the pointer falls into the dumped arena.  Use this after
       chunk_is_mmapped indicates a chunk is mmapped.  */
    #define DUMPED_MAIN_ARENA_CHUNK(p) \
      ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
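Regarding the MALLOC_ALIGNMENT change above, here is a quick standalone check (my own snippet, not glibc code; it assumes SIZE_SZ == sizeof (size_t) as in glibc). On x86-64 both sides evaluate to 16, so the value is unchanged there; the new definition matters on ABIs where long double requires stricter alignment than 2 * SIZE_SZ:

#include <stdio.h>

int main (void)
{
  size_t two_size_sz = 2 * sizeof (size_t);   /* stands in for 2 * SIZE_SZ */
  size_t ld_align = __alignof__ (long double);
  /* Same selection logic as the new MALLOC_ALIGNMENT definition.  */
  printf ("2*SIZE_SZ=%zu alignof(long double)=%zu -> MALLOC_ALIGNMENT=%zu\n",
          two_size_sz, ld_align,
          two_size_sz < ld_align ? ld_align : two_size_sz);
  return 0;
}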

2.25

Added some convenience macros:

/* Get size, ignoring use bits */
#define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))

/* Like chunksize, but do not mask SIZE_BITS.  */
#define chunksize_nomask(p) ((p)->mchunk_size)

/* Ptr to next physical malloc_chunk.  */
#define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))

/* Size of the chunk below P.  Only valid if prev_inuse (P).  */
#define prev_size(p) ((p)->mchunk_prev_size)

/* Set the size of the chunk below P.  Only valid if prev_inuse (P).  */
#define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))

/* Ptr to previous physical malloc_chunk.  Only valid if prev_inuse (P).  */
#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
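As an illustration, here is a standalone sketch that mirrors these accessors with a minimal user-side copy of the chunk header (hypothetical demo code, not glibc's; it assumes the usual 64-bit two-word prev_size/size header and two chunks allocated back to back on a fresh heap):

#include <stdio.h>
#include <stdlib.h>

typedef struct chunk
{
  size_t mchunk_prev_size;
  size_t mchunk_size;
} chunk;

#define SIZE_BITS 0x7
#define chunksize_nomask(p) ((p)->mchunk_size)
#define chunksize(p) (chunksize_nomask (p) & ~((size_t) SIZE_BITS))
#define mem2chunk(mem) ((chunk *) ((char *) (mem) - 2 * sizeof (size_t)))
#define next_chunk(p) ((chunk *) (((char *) (p)) + chunksize (p)))

int main (void)
{
  void *a = malloc (0x18);      /* becomes a 0x20-byte chunk */
  void *b = malloc (0x18);
  chunk *ca = mem2chunk (a);
  printf ("chunk a size: %#zx\n", chunksize (ca));
  /* On a fresh heap the two chunks are adjacent, so these match.  */
  printf ("next_chunk(a): %p, chunk of b: %p\n",
          (void *) next_chunk (ca), (void *) mem2chunk (b));
  free (b);
  free (a);
  return 0;
}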

2.26

tcache

tcache-related macro definitions

#if USE_TCACHE
/* We want 64 entries.  This is an arbitrary limit, which tunables can reduce.  */
# define TCACHE_MAX_BINS 64
# define MAX_TCACHE_SIZE tidx2usize (TCACHE_MAX_BINS-1)

/* Only used to pre-fill the tunables.  */
# define tidx2usize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)

/* When "x" is from chunksize().  */
# define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
/* When "x" is a user-provided size.  */
# define usize2tidx(x) csize2tidx (request2size (x))

/* With rounding and alignment, the bins are...
   idx 0   bytes 0..24 (64-bit) or 0..12 (32-bit)
   idx 1   bytes 25..40 or 13..20
   idx 2   bytes 41..56 or 21..28
   etc.  */

/* This is another arbitrary limit, which tunables can change.  Each
   tcache bin will hold at most this number of chunks.  */
# define TCACHE_FILL_COUNT 7
#endif
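Worked example under the usual 64-bit values (SIZE_SZ = 8, MALLOC_ALIGNMENT = 16, MINSIZE = 32): a malloc(0x28) request is padded by request2size to a 0x30-byte chunk, and csize2tidx(0x30) = (0x30 - 32 + 15) / 16 = 1, so it is served from tcache bin 1. Likewise MAX_TCACHE_SIZE = tidx2usize(63) = 63 * 16 + 32 - 8 = 0x408 usable bytes, i.e. chunks up to size 0x410 are tcache-eligible.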
tcache-related functions and structs

The core structure of the tcache implementation, tcache_perthread_struct, sits at the very start of the heap (it is itself allocated as a chunk):

#if USE_TCACHE

/* We overlay this structure on the user-data portion of a chunk when
   the chunk is stored in the per-thread cache.  */
typedef struct tcache_entry
{
  struct tcache_entry *next;
} tcache_entry;

typedef struct tcache_perthread_struct
{
  char counts[TCACHE_MAX_BINS];
  tcache_entry *entries[TCACHE_MAX_BINS];
} tcache_perthread_struct;

static __thread char tcache_shutting_down = 0;
static __thread tcache_perthread_struct *tcache = NULL;

static void
tcache_put (mchunkptr chunk, size_t tc_idx)
{
  /* Note: the tcache_entry pointer points at the chunk's user-data area!  */
  tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
  assert (tc_idx < TCACHE_MAX_BINS);
  e->next = tcache->entries[tc_idx];
  tcache->entries[tc_idx] = e;
  ++(tcache->counts[tc_idx]);
}

/* Caller must ensure that we know tc_idx is valid and there's
   available chunks to remove.  */
static void *
tcache_get (size_t tc_idx)
{
  tcache_entry *e = tcache->entries[tc_idx];
  assert (tc_idx < TCACHE_MAX_BINS);
  assert (tcache->entries[tc_idx] > 0);
  /* The assert above is buggy: it was meant to check counts[tc_idx] > 0,
     so counts can underflow to a negative value.  */
  tcache->entries[tc_idx] = e->next;
  --(tcache->counts[tc_idx]);
  return (void *) e;
}

static void
tcache_init(void)
{
  mstate ar_ptr;
  void *victim = 0;
  const size_t bytes = sizeof (tcache_perthread_struct);

  if (tcache_shutting_down)
    return;

  arena_get (ar_ptr, bytes);
  victim = _int_malloc (ar_ptr, bytes);
  if (!victim && ar_ptr != NULL)
    {
      ar_ptr = arena_get_retry (ar_ptr, bytes);
      victim = _int_malloc (ar_ptr, bytes);
    }

  if (ar_ptr != NULL)
    __libc_lock_unlock (ar_ptr->mutex);

  if (victim)
    {
      tcache = (tcache_perthread_struct *) victim;
      memset (tcache, 0, sizeof (tcache_perthread_struct));
    }
}

#define MAYBE_INIT_TCACHE() \
  if (__glibc_unlikely (tcache == NULL)) \
    tcache_init();

#else
#define MAYBE_INIT_TCACHE()
#endif
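For reference, with this 2.26 layout on 64-bit, sizeof (tcache_perthread_struct) = 64 * 1 + 64 * 8 = 0x240 bytes, so the chunk holding it at the start of the heap has size 0x250, a handy landmark when reading heap dumps.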
Changes to __libc_malloc

At the start, the tcache is lazily initialized, and if the matching tcache bin is non-empty a chunk is returned straight from it. (The tail of the excerpt below is from __libc_free, which gains the same lazy MAYBE_INIT_TCACHE() call before handing the chunk to _int_free.)

void *
__libc_malloc (size_t bytes)
{
  mstate ar_ptr;
  void *victim;

  void *(*hook) (size_t, const void *)
    = atomic_forced_read (__malloc_hook);
  if (__builtin_expect (hook != NULL, 0))
    return (*hook)(bytes, RETURN_ADDRESS (0));
#if USE_TCACHE
  /* int_free also calls request2size, be careful to not pad twice.  */
  size_t tbytes = request2size (bytes); /* unsafe: no overflow check here */
  size_t tc_idx = csize2tidx (tbytes);

  MAYBE_INIT_TCACHE ();

  DIAG_PUSH_NEEDS_COMMENT;
  if (tc_idx < mp_.tcache_bins
      /*&& tc_idx < TCACHE_MAX_BINS*/ /* to appease gcc */
      && tcache
      && tcache->entries[tc_idx] != NULL)
    {
      return tcache_get (tc_idx);
    }
  DIAG_POP_NEEDS_COMMENT;
#endif

……

/* (tail of __libc_free, which gains the same lazy initialization:)  */
  MAYBE_INIT_TCACHE ();

  ar_ptr = arena_for_chunk (p);
  _int_free (ar_ptr, p, 0);
}
Changes to _int_malloc

In the fastbin path

After a chunk of the right size is taken from a fastbin, remaining chunks of the same size are stashed into the corresponding tcache bin until it is full (a minimal user-space demo of this appears after the code):

#define REMOVE_FB(fb, victim, pp)                                       \
  do                                                                    \
    {                                                                   \
      victim = pp;                                                      \
      if (victim == NULL)                                               \
        break;                                                          \
    }                                                                   \
  while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \
         != victim);

  if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
    {
      idx = fastbin_index (nb);
      mfastbinptr *fb = &fastbin (av, idx);
      mchunkptr pp = *fb;
      REMOVE_FB (fb, victim, pp); /* pops the fastbin head (most recently
                                     freed chunk) into victim */
      if (victim != 0)
        {
          if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
            {
              errstr = "malloc(): memory corruption (fast)";
            errout:
              malloc_printerr (check_action, errstr, chunk2mem (victim), av);
              return NULL;
            }
          check_remalloced_chunk (av, victim, nb);
#if USE_TCACHE
          /* While we're here, if we see other chunks of the same size,
             stash them in the tcache.  */
          size_t tc_idx = csize2tidx (nb);
          if (tcache && tc_idx < mp_.tcache_bins)
            {
              mchunkptr tc_victim;

              /* While bin not empty and tcache not full, copy chunks over.  */
              while (tcache->counts[tc_idx] < mp_.tcache_count
                     && (pp = *fb) != NULL)
                {
                  REMOVE_FB (fb, tc_victim, pp); /* pop the current fastbin head */
                  if (tc_victim != 0)
                    {
                      tcache_put (tc_victim, tc_idx); /* move each popped head
                                                         into the tcache */
                    }
                }
            }
#endif
          void *p = chunk2mem (victim);
          alloc_perturb (p, bytes);
          return p;
        }
    }
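Here is a minimal user-space sketch of the stashing behavior (assumes glibc 2.26+ with default tunables on 64-bit; best observed under a debugger, since the program prints nothing):

#include <stdlib.h>

int main (void)
{
  void *p[9];
  for (int i = 0; i < 9; i++)
    p[i] = malloc (0x20);
  for (int i = 0; i < 9; i++)
    free (p[i]);         /* first 7 frees fill the 0x30 tcache bin, the
                            last 2 chunks land in the 0x30 fastbin */
  for (int i = 0; i < 7; i++)
    malloc (0x20);       /* drain the tcache bin */
  malloc (0x20);         /* served from the fastbin head; the remaining
                            fastbin chunk gets stashed into the tcache */
  return 0;
}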
The same stashing happens in the small bin path:

  if (in_smallbin_range (nb))
    {
      idx = smallbin_index (nb);
      bin = bin_at (av, idx);

      if ((victim = last (bin)) != bin)
        {
          if (victim == 0) /* initialization check */
            malloc_consolidate (av);
          else
            {
              bck = victim->bk;
              if (__glibc_unlikely (bck->fd != victim))
                {
                  errstr = "malloc(): smallbin double linked list corrupted";
                  goto errout;
                }
              set_inuse_bit_at_offset (victim, nb);
              bin->bk = bck;
              bck->fd = bin;

              if (av != &main_arena)
                set_non_main_arena (victim);
              check_malloced_chunk (av, victim, nb);
#if USE_TCACHE
              /* While we're here, if we see other chunks of the same size,
                 stash them in the tcache.  */
              size_t tc_idx = csize2tidx (nb);
              if (tcache && tc_idx < mp_.tcache_bins)
                {
                  mchunkptr tc_victim;

                  /* While bin not empty and tcache not full, copy chunks over.  */
                  while (tcache->counts[tc_idx] < mp_.tcache_count
                         && (tc_victim = last (bin)) != bin)
                    {
                      if (tc_victim != 0)
                        {
                          bck = tc_victim->bk;
                          set_inuse_bit_at_offset (tc_victim, nb);
                          if (av != &main_arena)
                            set_non_main_arena (tc_victim);
                          bin->bk = bck;
                          bck->fd = bin;

                          tcache_put (tc_victim, tc_idx);
                        }
                    }
                }
#endif
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }
        }
    }
Before the big for (;;) loop

The relevant bookkeeping variables are set up:

#if USE_TCACHE
  INTERNAL_SIZE_T tcache_nb = 0;
  size_t tc_idx = csize2tidx (nb);
  if (tcache && tc_idx < mp_.tcache_bins)
    tcache_nb = nb;
  int return_cached = 0;

  tcache_unsorted_count = 0;
#endif
When the unsorted-bin traversal finds a chunk of exactly the requested size:

          if (size == nb)
            {
              set_inuse_bit_at_offset (victim, size);
              if (av != &main_arena)
                set_non_main_arena (victim);
#if USE_TCACHE
              /* Fill cache first, return to user only if cache fills.
                 We may return one of these chunks later.  */
              if (tcache_nb
                  && tcache->counts[tc_idx] < mp_.tcache_count)
                {
                  /* If the tcache bin is not full, the matching chunk is
                     stashed there first; it has already been unlinked at
                     this point and may not be returned right away.  */
                  tcache_put (victim, tc_idx);
                  return_cached = 1;
                  continue;
                }
              else
                {
#endif
              check_malloced_chunk (av, victim, nb);
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
#if USE_TCACHE
                }
#endif
            }
At the end of each unsorted-bin iteration

If a match has been cached and we have already processed as many unsorted chunks as allowed, return a cached chunk:

#if USE_TCACHE
      /* If we've processed as many chunks as we're allowed while
         filling the cache, return one of the cached ones.  */
      ++tcache_unsorted_count;
      if (return_cached
          && mp_.tcache_unsorted_limit > 0
          && tcache_unsorted_count > mp_.tcache_unsorted_limit)
        {
          return tcache_get (tc_idx);
        }
#endif

#define MAX_ITERS 10000
      if (++iters >= MAX_ITERS)
        break;
After the unsorted-bin loop

If a matching chunk was cached during the traversal, return it now:

#if USE_TCACHE
      /* If all the small chunks we found ended up cached, return one now.  */
      if (return_cached)
        {
          return tcache_get (tc_idx);
        }
#endif
Changes to _int_free

After the first two sanity checks, if the chunk fits a tcache bin that is not yet full, it is put there and free returns immediately:

#if USE_TCACHE
  {
    size_t tc_idx = csize2tidx (size);

    if (tcache
        && tc_idx < mp_.tcache_bins
        && tcache->counts[tc_idx] < mp_.tcache_count)
      {
        tcache_put (p, tc_idx);
        return;
      }
  }
#endif

A new check was added at the start of unlink:

#define unlink(AV, P, BK, FD) {                                            \
    /* check that P's size matches the next chunk's prev_size */          \
    if (__builtin_expect (chunksize(P) != prev_size (next_chunk(P)), 0))   \
      malloc_printerr (check_action, "corrupted size vs. prev_size", P, AV); \
    FD = P->fd;                                                            \
    BK = P->bk;                                                            \

……

}

2.27

  • checked_request2size updated (a short worked note follows this list):
    /* Same, except also perform an argument and result check.  First, we check
       that the padding done by request2size didn't result in an integer
       overflow.  Then we check (using REQUEST_OUT_OF_RANGE) that the resulting
       size isn't so large that a later alignment would lead to another integer
       overflow.  */
    #define checked_request2size(req, sz) \
    ({                                  \
      (sz) = request2size (req);        \
      if (((sz) < (req))                \
          || REQUEST_OUT_OF_RANGE (sz)) \
        {                               \
          __set_errno (ENOMEM);         \
          return 0;                     \
        }                               \
    })
  • get_max_fast updated:
    static inline INTERNAL_SIZE_T
    get_max_fast (void)
    {
      /* Tell the GCC optimizers that global_max_fast is never larger
         than MAX_FAST_SIZE.  This avoids out-of-bounds array accesses in
         _int_malloc after constant propagation of the size parameter.
         (The code never executes because malloc preserves the
         global_max_fast invariant, but the optimizers may not recognize
         this.)  */
      if (global_max_fast > MAX_FAST_SIZE)
        __builtin_unreachable ();
      return global_max_fast;
    }
  • malloc_state gains a have_fastchunks member:
    struct malloc_state
    {
      /* Serialize access.  */
      __libc_lock_define (, mutex);

      /* Flags (formerly in max_fast).  */
      int flags;

      /* Set if the fastbin chunks contain recently inserted free blocks.  */
      /* Note this is a bool but not all targets support atomics on booleans.  */
      int have_fastchunks;

      ……

    };
  • At the start of __libc_malloc, request2size is replaced by the safer checked_request2size:
    #if USE_TCACHE
      /* int_free also calls request2size, be careful to not pad twice.  */
      size_t tbytes;
      checked_request2size (bytes, tbytes);
      size_t tc_idx = csize2tidx (tbytes);
  • At the start of each fastbin-list traversal in malloc_consolidate, the chunk's size is checked against the bin it came from:
    {
      unsigned int idx = fastbin_index (chunksize (p));
      if ((&fastbin (av, idx)) != fb)
        malloc_printerr ("malloc_consolidate(): invalid chunk size");
    }
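A worked note on the checked_request2size bullet above: request2size pads by SIZE_SZ and rounds up to MALLOC_ALIGNMENT, so on 64-bit the padded size can only wrap for req within roughly 23 bytes of SIZE_MAX; the (sz) < (req) test catches exactly that wrap. REQUEST_OUT_OF_RANGE (sz), i.e. sz >= (size_t)(-2 * MINSIZE), additionally leaves MINSIZE-order headroom so that later alignment padding (e.g. in memalign) cannot overflow either.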

2.28

  • In _int_malloc, taking a chunk off the unsorted bin now verifies the list linkage first:
    /* remove from unsorted list */
    if (__glibc_unlikely (bck->fd != victim))
      malloc_printerr ("malloc(): corrupted unsorted chunks 3");
    unsorted_chunks (av)->bk = bck;
    bck->fd = unsorted_chunks (av);

2.29

  • tcache_entry gains a key member, used to detect tcache double frees:
    typedef struct tcache_entry
    {
      struct tcache_entry *next;
      /* This field exists to detect double frees.  */
      struct tcache_perthread_struct *key;
    } tcache_entry;
  • tcache_put and tcache_get maintain the key member:
    static __always_inline void
    tcache_put (mchunkptr chunk, size_t tc_idx)
    {
      tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
      assert (tc_idx < TCACHE_MAX_BINS);

      /* Mark this chunk as "in the tcache" so the test in _int_free will
         detect a double free.  */
      e->key = tcache; /* mark the chunk as being in the tcache */

      e->next = tcache->entries[tc_idx];
      tcache->entries[tc_idx] = e;
      ++(tcache->counts[tc_idx]);
    }

    /* Caller must ensure that we know tc_idx is valid and there's
       available chunks to remove.  */
    static __always_inline void *
    tcache_get (size_t tc_idx)
    {
      tcache_entry *e = tcache->entries[tc_idx];
      assert (tc_idx < TCACHE_MAX_BINS);
      assert (tcache->entries[tc_idx] > 0);
      tcache->entries[tc_idx] = e->next;
      --(tcache->counts[tc_idx]);
      e->key = NULL; /* mark the chunk as no longer in the tcache */
      return (void *) e;
    }
  • More checks at the start of the unsorted-bin traversal in _int_malloc's big for (;;) loop: the victim's size must match the next chunk's prev_size, both sizes must be within legal bounds, the doubly linked list pointers must be consistent, and the next chunk's prev_inuse bit must show the victim as free:
    bck = victim->bk;
    size = chunksize (victim);
    mchunkptr next = chunk_at_offset (victim, size);

    if (__glibc_unlikely (size <= 2 * SIZE_SZ)
        || __glibc_unlikely (size > av->system_mem))
      malloc_printerr ("malloc(): invalid size (unsorted)");
    if (__glibc_unlikely (chunksize_nomask (next) < 2 * SIZE_SZ)
        || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
      malloc_printerr ("malloc(): invalid next size (unsorted)");
    if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
      malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
    if (__glibc_unlikely (bck->fd != victim)
        || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
      malloc_printerr ("malloc(): unsorted double linked list corrupted");
    if (__glibc_unlikely (prev_inuse (next)))
      malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
  • use_top now checks that the top chunk size is within legal bounds:
    use_top:

    victim = av->top;
    size = chunksize (victim);

    if (__glibc_unlikely (size > av->system_mem))
      malloc_printerr ("malloc(): corrupted top size");
  • The tcache path at the start of _int_free gains a double-free check (a minimal trigger is shown after this list):
    #if USE_TCACHE
      {
        size_t tc_idx = csize2tidx (size);
        if (tcache != NULL && tc_idx < mp_.tcache_bins)
          {
            /* Check to see if it's already in the tcache.  */
            tcache_entry *e = (tcache_entry *) chunk2mem (p);

            /* This test succeeds on double free.  However, we don't 100%
               trust it (it also matches random payload data at a 1 in
               2^<size_t> chance), so verify it's not an unlikely
               coincidence before aborting.  */
            if (__glibc_unlikely (e->key == tcache))
              {
                tcache_entry *tmp;
                LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
                for (tmp = tcache->entries[tc_idx];
                     tmp;
                     tmp = tmp->next)
                  if (tmp == e)
                    malloc_printerr ("free(): double free detected in tcache 2");
                /* If we get here, it was a coincidence.  We've wasted a
                   few cycles, but don't abort.  */
              }

            if (tcache->counts[tc_idx] < mp_.tcache_count)
              {
                tcache_put (p, tc_idx);
                return;
              }
          }
      }
    #endif
  • The backward-consolidation path in _int_free now checks that the previous chunk's recorded size matches prev_size; malloc_consolidate gets the same check:
    /* consolidate backward */
    if (!prev_inuse(p)) {
      prevsize = prev_size (p);
      size += prevsize;
      p = chunk_at_offset(p, -((long) prevsize));
      if (__glibc_unlikely (chunksize(p) != prevsize))
        malloc_printerr ("corrupted size vs. prev_size while consolidating");
      unlink_chunk (av, p);
    }
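The double-free check above fires immediately on the classic pattern; a minimal trigger on glibc >= 2.29 (on 2.26-2.28 the same program silently corrupts the tcache list instead):

#include <stdlib.h>

int main (void)
{
  void *p = malloc (0x20);
  free (p);   /* p goes into the tcache, e->key = tcache */
  free (p);   /* aborts: "free(): double free detected in tcache 2" */
  return 0;
}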

2.30

  • checked_request2size updated again; it is now a function that reports failure via its return value:
    static inline bool
    checked_request2size (size_t req, size_t *sz) __nonnull (1)
    {
      if (__glibc_unlikely (req > PTRDIFF_MAX))
        return false;
      *sz = request2size (req);
      return true;
    }
  • All the asserts in tcache_put and tcache_get were removed
  • When taking a chunk from the tcache, __libc_malloc now uses counts to decide whether the bin is non-empty (instead of testing entries[tc_idx] != NULL):
    if (tc_idx < mp_.tcache_bins
        && tcache
        && tcache->counts[tc_idx] > 0)
      {
        return tcache_get (tc_idx);
      }
  • _int_malloc gains linkage checks when inserting a chunk into a large bin:
    else
      {
        victim->fd_nextsize = fwd;
        victim->bk_nextsize = fwd->bk_nextsize;
        if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))
          malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
        fwd->bk_nextsize = victim;
        victim->bk_nextsize->fd_nextsize = victim;
      }
    bck = fwd->bk;
    if (bck->fd != fwd)
      malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");

2.31

(No changes I consider impactful.)

2.32

  • Added safe-linking, a pointer-mangling protection for the singly linked lists (fastbin and tcache), plus a 0x10-alignment check on chunks taken from them (so the old misaligned-0x7f fastbin trick no longer works /(ㄒoㄒ)/~~):
    #define PROTECT_PTR(pos, ptr) \
      ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
    #define REVEAL_PTR(ptr)  PROTECT_PTR (&ptr, ptr)

    static __always_inline void
    tcache_put (mchunkptr chunk, size_t tc_idx)
    {
      tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
      e->key = tcache;
      e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
      /* stores (old fd) ^ (&e->next >> 12); in the 2.31 source this was
         simply: e->next = tcache->entries[tc_idx]; */

      tcache->entries[tc_idx] = e;
      ++(tcache->counts[tc_idx]);
    }

    static __always_inline void *
    tcache_get (size_t tc_idx)
    {
      tcache_entry *e = tcache->entries[tc_idx];
      if (__glibc_unlikely (!aligned_OK (e)))
        malloc_printerr ("malloc(): unaligned tcache chunk detected");
      /* alignment check when taking a chunk from the tcache; taking one
         from a fastbin has the analogous check */

      tcache->entries[tc_idx] = REVEAL_PTR (e->next);
      /* unmangles: (stored value) ^ (&e->next >> 12) */
      --(tcache->counts[tc_idx]);
      e->key = NULL;
      return (void *) e;
    }
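To see what the mangling buys (and costs) an attacker, here is a standalone re-implementation sketch (my own demo on 64-bit, not glibc code; the address is illustrative): the first chunk linked into a bin has next == NULL, so leaking its mangled next field directly reveals &e->next >> 12, i.e. the heap's page range under ASLR.

#include <stdint.h>
#include <stdio.h>

/* Same formula as glibc's PROTECT_PTR, re-implemented for illustration.  */
static uintptr_t
protect_ptr (uintptr_t pos, uintptr_t ptr)
{
  return (pos >> 12) ^ ptr;
}

int main (void)
{
  uintptr_t fd_addr = 0x55555555a2a0;            /* illustrative heap address */
  uintptr_t mangled = protect_ptr (fd_addr, 0);  /* real next == NULL */
  printf ("leaked mangled fd = %#lx -> heap pages near %#lx\n",
          (unsigned long) mangled, (unsigned long) (mangled << 12));
  return 0;
}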

2.33

  • Added the memory tagging (MTE) machinery; I haven't fully worked through it, but it doesn't seem to change much here (
  • The tcache double-free scan in _int_free now also aborts if the list holds more chunks than the count allows:
    size_t cnt = 0;
    LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
    for (tmp = tcache->entries[tc_idx];
         tmp;
         tmp = REVEAL_PTR (tmp->next), ++cnt)
      {
        if (cnt >= mp_.tcache_count)
          malloc_printerr ("free(): too many chunks detected in tcache");
        if (tmp == e)
          malloc_printerr ("free(): double free detected in tcache 2");
      }

2.34

  • Added the tcache_key mechanism: a process-wide random value replaces the tcache pointer as the marker used to detect double frees (a sketch of the matching tcache_put/tcache_get change follows this list):
    /* Process-wide key to try and catch a double-free in the same thread.  */
    static uintptr_t tcache_key;

    /* The value of tcache_key does not really have to be a cryptographically
       secure random number.  It only needs to be arbitrary enough so that it does
       not collide with values present in applications.  If a collision does happen
       consistently enough, it could cause a degradation in performance since the
       entire list is checked to check if the block indeed has been freed the
       second time.  The odds of this happening are exceedingly low though, about 1
       in 2^wordsize.  There is probably a higher chance of the performance
       degradation being due to a double free where the first free happened in a
       different thread; that's a case this check does not cover.  */
    static void
    tcache_key_initialize (void)
    {
      if (__getrandom (&tcache_key, sizeof(tcache_key), GRND_NONBLOCK)
          != sizeof (tcache_key))
        {
          tcache_key = random_bits ();
    #if __WORDSIZE == 64
          tcache_key = (tcache_key << 32) | random_bits ();
    #endif
        }
    }
  • All the hooks (__malloc_hook and friends) along with their related functions and exploitation paths were removed /(ㄒoㄒ)/~~
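For reference, the put/get sides change accordingly; a sketch paraphrased from the 2.34 source (the exact lines may differ slightly):

static __always_inline void
tcache_put (mchunkptr chunk, size_t tc_idx)
{
  tcache_entry *e = (tcache_entry *) chunk2mem (chunk);

  /* Mark this chunk as "in the tcache" so the test in _int_free will
     detect a double free.  */
  e->key = tcache_key;   /* random key instead of the tcache pointer */

  e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
  tcache->entries[tc_idx] = e;
  ++(tcache->counts[tc_idx]);
}

static __always_inline void *
tcache_get (size_t tc_idx)
{
  tcache_entry *e = tcache->entries[tc_idx];
  if (__glibc_unlikely (!aligned_OK (e)))
    malloc_printerr ("malloc(): unaligned tcache chunk detected");
  tcache->entries[tc_idx] = REVEAL_PTR (e->next);
  --(tcache->counts[tc_idx]);
  e->key = 0;            /* clear the marker on the way out */
  return (void *) e;
}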

2.35

(No changes I consider impactful.)

