Note: IS_MMAPPED is intentionally not masked off from size field in macros for which mmapped chunks should never be seen. This should cause helpful core dumps to occur if it is tried by accident by people extending or adapting this malloc.
To help compensate for the large number of bins, a one-level index structure is used for bin-by-bin searching. `binmap' is a bitvector recording whether bins are definitely empty so they can be skipped over during traversals. The bits are NOT always cleared as soon as bins are empty, but instead only when they are noticed to be empty during traversal in malloc.
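To make the block/bit arithmetic concrete, here is a minimal, self-contained sketch of the binmap index math, mirroring the constants glibc uses (BINMAPSHIFT = 5, NBINS = 128, so four 32-bit words cover every bin); the demo main and the bin number 70 are my own illustration:

#include <stdio.h>

#define NBINS        128
#define BINMAPSHIFT  5
#define BITSPERMAP   (1U << BINMAPSHIFT)
#define BINMAPSIZE   (NBINS / BITSPERMAP)

#define idx2block(i) ((i) >> BINMAPSHIFT)                      /* which map word */
#define idx2bit(i)   (1U << ((i) & ((1U << BINMAPSHIFT) - 1))) /* which bit in that word */

int main (void)
{
  unsigned int binmap[BINMAPSIZE] = { 0 };

  binmap[idx2block (70)] |= idx2bit (70);   /* mark_bin: bin 70 may be non-empty */

  for (unsigned int i = 64; i < 72; i++)
    printf ("bin %u: block %u, bit 0x%08x, marked=%d\n",
            i, idx2block (i), idx2bit (i),
            !!(binmap[idx2block (i)] & idx2bit (i)));
  return 0;
}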
static void *
_int_malloc (mstate av, size_t bytes)   // av: arena pointer; bytes: requested size
{
  INTERNAL_SIZE_T nb;               /* normalized request size */    // request size after alignment
  unsigned int idx;                 /* associated bin index */       // index of the bin for this size
  mbinptr bin;                      /* associated bin */

  mchunkptr victim;                 /* inspected/selected chunk */   // candidate chunk
  INTERNAL_SIZE_T size;             /* its size */
  int victim_index;                 /* its bin index */

  mchunkptr remainder;              /* remainder from a split */     // what is left after carving nb out of the candidate
  unsigned long remainder_size;     /* its size */

  unsigned int block;               /* bit map traverser */
  unsigned int bit;                 /* bit map traverser */
  unsigned int map;                 /* current word of binmap */

  mchunkptr fwd;                    /* misc temp for linking */
  mchunkptr bck;                    /* misc temp for linking */
  /*
     If the size qualifies as a fastbin, first check corresponding bin.
     This code is safe to execute even if av is not yet initialized, so we
     can try it without checking, which saves some time on this fast path.
   */
  // If the request falls in the fast bin range, allocate from the fast bins.
  // On the first call to malloc, max_fast is still 0, so this path is skipped.
  /*
     If a small request, check regular bin.  Since these "smallbins"
     hold one size each, no searching within bins is necessary.
     (For a large request, we need to wait until unsorted chunks are
     processed to find best fit. But for small ones, fits are exact
     anyway, so we can check now, which is faster.)
   */
  // If the size is in the small bin range, allocate from the small bins.
  if (in_smallbin_range (nb))
    {
      idx = smallbin_index (nb);    // index corresponding to the requested size
      bin = bin_at (av, idx);       // fetch that bin from the arena
      if ((victim = last (bin)) != bin)   // if the candidate equals the list head, the bin is empty
        {
          if (victim == 0) /* initialization check */   // victim == 0 means the bins have not been set up yet
            malloc_consolidate (av);      // the first malloc call ends up here and consolidates all fast bins
          else                            // otherwise try to take the candidate out of the small bin
            {
              bck = victim->bk;
              if (__glibc_unlikely (bck->fd != victim))
                {
                  errstr = "malloc(): smallbin double linked list corrupted";
                  goto errout;
                }
              set_inuse_bit_at_offset (victim, nb);   // set the inuse flag, which lives in bit 0 of the next chunk's size field
              bin->bk = bck;                          // take victim off the list -- effectively an unlink
              bck->fd = bin;                          // the fd of the last chunk in the bin points back into the arena (main_arena+offset)
As you can see, the code above performs what is effectively an unlink, and small bins have a corresponding small bin attack: to get past the bck->fd != victim check you have to forge a heap address. Notice that the fd of the last chunk in a small bin points to main_arena+offset, so forging a chunk also requires a libc address, which makes the primitive of limited value here. In newer versions such as glibc 2.27 and glibc 2.29, however, malloc handles small bins differently and the attack allows writing a libc address to an arbitrary location. I will come back to this when analyzing the source of those newer glibc versions.
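To make the check concrete, here is a tiny self-contained model of the bck->fd != victim test -- not exploit code. The chunk struct and the victim/target names are stand-ins of my own, not real heap state; the point is only why the attack needs a known heap address:

#include <stdio.h>

typedef struct chunk { struct chunk *fd, *bk; } chunk;

int main (void)
{
  chunk target = { NULL, NULL };        /* attacker has planted nothing yet */
  chunk victim = { NULL, &target };     /* forged bk aims the unlink at target */

  /* the check malloc performs before unlinking */
  chunk *bck = victim.bk;
  if (bck->fd != &victim)
    {
      puts ("malloc(): smallbin double linked list corrupted");
      return 1;
    }
  /* to pass, the attacker must first write &victim into target.fd,
     which already requires knowing a heap address */
  return 0;
}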
  /*
     If this is a large request, consolidate fastbins before continuing.
     While it might look excessive to kill all fastbins before
     even seeing if there is space available, this avoids
     fragmentation problems normally associated with fastbins.
     Also, in practice, programs tend to have runs of either small or
     large requests, but less often mixtures, so consolidation is not
     invoked all that often in most programs. And the programs that
     it is called frequently in otherwise tend to fragment.
   */
  // The size is not in the small bin range, so it belongs to a large bin.
  // In the malloc source, everything beyond the small bins counts as a large bin.
  else
    {
      idx = largebin_index (nb);    // index of the large bin for the requested size
      if (have_fastchunks (av))     // are there free chunks sitting in the fast bins?
        malloc_consolidate (av);    // consolidate all fast bins
    }
/* Process recently freed or remaindered chunks, taking one only if it is exact fit, or, if this a small request, the chunk is remainder from the most recent non-exact fit. Place other traversed chunks in bins. Note that this step is the only place in any routine where chunks are placed in bins. The outer loop here is needed because we might not realize until near the end of malloc that we should have consolidated, so must do so and retry. This happens at most once, and only when we would otherwise need to expand memory to service a "small" request. */
      /*
         If a small request, try to use last remainder if it is the
         only chunk in unsorted bin.  This helps promote locality for
         runs of consecutive small requests. This is the only
         exception to best-fit, and applies only when there is
         no exact fit for a small chunk.
       */
      // Taken when: the request is in the small bin range, the unsorted bin holds
      // exactly one chunk, that chunk is the last remainder chunk, and its size is
      // greater than nb + MINSIZE. (A toy model of the split arithmetic follows
      // after the loop cap below.)
      if (in_smallbin_range (nb) &&
          bck == unsorted_chunks (av) &&
          victim == av->last_remainder &&
          (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
        {
          /* split and reattach remainder */
          // carve the requested part out of the remainder and relink the rest
          // into the circular list headed by the unsorted bin
          remainder_size = size - nb;                   // size left over after the split
          remainder = chunk_at_offset (victim, nb);     // pointer to the leftover chunk
          unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;  // the arena points at the remainder
          av->last_remainder = remainder;               // record the new last remainder
          remainder->bk = remainder->fd = unsorted_chunks (av);             // the remainder points back at the arena
          if (!in_smallbin_range (remainder_size))      // if the remainder is not small-bin sized it is large-bin sized,
            {                                           // so clear fd_nextsize/bk_nextsize; the unsorted bin does not use them
              remainder->fd_nextsize = NULL;
              remainder->bk_nextsize = NULL;
            }
          set_head (victim, nb | PREV_INUSE |           // set the header fields of both chunks
                    (av != &main_arena ? NON_MAIN_ARENA : 0));
          set_head (remainder, remainder_size | PREV_INUSE);
          set_foot (remainder, remainder_size);
#define MAX_ITERS       10000
          if (++iters >= MAX_ITERS)
            break;
        }
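As promised above, a toy model of the remainder split arithmetic; the 0x90/0x30 sizes and the simplified chunk_at_offset are invented for illustration -- in glibc the macro is the same pointer-plus-offset idea:

#include <stdio.h>

#define chunk_at_offset(p, s) ((char *) (p) + (s))

int main (void)
{
  char heap[0x90];                  /* stand-in for the last remainder chunk */
  unsigned long size = sizeof heap; /* 0x90: current remainder size */
  unsigned long nb = 0x30;          /* normalized request */

  unsigned long remainder_size = size - nb;       /* 0x60 left over */
  char *remainder = chunk_at_offset (heap, nb);   /* leftover starts nb bytes in */

  printf ("victim=%p remainder=%p remainder_size=%#lx\n",
          (void *) heap, (void *) remainder, remainder_size);
  return 0;
}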
      /*
         If a large request, scan through the chunks of current bin in
         sorted order to find smallest that fits.  Use the skip list for this.
       */
      // For a large request, walk the current bin in sorted order to find the
      // smallest chunk that fits, using the nextsize skip list.
      // Once the unsorted bin has been processed, allocation is best-fit.
      if (!in_smallbin_range (nb))      // not a small bin request, so look in the large bins
        {
          bin = bin_at (av, idx);
          /* skip scan if empty or largest chunk is too small */
          // skip the bin if it is empty or its largest chunk cannot satisfy the request
          if ((victim = first (bin)) != bin &&
              (unsigned long) (victim->size) >= (unsigned long) (nb))
            {
              victim = victim->bk_nextsize;
              while (((unsigned long) (size = chunksize (victim)) <
                      (unsigned long) (nb)))            // walk up the sizes until one fits
                victim = victim->bk_nextsize;
              /* Avoid removing the first entry for a size so that the skip
                 list does not have to be rerouted.  */
              // If the next chunk (victim->fd) has the same size, take it instead,
              // so the nextsize skip list does not have to be rewired.
              if (victim != last (bin) && victim->size == victim->fd->size)
                victim = victim->fd;
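Here is a self-contained toy model of this search; the chunk struct, sizes, and links are invented, keeping only the bk_nextsize walk and the preference for victim->fd:

#include <stdio.h>

typedef struct chunk
{
  unsigned long size;
  struct chunk *fd, *bk_nextsize;
} chunk;

int main (void)
{
  /* three size groups: 0x400, 0x500 (two chunks), 0x800 */
  chunk c400  = { 0x400, NULL, NULL };
  chunk c500a = { 0x500, NULL, NULL }, c500b = { 0x500, NULL, NULL };
  chunk c800  = { 0x800, NULL, NULL };

  c400.bk_nextsize  = &c500a;   /* ascending nextsize chain from the small end */
  c500a.bk_nextsize = &c800;
  c500a.fd          = &c500b;   /* second chunk of the 0x500 group */

  unsigned long nb = 0x480;
  chunk *victim = &c400;        /* start from the smallest size */
  while (victim->size < nb)     /* walk up until one fits */
    victim = victim->bk_nextsize;

  /* prefer the second chunk of the group so the skip list stays intact */
  if (victim->fd && victim->fd->size == victim->size)
    victim = victim->fd;

  printf ("selected %#lx chunk; took the group head: %s\n",
          victim->size, victim == &c500a ? "yes" : "no");
  return 0;
}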
      /*
         Search for a chunk by scanning bins, starting with next largest
         bin. This search is strictly by best-fit; i.e., the smallest
         (with ties going to approximately the least recently used) chunk
         that fits is selected.

         The bitmap avoids needing to check that most blocks are nonempty.
         The particular case of skipping all bins during warm-up phases
         when no chunks have been returned yet is faster than it might look.
       */
      // No fitting chunk was found in the small or large bins, so search the
      // bins holding free chunks larger than the requested size.
      ++idx;
      bin = bin_at (av, idx);
      block = idx2block (idx);
      map = av->binmap[block];
      bit = idx2bit (idx);
      for (;; )
        {
          /* Skip rest of block if there are no more set bits in this block.  */
          // no set bits remain in this word of the binmap, so skip the rest of it
          if (bit > map || bit == 0)
            {
              do
                {
                  if (++block >= BINMAPSIZE) /* out of bins */
                    goto use_top;           // no suitable chunk anywhere; fall back to the top chunk
                }
              while ((map = av->binmap[block]) == 0);

              bin = bin_at (av, (block << BINMAPSHIFT));  // bin for the first bit of this block
              bit = 1;
            }
          /* Advance to bin with set bit. There must be one. */
          // scan the bins of this block until a set bit is found
          while ((bit & map) == 0)
            {
              bin = next_bin (bin);
              bit <<= 1;
              assert (bit != 0);
            }
          /* Inspect the bin. It is likely to be non-empty */
          // take the chunk at the tail of the bin
          victim = last (bin);
          /* If a false alarm (empty bin), clear the bit. */
          // victim equal to the bin head means the bin holds no free chunk after all
          if (victim == bin)
            {
              av->binmap[block] = map &= ~bit; /* Write through */
              bin = next_bin (bin);
              bit <<= 1;
            }
          else
            {
              size = chunksize (victim);

              /*  We know the first chunk in this bin is big enough to use. */
              // the chunk must be large enough to satisfy the request
              assert ((unsigned long) (size) >= (unsigned long) (nb));
    use_top:
      /*
         If large enough, split off the chunk bordering the end of memory
         (held in av->top). Note that this is in accord with the best-fit
         search rule.  In effect, av->top is treated as larger (and thus
         less well fitting) than any other available chunk since it can
         be extended to be as large as necessary (up to system limitations).

         We require that av->top always exists (i.e., has size >=
         MINSIZE) after initialization, so if it would otherwise be
         exhausted by current request, it is replenished. (The main
         reason for ensuring it exists is that we may need MINSIZE space
         to put in fenceposts in sysmalloc.)
       */
      // get the top chunk pointer and its size
      victim = av->top;
      size = chunksize (victim);

      // the split requires size >= nb + MINSIZE. Note that size is compared
      // as an unsigned long: if the top chunk's size field is overwritten
      // with -1 it becomes enormous, the check always passes, and controlling
      // the pointer arithmetic below yields an arbitrary-address write.
      if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
        {
          remainder_size = size - nb;
          remainder = chunk_at_offset (victim, nb);
          av->top = remainder;
          set_head (victim, nb | PREV_INUSE |
                    (av != &main_arena ? NON_MAIN_ARENA : 0));
          set_head (remainder, remainder_size | PREV_INUSE);
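This unsigned comparison is exactly what the House of Force technique abuses. A minimal demonstration, with made-up values (the "request" is just an arbitrarily large number):

#include <stdio.h>

int main (void)
{
  long corrupted = -1;                      /* overwritten top chunk size field */
  unsigned long size = (unsigned long) corrupted;   /* becomes ULONG_MAX */
  unsigned long nb = 0x7fffffffdeadbeefUL;  /* an absurdly large request */
  unsigned long minsize = 0x20;

  if (size >= nb + minsize)                 /* always true when size == -1 */
    printf ("check passed: size=%#lx >= %#lx\n", size, nb + minsize);

  /* av->top would then be moved by nb, i.e. to an attacker-chosen address,
     and the next allocation returns memory there */
  return 0;
}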
      /* When we are using atomic ops to free fast chunks we can get
         here for all block sizes.  */
      // the top chunk cannot satisfy the request either; if free chunks are
      // sitting in the fast bins, consolidate them and retry
      else if (have_fastchunks (av))
        {
          malloc_consolidate (av);
          /* restore original bin index */
          // recompute the bin index from the small/large bin tables
          if (in_smallbin_range (nb))
            idx = smallbin_index (nb);
          else
            idx = largebin_index (nb);
        }
  size = chunksize (p);         // size field of the chunk being freed

  /* Little security check which won't hurt performance: the
     allocator never wrapps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  // security check: reject pointers that would wrap around the end of the
  // address space or are misaligned; costs essentially nothing
  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
      || __builtin_expect (misaligned_chunk (p), 0))
    {
      errstr = "free(): invalid pointer";
    errout:
      if (!have_lock && locked)
        (void) mutex_unlock (&av->mutex);
      malloc_printerr (check_action, errstr, chunk2mem (p), av);
      return;
    }
  /* We know that each chunk is at least MINSIZE bytes in size or a
     multiple of MALLOC_ALIGNMENT.  */
  // reject sizes below the minimum chunk size or not properly aligned
  if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
    {
      errstr = "free(): invalid size";
      goto errout;
    }

  // verify the chunk is in use (via the PREV_INUSE bit of the next chunk)
  check_inuse_chunk(av, p);
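A small sketch of the wrap-around test: for a legitimate chunk, p + size cannot wrap past the end of the address space, so p must satisfy p <= (uintptr_t) -size. The two pointers below are invented values picked to land on either side of the check:

#include <stdio.h>
#include <stdint.h>

int main (void)
{
  uintptr_t size = 0x100;

  uintptr_t ok_p  = 0x555555550000;     /* plausible heap pointer */
  uintptr_t bad_p = (uintptr_t) -0x80;  /* chunk would wrap around */

  printf ("ok_p  rejected: %d\n", ok_p  > (uintptr_t) -size);  /* 0 */
  printf ("bad_p rejected: %d\n", bad_p > (uintptr_t) -size);  /* 1 */
  return 0;
}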
  /*
    If eligible, place chunk on a fastbin so it can be found
    and used quickly in malloc.
  */
  // fast bin path: check whether size is at most get_max_fast()
  // (0x80 by default on 64-bit)
  if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())

#if TRIM_FASTBINS
      /*
        If TRIM_FASTBINS set, don't place chunks
        bordering top into fastbins
      */
      // with TRIM_FASTBINS, a chunk adjacent to the top chunk must not go into the fast bins
      && (chunk_at_offset(p, size) != av->top)
#endif
      ) {
    if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
        || __builtin_expect (chunksize (chunk_at_offset (p, size))
                             >= av->system_mem, 0))
      {
        /* We might not have a lock at this point and concurrent modifications
           of system_mem might have let to a false positive.  Redo the test
           after getting the lock.  */
        if (have_lock
            || ({ assert (locked == 0);
                  mutex_lock(&av->mutex);
                  locked = 1;
                  chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
                    || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
              }))
          {
            errstr = "free(): invalid next size (fast)";
            goto errout;
          }
        if (! have_lock)
          {
            (void)mutex_unlock(&av->mutex);
            locked = 0;
          }
      }
    /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
    mchunkptr old = *fb, old2;
    unsigned int old_idx = ~0u;
    do
      {
        // double free check: only the chunk at the top of the fast bin is
        // compared, so it can be bypassed with free(a); free(b); free(a);
        /* Check that the top of the bin is not the record we are going to add
           (i.e., double free).  */
        if (__builtin_expect (old == p, 0))
          {
            errstr = "double free or corruption (fasttop)";
            goto errout;
          }
        /* Check that size of fastbin chunk at the top is the same as
           size of the chunk that we are adding.  We can dereference OLD
           only if we have the lock, otherwise it might have already been
           deallocated.  See use of OLD_IDX below for the actual check.  */
        if (have_lock && old != NULL)
          old_idx = fastbin_index(chunksize(old));
        p->fd = old2 = old;
      }
    while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
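This is the classic fastbin dup. A minimal sketch, assuming a pre-tcache glibc such as 2.23-2.25 (with tcache, glibc 2.26 and later, the behavior differs); educational illustration only:

#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *a = malloc (0x40);
  void *b = malloc (0x40);

  free (a);         /* fastbin: a */
  free (b);         /* fastbin: b -> a; the top of the bin is now b, not a... */
  free (a);         /* ...so freeing a again passes the fasttop check */

  /* the bin now reads a -> b -> a, so the same chunk comes back twice */
  printf ("%p %p %p\n", malloc (0x40), malloc (0x40), malloc (0x40));
  return 0;
}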
    /*
      Place the chunk in unsorted chunk list. Chunks are
      not placed into regular bins until after they have
      been given one chance to be used in malloc.
    */
    bck = unsorted_chunks(av);      // list head inside the arena
    fwd = bck->fd;                  // most recent chunk to enter the unsorted bin
    if (__glibc_unlikely (fwd->bk != bck))  // abort if that chunk's bk does not point back at the head
      {
        errstr = "free(): corrupted unsorted chunks";
        goto errout;
      }
    // link the chunk into the unsorted bin
    p->fd = fwd;
    p->bk = bck;
    if (!in_smallbin_range(size))   // large-bin-sized chunks carry nextsize pointers; clear them here
      {
        p->fd_nextsize = NULL;
        p->bk_nextsize = NULL;
      }
    bck->fd = p;
    fwd->bk = p;
    /*
      If freeing a large space, consolidate possibly-surrounding
      chunks. Then, if the total unused topmost memory exceeds trim
      threshold, ask malloc_trim to reduce top.

      Unless max_fast is 0, we don't know if there are fastbins
      bordering top, so we cannot tell for sure whether threshold
      has been reached unless fastbins are consolidated.  But we
      don't want to consolidate on each free.  As a compromise,
      consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
      is reached.
    */
    // if size exceeds FASTBIN_CONSOLIDATION_THRESHOLD, consolidate every free fast bin chunk in the heap
    if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD)
      {
        if (have_fastchunks(av))
          malloc_consolidate(av);

        // in the main arena, trim the top chunk once it exceeds the threshold
        if (av == &main_arena)
          {
#ifndef MORECORE_CANNOT_TRIM
            if ((unsigned long)(chunksize(av->top)) >=
                (unsigned long)(mp_.trim_threshold))
              systrim(mp_.top_pad, av);
#endif
          }
        else
          {
            /* Always try heap_trim(), even if the top chunk is not
               large, because the corresponding heap might go away.  */
            heap_info *heap = heap_for_ptr(top(av));