Diffstat (limited to 'source/luametatex/source/libraries/mimalloc/src/alloc.c')
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/alloc.c | 258
1 file changed, 141 insertions(+), 117 deletions(-)
diff --git a/source/luametatex/source/libraries/mimalloc/src/alloc.c b/source/luametatex/source/libraries/mimalloc/src/alloc.c
index 1a36b5da8..348218246 100644
--- a/source/luametatex/source/libraries/mimalloc/src/alloc.c
+++ b/source/luametatex/source/libraries/mimalloc/src/alloc.c
@@ -12,6 +12,7 @@ terms of the MIT license. A copy of the license can be found in the file
#include "mimalloc-internal.h"
#include "mimalloc-atomic.h"
+
#include <string.h> // memset, strlen
#include <stdlib.h> // malloc, exit
@@ -25,11 +26,11 @@ terms of the MIT license. A copy of the license can be found in the file
// Fast allocation in a page: just pop from the free list.
// Fall back to generic allocation only if the list is empty.
-extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
+extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept {
mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
mi_block_t* const block = page->free;
- if (mi_unlikely(block == NULL)) {
- return _mi_malloc_generic(heap, size);
+ if mi_unlikely(block == NULL) {
+ return _mi_malloc_generic(heap, size, zero);
}
mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
// pop from the free list
@@ -37,10 +38,22 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
page->free = mi_block_next(page, block);
mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
-#if (MI_DEBUG>0)
- if (!page->is_zero) { memset(block, MI_DEBUG_UNINIT, size); }
+ // allow use of the block internally
+ // note: when tracking we need to avoid ever touching the MI_PADDING since
+ // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc-track.h`)
+ mi_track_mem_undefined(block, mi_page_usable_block_size(page));
+
+ // zero the block? note: we need to zero the full block size (issue #63)
+ if mi_unlikely(zero) {
+ mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
+ const size_t zsize = (page->is_zero ? sizeof(block->next) + MI_PADDING_SIZE : page->xblock_size);
+ _mi_memzero_aligned(block, zsize - MI_PADDING_SIZE);
+ }
+
+#if (MI_DEBUG>0) && !MI_TRACK_ENABLED
+ if (!page->is_zero && !zero) { memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page)); }
#elif (MI_SECURE!=0)
- block->next = 0; // don't leak internal data
+ if (!zero) { block->next = 0; } // don't leak internal data
#endif
#if (MI_STAT>0)
@@ -55,10 +68,13 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
}
#endif
-#if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST)
+#if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST) && !MI_TRACK_ENABLED
mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
+ #if (MI_DEBUG>1)
mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
+ mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess
+ #endif
padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
padding->delta = (uint32_t)(delta);
uint8_t* fill = (uint8_t*)padding - delta;
@@ -69,8 +85,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
return block;
}
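
The hunks above change the fast path of `_mi_page_malloc` to take a `zero` flag instead of zeroing after the fact. A rough, self-contained sketch of the pattern (hypothetical `page_t`/`block_t` types for illustration, not mimalloc's actual structs):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

typedef struct block_s { struct block_s* next; } block_t;

typedef struct page_s {
    block_t* free;       /* singly-linked free list */
    size_t   block_size; /* full block size for this page's size class */
    bool     is_zero;    /* page memory known to be zero already */
} page_t;

/* Fast path: pop the head of the free list; zero on request. */
static void* page_malloc(page_t* page, bool zero) {
    block_t* const block = page->free;
    if (block == NULL) return NULL;   /* caller falls back to a generic slow path */
    page->free = block->next;         /* pop */
    if (zero) {
        /* if the page is already zero, only the reused link word needs clearing */
        const size_t zsize = page->is_zero ? sizeof(block->next) : page->block_size;
        memset(block, 0, zsize);
    }
    return block;
}

This is what the `zsize` computation in the hunk above exploits: on an `is_zero` page only the free-list link (plus padding bookkeeping) has been dirtied.
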
-// allocate a small block
-extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
mi_assert(heap!=NULL);
mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
mi_assert(size <= MI_SMALL_SIZE_MAX);
@@ -80,7 +95,7 @@ extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_
}
#endif
mi_page_t* page = _mi_heap_get_free_small_page(heap,size + MI_PADDING_SIZE);
- void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE);
+ void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
mi_assert_internal(p==NULL || mi_usable_size(p) >= size);
#if MI_STAT>1
if (p != NULL) {
@@ -88,22 +103,28 @@ extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_
mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
}
#endif
+ mi_track_malloc(p,size,zero);
return p;
}
-extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
+// allocate a small block
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+ return mi_heap_malloc_small_zero(heap, size, false);
+}
+
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
return mi_heap_malloc_small(mi_get_default_heap(), size);
}
// The main allocation function
-extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
- if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
- return mi_heap_malloc_small(heap, size);
+extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
+ if mi_likely(size <= MI_SMALL_SIZE_MAX) {
+ return mi_heap_malloc_small_zero(heap, size, zero);
}
else {
mi_assert(heap!=NULL);
mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
- void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE); // note: size can overflow but it is detected in malloc_generic
+ void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero); // note: size can overflow but it is detected in malloc_generic
mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
#if MI_STAT>1
if (p != NULL) {
@@ -111,55 +132,29 @@ extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size
mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
}
#endif
+ mi_track_malloc(p,size,zero);
return p;
}
}
-extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
- return mi_heap_malloc(mi_get_default_heap(), size);
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+ return _mi_heap_malloc_zero(heap, size, false);
}
-
-void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
- // note: we need to initialize the whole usable block size to zero, not just the requested size,
- // or the recalloc/rezalloc functions cannot safely expand in place (see issue #63)
- MI_UNUSED(size);
- mi_assert_internal(p != NULL);
- mi_assert_internal(mi_usable_size(p) >= size); // size can be zero
- mi_assert_internal(_mi_ptr_page(p)==page);
- if (page->is_zero && size > sizeof(mi_block_t)) {
- // already zero initialized memory
- ((mi_block_t*)p)->next = 0; // clear the free list pointer
- mi_assert_expensive(mi_mem_is_zero(p, mi_usable_size(p)));
- }
- else {
- // otherwise memset
- memset(p, 0, mi_usable_size(p));
- }
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
+ return mi_heap_malloc(mi_get_default_heap(), size);
}
// zero initialized small block
-mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
- void* p = mi_malloc_small(size);
- if (p != NULL) {
- _mi_block_zero_init(_mi_ptr_page(p), p, size); // todo: can we avoid getting the page again?
- }
- return p;
-}
-
-void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
- void* p = mi_heap_malloc(heap,size);
- if (zero && p != NULL) {
- _mi_block_zero_init(_mi_ptr_page(p),p,size); // todo: can we avoid getting the page again?
- }
- return p;
+mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
+ return mi_heap_malloc_small_zero(mi_get_default_heap(), size, true);
}
-extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
return _mi_heap_malloc_zero(heap, size, true);
}
-mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
return mi_heap_zalloc(mi_get_default_heap(),size);
}
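
The net effect of the hunks above is that `mi_malloc`, `mi_zalloc`, and `mi_zalloc_small` all funnel into `_mi_heap_malloc_zero` / `mi_heap_malloc_small_zero`, so zeroing no longer needs the second page lookup that the removed `_mi_block_zero_init` performed. A minimal caller-side check using only the public API:

#include <mimalloc.h>
#include <assert.h>
#include <stddef.h>

int main(void) {
    /* mi_zalloc now allocates with zero == true in one pass,
       rather than allocating and then re-finding the page to memset */
    unsigned char* p = (unsigned char*)mi_zalloc(256);
    assert(p != NULL);
    for (size_t i = 0; i < 256; i++) assert(p[i] == 0);
    mi_free(p);
    return 0;
}
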
@@ -192,16 +187,19 @@ static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, con
return false;
}
+#define mi_track_page(page,access) { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); }
+
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
+ bool is_double_free = false;
mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check: aligned pointer?
(n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
{
// Suspicious: decoded value in block is in the same page (or NULL) -- maybe a double free?
// (continue in separate function to improve code generation)
- return mi_check_is_double_freex(page, block);
+ is_double_free = mi_check_is_double_freex(page, block);
}
- return false;
+ return is_double_free;
}
#else
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
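
For context on `mi_check_is_double_free`: freed blocks store an encoded free-list link in their first word, so a block whose first word already decodes to a plausible in-page pointer has probably been freed before. A toy model of the quick check (the single XOR key and explicit page bounds are illustrative only; mimalloc uses two per-page keys and confirms suspicion with the slower `mi_check_is_double_freex` list walk):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct block_s { uintptr_t xnext; } block_t;  /* first word doubles as the link */

static const uintptr_t key = 0xa5a5a5a5u;
static uintptr_t encode(block_t* b) { return (uintptr_t)b ^ key; }
static block_t*  decode(uintptr_t x) { return (block_t*)(x ^ key); }

static block_t* free_head = NULL;

/* Quick check only: an aligned decoded pointer into the page (or NULL)
   is suspicious; the real code then walks the free lists to confirm. */
static bool looks_freed(block_t* b, void* page_lo, void* page_hi) {
    block_t* n = decode(b->xnext);
    if (((uintptr_t)n & (sizeof(void*) - 1)) != 0) return false;  /* unaligned: fine */
    return n == NULL || ((void*)n >= page_lo && (void*)n < page_hi);
}

static void toy_free(block_t* b, void* page_lo, void* page_hi) {
    if (looks_freed(b, page_lo, page_hi)) {
        fprintf(stderr, "possible double free of %p\n", (void*)b);
        return;
    }
    b->xnext = encode(free_head);  /* push onto the free list, link encoded */
    free_head = b;
}
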
@@ -215,12 +213,19 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block
// Check for heap block overflow by setting up padding at the end of the block
// ---------------------------------------------------------------------------
-#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
+#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST) && !MI_TRACK_ENABLED
static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
*bsize = mi_page_usable_block_size(page);
const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
+ mi_track_mem_defined(padding,sizeof(mi_padding_t));
*delta = padding->delta;
- return ((uint32_t)mi_ptr_encode(page,block,page->keys) == padding->canary && *delta <= *bsize);
+ uint32_t canary = padding->canary;
+ uintptr_t keys[2];
+ keys[0] = page->keys[0];
+ keys[1] = page->keys[1];
+ bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize);
+ mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
+ return ok;
}
// Return the exact usable size of a block.
@@ -242,13 +247,16 @@ static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, si
*size = bsize - delta;
uint8_t* fill = (uint8_t*)block + bsize - delta;
const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes
+ mi_track_mem_defined(fill,maxpad);
for (size_t i = 0; i < maxpad; i++) {
if (fill[i] != MI_DEBUG_PADDING) {
*wrong = bsize - delta + i;
- return false;
+ ok = false;
+ break;
}
}
- return true;
+ mi_track_mem_noaccess(fill,maxpad);
+ return ok;
}
static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
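
The padding scheme verified above places a small `{canary, delta}` trailer after the usable block and fills the gap between the requested and usable size with a known byte. A self-contained sketch under those assumptions (toy canary function, and a fixed 16-byte limit standing in for `MI_MAX_ALIGN_SIZE`):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define FILL_BYTE 0xDE   /* stand-in for MI_DEBUG_PADDING */

typedef struct { uint32_t canary; uint32_t delta; } padding_t;

/* toy canary; the real one encodes the block pointer with per-page keys */
static uint32_t make_canary(const void* block) {
    return (uint32_t)((uintptr_t)block * 2654435761u);
}

/* layout: [ user data | fill bytes | { canary, delta } ]
   where delta = usable block size - requested size */
static void write_padding(void* block, size_t bsize, size_t requested) {
    padding_t* pad = (padding_t*)((uint8_t*)block + bsize);
    pad->canary = make_canary(block);
    pad->delta  = (uint32_t)(bsize - requested);
    memset((uint8_t*)block + requested, FILL_BYTE, bsize - requested);
}

static bool padding_ok(const void* block, size_t bsize) {
    const padding_t* pad = (const padding_t*)((const uint8_t*)block + bsize);
    if (pad->canary != make_canary(block)) return false;   /* trailer smashed */
    size_t delta = pad->delta;
    if (delta > bsize) return false;                       /* implausible delta */
    const uint8_t* fill = (const uint8_t*)block + bsize - delta;
    size_t maxpad = (delta > 16 ? 16 : delta);             /* check first N bytes only */
    for (size_t i = 0; i < maxpad; i++) {
        if (fill[i] != FILL_BYTE) return false;            /* write ran past the end */
    }
    return true;
}
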
@@ -347,14 +355,14 @@ static void mi_stat_huge_free(const mi_page_t* page) {
// Free
// ------------------------------------------------------
-// multi-threaded free
+// multi-threaded free (or free in huge block)
static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
{
// The padding check may access the non-thread-owned page for the key values.
// that is safe as these are constant and the page won't be freed (as the block is not freed yet).
mi_check_padding(page, block);
mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
- #if (MI_DEBUG!=0)
+ #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED // note: when tracking, cannot use mi_usable_size with multi-threading
memset(block, MI_DEBUG_FREED, mi_usable_size(block));
#endif
@@ -372,7 +380,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
do {
use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
- if (mi_unlikely(use_delayed)) {
+ if mi_unlikely(use_delayed) {
// unlikely: this only happens on the first concurrent free in a page that is in the full list
tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
}
@@ -383,7 +391,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
}
} while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
- if (mi_unlikely(use_delayed)) {
+ if mi_unlikely(use_delayed) {
// racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
mi_assert_internal(heap != NULL);
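
The loop above is the usual lock-free push onto the page's `xthread_free` list. Stripped of the delayed-free state bits that mimalloc packs into the same word (which is why its loop also computes `use_delayed`), the pattern looks like this (C11 atomics, hypothetical `block_t`):

#include <stdatomic.h>

typedef struct block_s { struct block_s* next; } block_t;

/* Lock-free push of a freed block onto a list shared with other threads. */
static void thread_free_push(_Atomic(block_t*)* head, block_t* block) {
    block_t* old = atomic_load_explicit(head, memory_order_relaxed);
    do {
        block->next = old;  /* must re-link on every retry */
    } while (!atomic_compare_exchange_weak_explicit(
                 head, &old, block,
                 memory_order_release, memory_order_relaxed));
}
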
@@ -409,20 +417,21 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
{
// and push it on the free list
- if (mi_likely(local)) {
+ //const size_t bsize = mi_page_block_size(page);
+ if mi_likely(local) {
// owning thread can free a block directly
- if (mi_unlikely(mi_check_is_double_free(page, block))) return;
+ if mi_unlikely(mi_check_is_double_free(page, block)) return;
mi_check_padding(page, block);
- #if (MI_DEBUG!=0)
+ #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
#endif
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
page->used--;
- if (mi_unlikely(mi_page_all_free(page))) {
+ if mi_unlikely(mi_page_all_free(page)) {
_mi_page_retire(page);
}
- else if (mi_unlikely(mi_page_is_in_full(page))) {
+ else if mi_unlikely(mi_page_is_in_full(page)) {
_mi_page_unfull(page);
}
}
@@ -444,7 +453,8 @@ mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* p
static void mi_decl_noinline mi_free_generic(const mi_segment_t* segment, bool local, void* p) mi_attr_noexcept {
mi_page_t* const page = _mi_segment_page_of(segment, p);
mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
- mi_stat_free(page, block);
+ mi_stat_free(page, block); // stat_free may access the padding
+ mi_track_free(p);
_mi_free_block(page, local, block);
}
@@ -455,26 +465,26 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
{
MI_UNUSED(msg);
#if (MI_DEBUG>0)
- if (mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0)) {
+ if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) {
_mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
return NULL;
}
#endif
mi_segment_t* const segment = _mi_ptr_segment(p);
- if (mi_unlikely(segment == NULL)) return NULL; // checks also for (p==NULL)
+ if mi_unlikely(segment == NULL) return NULL; // checks also for (p==NULL)
#if (MI_DEBUG>0)
- if (mi_unlikely(!mi_is_in_heap_region(p))) {
+ if mi_unlikely(!mi_is_in_heap_region(p)) {
_mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
"(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
- if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
+ if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
_mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
}
}
#endif
#if (MI_DEBUG>0 || MI_SECURE>=4)
- if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
+ if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
_mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
return NULL;
}
@@ -486,23 +496,24 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
void mi_free(void* p) mi_attr_noexcept
{
mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
- if (mi_unlikely(segment == NULL)) return;
+ if mi_unlikely(segment == NULL) return;
mi_threadid_t tid = _mi_thread_id();
mi_page_t* const page = _mi_segment_page_of(segment, p);
- if (mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
+ if mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0) { // the thread id matches and it is not a full page, nor has aligned blocks
// local, and not full or aligned
mi_block_t* block = (mi_block_t*)(p);
- if (mi_unlikely(mi_check_is_double_free(page,block))) return;
+ if mi_unlikely(mi_check_is_double_free(page,block)) return;
mi_check_padding(page, block);
mi_stat_free(page, block);
- #if (MI_DEBUG!=0)
+ #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
#endif
+ mi_track_free(p);
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
- if (mi_unlikely(--page->used == 0)) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
+ if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
_mi_page_retire(page);
}
}
@@ -513,6 +524,7 @@ void mi_free(void* p) mi_attr_noexcept
}
}
+// return true if successful
bool _mi_free_delayed_block(mi_block_t* block) {
// get segment and page
const mi_segment_t* const segment = _mi_ptr_segment(block);
@@ -525,7 +537,9 @@ bool _mi_free_delayed_block(mi_block_t* block) {
// some blocks may end up in the page `thread_free` list with no blocks in the
// heap `thread_delayed_free` list which may cause the page to be never freed!
// (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`)
- _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* dont overwrite never delayed */);
+ if (!_mi_page_try_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* dont overwrite never delayed */)) {
+ return false;
+ }
// collect all other non-local frees to ensure up-to-date `used` count
_mi_page_free_collect(page, false);
@@ -548,7 +562,7 @@ static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noe
const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
if (segment==NULL) return 0; // also returns 0 if `p == NULL`
const mi_page_t* const page = _mi_segment_page_of(segment, p);
- if (mi_likely(!mi_page_has_aligned(page))) {
+ if mi_likely(!mi_page_has_aligned(page)) {
const mi_block_t* block = (const mi_block_t*)p;
return mi_page_usable_size_of(page, block);
}
@@ -558,7 +572,7 @@ static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noe
}
}
-size_t mi_usable_size(const void* p) mi_attr_noexcept {
+mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept {
return _mi_usable_size(p, "mi_usable_size");
}
@@ -570,6 +584,7 @@ size_t mi_usable_size(const void* p) mi_attr_noexcept {
#ifdef __cplusplus
void* _mi_externs[] = {
(void*)&_mi_page_malloc,
+ (void*)&_mi_heap_malloc_zero,
(void*)&mi_malloc,
(void*)&mi_malloc_small,
(void*)&mi_zalloc_small,
@@ -602,24 +617,24 @@ void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
mi_free(p);
}
-extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count,size,&total)) return NULL;
return mi_heap_zalloc(heap,total);
}
-mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
return mi_heap_calloc(mi_get_default_heap(),count,size);
}
// Uninitialized `calloc`
-extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_malloc(heap, total);
}
-mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
return mi_heap_mallocn(mi_get_default_heap(),count,size);
}
@@ -638,31 +653,40 @@ void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
}
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept {
- const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL
- if (mi_unlikely(newsize <= size && newsize >= (size / 2))) {
+ // if p == NULL then behave as malloc.
+ // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)).
+ // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
+ const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0)
+ if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
// todo: adjust potential padding to reflect the new size?
+ mi_track_free(p);
+ mi_track_malloc(p,newsize,true);
return p; // reallocation still fits and not more than 50% waste
}
void* newp = mi_heap_malloc(heap,newsize);
- if (mi_likely(newp != NULL)) {
+ if mi_likely(newp != NULL) {
if (zero && newsize > size) {
// also set last word in the previous allocation to zero to ensure any padding is zero-initialized
const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
memset((uint8_t*)newp + start, 0, newsize - start);
}
- if (mi_likely(p != NULL)) {
- _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
+ if mi_likely(p != NULL) {
+ if mi_likely(_mi_is_aligned(p, sizeof(uintptr_t))) { // a client may pass in an arbitrary pointer `p`..
+ const size_t copysize = (newsize > size ? size : newsize);
+ mi_track_mem_defined(p,copysize); // _mi_usable_size may be too large for byte-precise memory tracking..
+ _mi_memcpy_aligned(newp, p, copysize);
+ }
mi_free(p); // only free the original pointer if successful
}
}
return newp;
}
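
The new comments pin down the realloc contract: `p == NULL` behaves as malloc, `newsize == 0` yields a zero-sized block rather than NULL, and a NULL return always means failure with `p` left intact. A short caller-side illustration (public API only):

#include <mimalloc.h>

int main(void) {
    char* p = (char*)mi_realloc(NULL, 64);  /* p == NULL: behaves as mi_malloc(64) */
    if (p == NULL) return 1;                /* NULL always indicates an error...   */

    char* q = (char*)mi_realloc(p, 48);     /* shrink within 50%: may return p unchanged */
    if (q == NULL) {                        /* ...and on error the old pointer is NOT freed */
        mi_free(p);
        return 1;
    }
    mi_free(q);
    return 0;
}
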
-void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
return _mi_heap_realloc_zero(heap, p, newsize, false);
}
-void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_realloc(heap, p, total);
@@ -670,41 +694,41 @@ void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_a
// Reallocate but free `p` on errors
-void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
void* newp = mi_heap_realloc(heap, p, newsize);
if (newp==NULL && p!=NULL) mi_free(p);
return newp;
}
-void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
return _mi_heap_realloc_zero(heap, p, newsize, true);
}
-void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_rezalloc(heap, p, total);
}
-void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_realloc(mi_get_default_heap(),p,newsize);
}
-void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
return mi_heap_reallocn(mi_get_default_heap(),p,count,size);
}
// Reallocate but free `p` on errors
-void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_reallocf(mi_get_default_heap(),p,newsize);
}
-void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_rezalloc(mi_get_default_heap(), p, newsize);
}
-void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
return mi_heap_recalloc(mi_get_default_heap(), p, count, size);
}
@@ -715,7 +739,7 @@ void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
// ------------------------------------------------------
// `strdup` using mi_malloc
-mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
if (s == NULL) return NULL;
size_t n = strlen(s);
char* t = (char*)mi_heap_malloc(heap,n+1);
@@ -723,12 +747,12 @@ mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_no
return t;
}
-mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
return mi_heap_strdup(mi_get_default_heap(), s);
}
// `strndup` using mi_malloc
-mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
if (s == NULL) return NULL;
const char* end = (const char*)memchr(s, 0, n); // find end of string in the first `n` characters (returns NULL if not found)
const size_t m = (end != NULL ? (size_t)(end - s) : n); // `m` is the minimum of `n` or the end-of-string
@@ -740,7 +764,7 @@ mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n)
return t;
}
-mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
return mi_heap_strndup(mi_get_default_heap(),s,n);
}
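
A quick usage note for the `strndup` semantics shown above, which copy at most `n` bytes of `s` and always NUL-terminate (public API only):

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
    char* t = mi_strndup("hello world", 5);  /* copies at most 5 bytes, NUL-terminates */
    if (t != NULL) {
        printf("%s\n", t);                   /* prints "hello" */
        mi_free(t);
    }
    return 0;
}
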
@@ -751,7 +775,7 @@ mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
#define PATH_MAX MAX_PATH
#endif
#include <windows.h>
-mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
// todo: use GetFullPathNameW to allow longer file names
char buf[PATH_MAX];
DWORD res = GetFullPathNameA(fname, PATH_MAX, (resolved_name == NULL ? buf : resolved_name), NULL);
@@ -797,7 +821,7 @@ char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name)
}
#endif
-mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
return mi_heap_realpath(mi_get_default_heap(),fname,resolved_name);
}
#endif
@@ -835,8 +859,8 @@ static bool mi_try_new_handler(bool nothrow) {
#else
typedef void (*std_new_handler_t)(void);
-#if (defined(__GNUC__) || defined(__clang__))
-std_new_handler_t __attribute((weak)) _ZSt15get_new_handlerv(void) {
+#if (defined(__GNUC__) || (defined(__clang__) && !defined(_MSC_VER))) // exclude clang-cl, see issue #631
+std_new_handler_t __attribute__((weak)) _ZSt15get_new_handlerv(void) {
return NULL;
}
static std_new_handler_t mi_get_new_handler(void) {
@@ -873,19 +897,19 @@ static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow ) {
return p;
}
-mi_decl_restrict void* mi_new(size_t size) {
+mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
void* p = mi_malloc(size);
- if (mi_unlikely(p == NULL)) return mi_try_new(size,false);
+ if mi_unlikely(p == NULL) return mi_try_new(size,false);
return p;
}
-mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
void* p = mi_malloc(size);
- if (mi_unlikely(p == NULL)) return mi_try_new(size, true);
+ if mi_unlikely(p == NULL) return mi_try_new(size, true);
return p;
}
-mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
+mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
void* p;
do {
p = mi_malloc_aligned(size, alignment);
@@ -894,7 +918,7 @@ mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
return p;
}
-mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept {
void* p;
do {
p = mi_malloc_aligned(size, alignment);
@@ -903,9 +927,9 @@ mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_
return p;
}
-mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
+mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
size_t total;
- if (mi_unlikely(mi_count_size_overflow(count, size, &total))) {
+ if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
return NULL;
}
@@ -914,7 +938,7 @@ mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
}
}
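
`mi_count_size_overflow`, used by `mi_new_n` and `mi_mallocn` above, guards the `count * size` multiplication. Its likely shape (a sketch only, not mimalloc's exact implementation, which also reports the overflow as an error):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Divide first so the multiply can never wrap. */
static bool count_size_overflow(size_t count, size_t size, size_t* total) {
    if (count == 1) { *total = size; return false; }  /* common case, no division */
    if (size > 0 && count > (SIZE_MAX / size)) return true;
    *total = count * size;
    return false;
}
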
-void* mi_new_realloc(void* p, size_t newsize) {
+mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) {
void* q;
do {
q = mi_realloc(p, newsize);
@@ -922,9 +946,9 @@ void* mi_new_realloc(void* p, size_t newsize) {
return q;
}
-void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
+mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
size_t total;
- if (mi_unlikely(mi_count_size_overflow(newcount, size, &total))) {
+ if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) {
mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
return NULL;
}