Diffstat (limited to 'source/luametatex/source/libraries/mimalloc/include/mimalloc-internal.h')
-rw-r--r--  source/luametatex/source/libraries/mimalloc/include/mimalloc-internal.h  90
1 file changed, 67 insertions(+), 23 deletions(-)
diff --git a/source/luametatex/source/libraries/mimalloc/include/mimalloc-internal.h b/source/luametatex/source/libraries/mimalloc/include/mimalloc-internal.h
index d691eca58..550b65433 100644
--- a/source/luametatex/source/libraries/mimalloc/include/mimalloc-internal.h
+++ b/source/luametatex/source/libraries/mimalloc/include/mimalloc-internal.h
@@ -9,6 +9,7 @@ terms of the MIT license. A copy of the license can be found in the file
#define MIMALLOC_INTERNAL_H
#include "mimalloc-types.h"
+#include "mimalloc-track.h"
#if (MI_DEBUG>0)
#define mi_trace_message(...) _mi_trace_message(__VA_ARGS__)
@@ -88,12 +89,14 @@ size_t _mi_os_good_alloc_size(size_t size);
bool _mi_os_has_overcommit(void);
// arena.c
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
void _mi_arena_free(void* p, size_t size, size_t memid, bool is_committed, mi_os_tld_t* tld);
+mi_arena_id_t _mi_arena_id_none(void);
+bool _mi_arena_memid_is_suitable(size_t memid, mi_arena_id_t req_arena_id);
// "segment-cache.c"
-void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
+void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld);
void _mi_segment_cache_collect(bool force, mi_os_tld_t* tld);
void _mi_segment_map_allocated_at(const mi_segment_t* segment);
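Note: the new `mi_arena_id_t req_arena_id` parameter threads a requested arena through both the arena allocator and the segment cache, so an allocation can be restricted to one arena; `_mi_arena_id_none()` keeps the old behavior of allocating from any arena. A minimal caller sketch under that reading; the surrounding variables are illustrative, not from this diff:

    // sketch: allocate an aligned region from any arena (illustrative caller;
    // assumes a valid mi_os_tld_t* tld for the current thread)
    bool commit = true, large = false, is_pinned = false, is_zero = false;
    size_t memid = 0;
    void* start = _mi_arena_alloc_aligned(MI_SEGMENT_SIZE, MI_SEGMENT_SIZE,
                                          &commit, &large, &is_pinned, &is_zero,
                                          _mi_arena_id_none(),  // no arena restriction
                                          &memid, tld);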
@@ -115,16 +118,18 @@ void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t*
// "page.c"
-void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc;
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept mi_attr_malloc;
void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks
void _mi_page_unfull(mi_page_t* page);
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page
void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
-void _mi_heap_delayed_free(mi_heap_t* heap);
+void _mi_heap_delayed_free_all(mi_heap_t* heap);
+bool _mi_heap_delayed_free_partial(mi_heap_t* heap);
void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
+bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
void _mi_deferred_free(mi_heap_t* heap, bool force);
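Note: `_mi_malloc_generic` (like `_mi_page_malloc` in the alloc.c hunk below) now takes a `zero` flag, so zero-initialized allocation happens inside the allocation path itself; this is also why the separate `_mi_block_zero_init` helper disappears further down. The delayed-free entry point is likewise split into an `_all` and a `_partial` variant, with a `try` form of the delayed-free mode setter. A hypothetical call under that reading:

    // sketch: a calloc-style slow path asks the generic allocator to
    // zero the block directly (hypothetical caller)
    void* p = _mi_malloc_generic(heap, size, true /* zero */);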
@@ -138,6 +143,7 @@ uint8_t _mi_bin(size_t size); // for stats
void _mi_heap_destroy_pages(mi_heap_t* heap);
void _mi_heap_collect_abandon(mi_heap_t* heap);
void _mi_heap_set_default_direct(mi_heap_t* heap);
+bool _mi_heap_memid_is_suitable(mi_heap_t* heap, size_t memid);
// "stats.c"
void _mi_stats_done(mi_stats_t* stats);
@@ -147,12 +153,11 @@ mi_msecs_t _mi_clock_end(mi_msecs_t start);
mi_msecs_t _mi_clock_start(void);
// "alloc.c"
-void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_malloc_generic`
+void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept; // called from `_mi_malloc_generic`
void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept;
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
bool _mi_free_delayed_block(mi_block_t* block);
-void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size);
#if MI_DEBUG>1
bool _mi_page_is_valid(mi_page_t* page);
@@ -164,8 +169,11 @@ bool _mi_page_is_valid(mi_page_t* page);
// ------------------------------------------------------
#if defined(__GNUC__) || defined(__clang__)
-#define mi_unlikely(x) __builtin_expect(!!(x),false)
-#define mi_likely(x) __builtin_expect(!!(x),true)
+#define mi_unlikely(x) (__builtin_expect(!!(x),false))
+#define mi_likely(x) (__builtin_expect(!!(x),true))
+#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
+#define mi_unlikely(x) (x) [[unlikely]]
+#define mi_likely(x) (x) [[likely]]
#else
#define mi_unlikely(x) (x)
#define mi_likely(x) (x)
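Note: this macro change explains the call-site edits throughout the rest of the diff. With the new C++20 branch, `mi_likely(x)` expands to `(x) [[likely]]`, which is only valid when the macro itself forms the `if` condition. So `if (mi_unlikely(cond)) ...` becomes `if mi_unlikely(cond) { ... }`, and uses inside expressions (such as the ternaries in `mi_ptr_decode`/`mi_ptr_encode` below) are dropped, since an attribute cannot appear there. For example:

    // call site as written after this change:
    if mi_unlikely(p == NULL) { return; }
    // GCC/Clang expansion:
    if ((__builtin_expect(!!(p == NULL), false))) { return; }
    // C++20 expansion:
    if (p == NULL) [[unlikely]] { return; }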
@@ -224,6 +232,12 @@ static inline bool _mi_is_power_of_two(uintptr_t x) {
return ((x & (x - 1)) == 0);
}
+// Is a pointer aligned?
+static inline bool _mi_is_aligned(void* p, size_t alignment) {
+ mi_assert_internal(alignment != 0);
+ return (((uintptr_t)p % alignment) == 0);
+}
+
// Align upwards
static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
mi_assert_internal(alignment != 0);
@@ -289,8 +303,8 @@ static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
#define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t))) // sqrt(SIZE_MAX)
*total = count * size;
- return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW)
- && size > 0 && (SIZE_MAX / size) < count);
+ // note: gcc/clang optimize this to directly check the overflow flag
+ return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW) && size > 0 && (SIZE_MAX / size) < count);
}
#endif
@@ -300,8 +314,10 @@ static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* tot
*total = size;
return false;
}
- else if (mi_unlikely(mi_mul_overflow(count, size, total))) {
+ else if mi_unlikely(mi_mul_overflow(count, size, total)) {
+ #if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size);
+ #endif
*total = SIZE_MAX;
return true;
}
@@ -372,7 +388,7 @@ extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate
static inline mi_heap_t* mi_get_default_heap(void) {
#if defined(MI_TLS_SLOT)
mi_heap_t* heap = (mi_heap_t*)mi_tls_slot(MI_TLS_SLOT);
- if (mi_unlikely(heap == NULL)) {
+ if mi_unlikely(heap == NULL) {
#ifdef __GNUC__
__asm(""); // prevent conditional load of the address of _mi_heap_empty
#endif
@@ -486,7 +502,7 @@ static inline mi_page_t* _mi_ptr_page(void* p) {
static inline size_t mi_page_block_size(const mi_page_t* page) {
const size_t bsize = page->xblock_size;
mi_assert_internal(bsize > 0);
- if (mi_likely(bsize < MI_HUGE_BLOCK_SIZE)) {
+ if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) {
return bsize;
}
else {
@@ -649,30 +665,36 @@ static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {
static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) {
void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]);
- return (mi_unlikely(p==null) ? NULL : p);
+ return (p==null ? NULL : p);
}
static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) {
- uintptr_t x = (uintptr_t)(mi_unlikely(p==NULL) ? null : p);
+ uintptr_t x = (uintptr_t)(p==NULL ? null : p);
return mi_rotl(x ^ keys[1], keys[0]) + keys[0];
}
static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) {
+ mi_track_mem_defined(block,sizeof(mi_block_t));
+ mi_block_t* next;
#ifdef MI_ENCODE_FREELIST
- return (mi_block_t*)mi_ptr_decode(null, block->next, keys);
+ next = (mi_block_t*)mi_ptr_decode(null, block->next, keys);
#else
MI_UNUSED(keys); MI_UNUSED(null);
- return (mi_block_t*)block->next;
+ next = (mi_block_t*)block->next;
#endif
+ mi_track_mem_noaccess(block,sizeof(mi_block_t));
+ return next;
}
static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) {
+ mi_track_mem_undefined(block,sizeof(mi_block_t));
#ifdef MI_ENCODE_FREELIST
block->next = mi_ptr_encode(null, next, keys);
#else
MI_UNUSED(keys); MI_UNUSED(null);
block->next = (mi_encoded_t)next;
#endif
+ mi_track_mem_noaccess(block,sizeof(mi_block_t));
}
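Note: the `mi_track_mem_*` calls come from the newly included mimalloc-track.h and bracket every access to a free block's `next` field, so an external memory tracker can keep freed blocks marked inaccessible between free-list operations. A plausible shape for those macros, assuming a Valgrind-backed tracking build (in a plain build they would compile to no-ops); this is a sketch, not the file's actual contents:

    // sketch, assuming MI_TRACK_ENABLED maps to Valgrind's memcheck requests:
    #include <valgrind/memcheck.h>
    #define mi_track_mem_defined(p,size)    VALGRIND_MAKE_MEM_DEFINED(p,size)
    #define mi_track_mem_undefined(p,size)  VALGRIND_MAKE_MEM_UNDEFINED(p,size)
    #define mi_track_mem_noaccess(p,size)   VALGRIND_MAKE_MEM_NOACCESS(p,size)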
static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) {
@@ -680,7 +702,7 @@ static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t*
mi_block_t* next = mi_block_nextx(page,block,page->keys);
// check for free list corruption: is `next` at least in the same page?
// TODO: check if `next` is `page->block_size` aligned?
- if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) {
+ if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) {
_mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
next = NULL;
}
@@ -779,12 +801,12 @@ size_t _mi_os_numa_node_count_get(void);
extern _Atomic(size_t) _mi_numa_node_count;
static inline int _mi_os_numa_node(mi_os_tld_t* tld) {
- if (mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1)) return 0;
+ if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
else return _mi_os_numa_node_get(tld);
}
static inline size_t _mi_os_numa_node_count(void) {
const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count);
- if (mi_likely(count>0)) return count;
+ if mi_likely(count > 0) { return count; }
else return _mi_os_numa_node_count_get();
}
@@ -1003,7 +1025,7 @@ static inline size_t mi_bsr(uintptr_t x) {
// (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017). See also issue #201 and pr #253.
// ---------------------------------------------------------------------------------
-#if defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
+#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
#include <intrin.h>
#include <string.h>
extern bool _mi_cpu_has_fsrm;
@@ -1012,7 +1034,15 @@ static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
__movsb((unsigned char*)dst, (const unsigned char*)src, n);
}
else {
- memcpy(dst, src, n); // todo: use noinline?
+ memcpy(dst, src, n);
+ }
+}
+static inline void _mi_memzero(void* dst, size_t n) {
+ if (_mi_cpu_has_fsrm) {
+ __stosb((unsigned char*)dst, 0, n);
+ }
+ else {
+ memset(dst, 0, n);
}
}
#else
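Note: the `__movsb`/`__stosb` intrinsics compile to `rep movsb`/`rep stosb`, which are fast on CPUs with the FSRM feature mentioned in the comment above; the added `!MI_TRACK_ENABLED` guard falls back to plain `memcpy`/`memset` so a memory tracker can intercept the calls. FSRM is reported in CPUID leaf 7, sub-leaf 0, EDX bit 4; a sketch of how `_mi_cpu_has_fsrm` could be initialized (the helper name is hypothetical, the detection logic is standard):

    // sketch: FSRM detection on MSVC (hypothetical init helper)
    #include <intrin.h>
    static bool mi_detect_fsrm(void) {
      int cpu_info[4];
      __cpuidex(cpu_info, 7, 0);               // leaf 7, sub-leaf 0
      return ((cpu_info[3] & (1 << 4)) != 0);  // EDX (cpu_info[3]), bit 4 = FSRM
    }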
@@ -1020,6 +1050,9 @@ static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
memcpy(dst, src, n);
}
+static inline void _mi_memzero(void* dst, size_t n) {
+ memset(dst, 0, n);
+}
#endif
@@ -1037,12 +1070,23 @@ static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
const void* asrc = __builtin_assume_aligned(src, MI_INTPTR_SIZE);
_mi_memcpy(adst, asrc, n);
}
+
+static inline void _mi_memzero_aligned(void* dst, size_t n) {
+ mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
+ void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
+ _mi_memzero(adst, n);
+}
#else
// Default fallback on `_mi_memcpy`
static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
_mi_memcpy(dst, src, n);
}
+
+static inline void _mi_memzero_aligned(void* dst, size_t n) {
+ mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
+ _mi_memzero(dst, n);
+}
#endif
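Note: the `_aligned` variants only add a compiler hint: on GCC/Clang, `__builtin_assume_aligned` lets the compiler assume `MI_INTPTR_SIZE` alignment and emit wider aligned stores, while behavior is otherwise identical to `_mi_memzero`. A trivial usage sketch:

    // sketch: callers must guarantee the alignment they promise
    uintptr_t buf[8];                        // naturally MI_INTPTR_SIZE aligned
    _mi_memzero_aligned(buf, sizeof(buf));   // may compile to aligned word stores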