Diffstat (limited to 'source/luametatex/source/libraries/mimalloc/include')
-rw-r--r--  source/luametatex/source/libraries/mimalloc/include/mimalloc.h          | 68
-rw-r--r--  source/luametatex/source/libraries/mimalloc/include/mimalloc/atomic.h   | 54
-rw-r--r--  source/luametatex/source/libraries/mimalloc/include/mimalloc/internal.h | 84
-rw-r--r--  source/luametatex/source/libraries/mimalloc/include/mimalloc/prim.h     | 32
-rw-r--r--  source/luametatex/source/libraries/mimalloc/include/mimalloc/track.h    |  2
-rw-r--r--  source/luametatex/source/libraries/mimalloc/include/mimalloc/types.h    | 75
6 files changed, 220 insertions(+), 95 deletions(-)
diff --git a/source/luametatex/source/libraries/mimalloc/include/mimalloc.h b/source/luametatex/source/libraries/mimalloc/include/mimalloc.h
index 800cfd7e4..f77c2ea17 100644
--- a/source/luametatex/source/libraries/mimalloc/include/mimalloc.h
+++ b/source/luametatex/source/libraries/mimalloc/include/mimalloc.h
@@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_H
#define MIMALLOC_H
-#define MI_MALLOC_VERSION 211 // major + 2 digits minor
+#define MI_MALLOC_VERSION 212 // major + 2 digits minor
// ------------------------------------------------------
// Compiler specific attributes
@@ -284,7 +284,7 @@ mi_decl_export int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node,
mi_decl_export int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
-#if MI_MALLOC_VERSION >= 200
+#if MI_MALLOC_VERSION >= 182
// Create a heap that only allocates in the specified arena
mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id);
#endif
@@ -318,35 +318,40 @@ mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size
typedef enum mi_option_e {
// stable options
- mi_option_show_errors,
- mi_option_show_stats,
- mi_option_verbose,
- // some of the following options are experimental
- // (deprecated options are kept for binary backward compatibility with v1.x versions)
- mi_option_eager_commit,
- mi_option_deprecated_eager_region_commit,
- mi_option_deprecated_reset_decommits,
- mi_option_large_os_pages, // use large (2MiB) OS pages, implies eager commit
- mi_option_reserve_huge_os_pages, // reserve N huge OS pages (1GiB) at startup
+ mi_option_show_errors, // print error messages
+ mi_option_show_stats, // print statistics on termination
+ mi_option_verbose, // print verbose messages
+ // the following options are experimental (see src/options.h)
+ mi_option_eager_commit, // eager commit segments? (after `eager_commit_delay` segments) (=1)
+ mi_option_arena_eager_commit, // eager commit arenas? Use 2 to enable just on overcommit systems (=2)
+ mi_option_purge_decommits, // should a memory purge decommit (or only reset) (=1)
+ mi_option_allow_large_os_pages, // allow large (2MiB) OS pages, implies eager commit
+ mi_option_reserve_huge_os_pages, // reserve N huge OS pages (1GiB/page) at startup
mi_option_reserve_huge_os_pages_at, // reserve huge OS pages at a specific NUMA node
- mi_option_reserve_os_memory, // reserve specified amount of OS memory at startup
+ mi_option_reserve_os_memory, // reserve specified amount of OS memory in an arena at startup
mi_option_deprecated_segment_cache,
- mi_option_page_reset,
- mi_option_abandoned_page_decommit,
- mi_option_deprecated_segment_reset,
- mi_option_eager_commit_delay,
- mi_option_decommit_delay,
- mi_option_use_numa_nodes, // 0 = use available numa nodes, otherwise use at most N nodes.
- mi_option_limit_os_alloc, // 1 = do not use OS memory for allocation (but only reserved arenas)
- mi_option_os_tag,
- mi_option_max_errors,
- mi_option_max_warnings,
- mi_option_max_segment_reclaim,
- mi_option_allow_decommit,
- mi_option_segment_decommit_delay,
- mi_option_decommit_extend_delay,
- mi_option_destroy_on_exit,
- _mi_option_last
+ mi_option_deprecated_page_reset,
+ mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination
+ mi_option_deprecated_segment_reset,
+ mi_option_eager_commit_delay,
+ mi_option_purge_delay, // memory purging is delayed by N milliseconds; use 0 for immediate purging or -1 for no purging at all.
+ mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes.
+ mi_option_limit_os_alloc, // 1 = do not use OS memory for allocation (but only programmatically reserved arenas)
+ mi_option_os_tag, // tag used for OS logging (macOS only for now)
+ mi_option_max_errors, // issue at most N error messages
+ mi_option_max_warnings, // issue at most N warning messages
+ mi_option_max_segment_reclaim,
+ mi_option_destroy_on_exit, // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe.
+ mi_option_arena_reserve, // initial memory size in KiB for arena reservation (1GiB on 64-bit)
+ mi_option_arena_purge_mult,
+ mi_option_purge_extend_delay,
+ _mi_option_last,
+ // legacy option names
+ mi_option_large_os_pages = mi_option_allow_large_os_pages,
+ mi_option_eager_region_commit = mi_option_arena_eager_commit,
+ mi_option_reset_decommits = mi_option_purge_decommits,
+ mi_option_reset_delay = mi_option_purge_delay,
+ mi_option_abandoned_page_reset = mi_option_abandoned_page_purge
} mi_option_t;
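
As a usage sketch (not part of this commit): because the legacy names at the
end of the enum are plain enum aliases, code written against the old option
names keeps compiling and targets the renamed options.

    #include <mimalloc.h>

    void configure_purging(void) {
      mi_option_set(mi_option_purge_delay, 100);  // new name: purge after ~100ms
      mi_option_set(mi_option_reset_delay, 100);  // legacy alias of the same option
    }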
@@ -356,8 +361,9 @@ mi_decl_export void mi_option_disable(mi_option_t option);
mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable);
mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable);
-mi_decl_nodiscard mi_decl_export long mi_option_get(mi_option_t option);
-mi_decl_nodiscard mi_decl_export long mi_option_get_clamp(mi_option_t option, long min, long max);
+mi_decl_nodiscard mi_decl_export long mi_option_get(mi_option_t option);
+mi_decl_nodiscard mi_decl_export long mi_option_get_clamp(mi_option_t option, long min, long max);
+mi_decl_nodiscard mi_decl_export size_t mi_option_get_size(mi_option_t option);
mi_decl_export void mi_option_set(mi_option_t option, long value);
mi_decl_export void mi_option_set_default(mi_option_t option, long value);
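
A hedged sketch of the new size_t accessor next to the existing long-valued
one; per the enum comments above, mi_option_arena_reserve holds a size
specified in KiB:

    long   verbose       = mi_option_get(mi_option_verbose);
    size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);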
diff --git a/source/luametatex/source/libraries/mimalloc/include/mimalloc/atomic.h b/source/luametatex/source/libraries/mimalloc/include/mimalloc/atomic.h
index fe79fbcaf..fe418fab3 100644
--- a/source/luametatex/source/libraries/mimalloc/include/mimalloc/atomic.h
+++ b/source/luametatex/source/libraries/mimalloc/include/mimalloc/atomic.h
@@ -39,7 +39,11 @@ terms of the MIT license. A copy of the license can be found in the file
#include <stdatomic.h>
#define mi_atomic(name) atomic_##name
#define mi_memory_order(name) memory_order_##name
-#define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x)
+#if !defined(ATOMIC_VAR_INIT) || (__STDC_VERSION__ >= 201710L) // c17, see issue #735
+ #define MI_ATOMIC_VAR_INIT(x) x
+#else
+ #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x)
+#endif
#endif
// Various defines for all used memory orders in mimalloc
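
A minimal sketch of what the new initializer guard means in practice:
ATOMIC_VAR_INIT was deprecated in C17 (and later removed), so under C17 and
newer the macro expands to the bare value.

    static _Atomic(uintptr_t) counter = MI_ATOMIC_VAR_INIT(0);
    // C11:  expands to ATOMIC_VAR_INIT(0)
    // C17+: expands to plain 0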
@@ -113,11 +117,13 @@ static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) {
}
// Used by timers
-#define mi_atomic_loadi64_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire))
-#define mi_atomic_loadi64_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
-#define mi_atomic_storei64_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release))
-#define mi_atomic_storei64_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))
+#define mi_atomic_loadi64_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire))
+#define mi_atomic_loadi64_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
+#define mi_atomic_storei64_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release))
+#define mi_atomic_storei64_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))
+#define mi_atomic_casi64_strong_acq_rel(p,e,d) mi_atomic_cas_strong_acq_rel(p,e,d)
+#define mi_atomic_addi64_acq_rel(p,i) mi_atomic_add_acq_rel(p,i)
#elif defined(_MSC_VER)
@@ -245,6 +251,21 @@ static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t
} while (current < x && _InterlockedCompareExchange64(p, x, current) != current);
}
+static inline void mi_atomic_addi64_acq_rel(volatile _Atomic(int64_t*)p, int64_t i) {
+ mi_atomic_addi64_relaxed(p, i);
+}
+
+static inline bool mi_atomic_casi64_strong_acq_rel(volatile _Atomic(int64_t*)p, int64_t* exp, int64_t des) {
+ int64_t read = _InterlockedCompareExchange64(p, des, *exp);
+ if (read == *exp) {
+ return true;
+ }
+ else {
+ *exp = read;
+ return false;
+ }
+}
+
// The pointer macros cast to `uintptr_t`.
#define mi_atomic_load_ptr_acquire(tp,p) (tp*)mi_atomic_load_acquire((_Atomic(uintptr_t)*)(p))
#define mi_atomic_load_ptr_relaxed(tp,p) (tp*)mi_atomic_load_relaxed((_Atomic(uintptr_t)*)(p))
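
The MSVC shim above follows C11 compare-exchange semantics: on failure the
observed value is written back through exp, so a retry loop needs no separate
reload. A hedged sketch (atomic_max64 is an illustrative name, not part of
the patch):

    static void atomic_max64(volatile _Atomic(int64_t)* p, int64_t x) {
      int64_t expected = mi_atomic_loadi64_relaxed(p);
      while (expected < x && !mi_atomic_casi64_strong_acq_rel(p, &expected, x)) {
        // the failed CAS refreshed 'expected'; loop and retry
      }
    }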
@@ -281,9 +302,20 @@ typedef _Atomic(uintptr_t) mi_atomic_once_t;
static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
if (mi_atomic_load_relaxed(once) != 0) return false; // quick test
uintptr_t expected = 0;
- return mi_atomic_cas_strong_acq_rel(once, &expected, 1); // try to set to 1
+ return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1
}
+typedef _Atomic(uintptr_t) mi_atomic_guard_t;
+
+// Allows only one thread to execute at a time
+#define mi_atomic_guard(guard) \
+ uintptr_t _mi_guard_expected = 0; \
+ for(bool _mi_guard_once = true; \
+ _mi_guard_once && mi_atomic_cas_strong_acq_rel(guard,&_mi_guard_expected,(uintptr_t)1); \
+ (mi_atomic_store_release(guard,(uintptr_t)0), _mi_guard_once = false) )
+
+
+
// Yield
#if defined(__cplusplus)
#include <thread>
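
A hedged sketch of the guard idiom defined above: the CAS in the for-header
admits at most one thread into the body, contending threads skip it without
blocking, and the loop-increment expression releases the guard (purge_guard
is an illustrative name):

    static mi_atomic_guard_t purge_guard;

    void purge_once_at_a_time(void) {
      mi_atomic_guard(&purge_guard) {
        // runs in at most one thread at a time; others fall through
      }
    }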
@@ -303,7 +335,7 @@ static inline void mi_atomic_yield(void) {
}
#elif (defined(__GNUC__) || defined(__clang__)) && \
(defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__armel__) || defined(__ARMEL__) || \
- defined(__aarch64__) || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__))
+ defined(__aarch64__) || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__)) || defined(__POWERPC__)
#if defined(__x86_64__) || defined(__i386__)
static inline void mi_atomic_yield(void) {
__asm__ volatile ("pause" ::: "memory");
@@ -316,10 +348,16 @@ static inline void mi_atomic_yield(void) {
static inline void mi_atomic_yield(void) {
__asm__ volatile("yield" ::: "memory");
}
-#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__)
+#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__)
+#ifdef __APPLE__
+static inline void mi_atomic_yield(void) {
+ __asm__ volatile ("or r27,r27,r27" ::: "memory");
+}
+#else
static inline void mi_atomic_yield(void) {
__asm__ __volatile__ ("or 27,27,27" ::: "memory");
}
+#endif
#elif defined(__armel__) || defined(__ARMEL__)
static inline void mi_atomic_yield(void) {
__asm__ volatile ("nop" ::: "memory");
diff --git a/source/luametatex/source/libraries/mimalloc/include/mimalloc/internal.h b/source/luametatex/source/libraries/mimalloc/include/mimalloc/internal.h
index a4495c161..00d262609 100644
--- a/source/luametatex/source/libraries/mimalloc/include/mimalloc/internal.h
+++ b/source/luametatex/source/libraries/mimalloc/include/mimalloc/internal.h
@@ -80,49 +80,52 @@ extern mi_decl_cache_align mi_stats_t _mi_stats_main;
extern mi_decl_cache_align const mi_page_t _mi_page_empty;
bool _mi_is_main_thread(void);
size_t _mi_current_thread_count(void);
-bool _mi_preloading(void); // true while the C runtime is not ready
+bool _mi_preloading(void); // true while the C runtime is not initialized yet
mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
-mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
+mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
void _mi_thread_done(mi_heap_t* heap);
+void _mi_thread_data_collect(void);
// os.c
-void _mi_os_init(void); // called from process init
-void* _mi_os_alloc(size_t size, mi_stats_t* stats); // to allocate thread local data
-void _mi_os_free(void* p, size_t size, mi_stats_t* stats); // to free thread local data
+void _mi_os_init(void); // called from process init
+void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
+void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
+void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats);
+
size_t _mi_os_page_size(void);
size_t _mi_os_good_alloc_size(size_t size);
bool _mi_os_has_overcommit(void);
+bool _mi_os_has_virtual_reserve(void);
+bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats);
bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
bool _mi_os_protect(void* addr, size_t size);
bool _mi_os_unprotect(void* addr, size_t size);
+bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats);
+bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats);
+
+void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats);
+void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* tld_stats);
-void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_stats_t* stats);
-void* _mi_os_alloc_aligned_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool* large, mi_stats_t* tld_stats);
-void _mi_os_free_aligned(void* p, size_t size, size_t alignment, size_t align_offset, bool was_committed, mi_stats_t* tld_stats);
void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
bool _mi_os_use_large_page(size_t size, size_t alignment);
size_t _mi_os_large_page_size(void);
-void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats);
-void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize);
-void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);
+void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);
// arena.c
mi_arena_id_t _mi_arena_id_none(void);
-void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t memid, bool all_committed, mi_stats_t* stats);
-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
-bool _mi_arena_memid_is_suitable(size_t arena_memid, mi_arena_id_t request_arena_id);
-bool _mi_arena_is_os_allocated(size_t arena_memid);
-
-// "segment-cache.c"
-void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool large_allowed, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
-bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld);
-void _mi_segment_cache_collect(bool force, mi_os_tld_t* tld);
-void _mi_segment_cache_free_all(mi_os_tld_t* tld);
+void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid, mi_stats_t* stats);
+void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
+bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
+bool _mi_arena_contains(const void* p);
+void _mi_arena_collect(bool force_purge, mi_stats_t* stats);
+void _mi_arena_unsafe_destroy_all(mi_stats_t* stats);
+
+// "segment-map.c"
void _mi_segment_map_allocated_at(const mi_segment_t* segment);
void _mi_segment_map_freed_at(const mi_segment_t* segment);
@@ -170,8 +173,8 @@ uint8_t _mi_bin(size_t size); // for stats
void _mi_heap_destroy_pages(mi_heap_t* heap);
void _mi_heap_collect_abandon(mi_heap_t* heap);
void _mi_heap_set_default_direct(mi_heap_t* heap);
-bool _mi_heap_memid_is_suitable(mi_heap_t* heap, size_t memid);
-void _mi_heap_destroy_all(void);
+bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
+void _mi_heap_unsafe_destroy_all(void);
// "stats.c"
void _mi_stats_done(mi_stats_t* stats);
@@ -266,6 +269,10 @@ bool _mi_page_is_valid(mi_page_t* page);
#define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x)
+#include <string.h>
+// initialize a local variable to zero; use memset as compilers optimize constant-sized memsets
+#define _mi_memzero_var(x) memset(&x,0,sizeof(x))
+
// Is `x` a power of two? (0 is considered a power of two)
static inline bool _mi_is_power_of_two(uintptr_t x) {
return ((x & (x - 1)) == 0);
@@ -308,7 +315,7 @@ static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) {
}
// Is memory zero initialized?
-static inline bool mi_mem_is_zero(void* p, size_t size) {
+static inline bool mi_mem_is_zero(const void* p, size_t size) {
for (size_t i = 0; i < size; i++) {
if (((uint8_t*)p)[i] != 0) return false;
}
@@ -727,6 +734,29 @@ size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx);
+/* -----------------------------------------------------------
+ memory id's
+----------------------------------------------------------- */
+
+static inline mi_memid_t _mi_memid_create(mi_memkind_t memkind) {
+ mi_memid_t memid;
+ _mi_memzero_var(memid);
+ memid.memkind = memkind;
+ return memid;
+}
+
+static inline mi_memid_t _mi_memid_none(void) {
+ return _mi_memid_create(MI_MEM_NONE);
+}
+
+static inline mi_memid_t _mi_memid_create_os(bool committed, bool is_zero, bool is_large) {
+ mi_memid_t memid = _mi_memid_create(MI_MEM_OS);
+ memid.initially_committed = committed;
+ memid.initially_zero = is_zero;
+ memid.is_pinned = is_large;
+ return memid;
+}
+
// -------------------------------------------------------------------
// Fast "random" shuffle
@@ -887,7 +917,6 @@ static inline size_t mi_bsr(uintptr_t x) {
#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
#include <intrin.h>
-#include <string.h>
extern bool _mi_cpu_has_fsrm;
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
if (_mi_cpu_has_fsrm) {
@@ -906,7 +935,6 @@ static inline void _mi_memzero(void* dst, size_t n) {
}
}
#else
-#include <string.h>
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
memcpy(dst, src, n);
}
@@ -915,7 +943,6 @@ static inline void _mi_memzero(void* dst, size_t n) {
}
#endif
-
// -------------------------------------------------------------------------------
// The `_mi_memcpy_aligned` can be used if the pointers are machine-word aligned
// This is used for example in `mi_realloc`.
@@ -923,7 +950,6 @@ static inline void _mi_memzero(void* dst, size_t n) {
#if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__)
// On GCC/CLang we provide a hint that the pointers are word aligned.
-#include <string.h>
static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
diff --git a/source/luametatex/source/libraries/mimalloc/include/mimalloc/prim.h b/source/luametatex/source/libraries/mimalloc/include/mimalloc/prim.h
index 10378c922..9e560696f 100644
--- a/source/luametatex/source/libraries/mimalloc/include/mimalloc/prim.h
+++ b/source/luametatex/source/libraries/mimalloc/include/mimalloc/prim.h
@@ -14,7 +14,7 @@ terms of the MIT license. A copy of the license can be found in the file
// Each OS/host needs to implement these primitives, see `src/prim`
// for implementations on Windows, macOS, WASI, and Linux/Unix.
//
-// note: on all primitive functions, we always get:
+// note: on all primitive functions, we always have result parameters != NULL, and:
// addr != NULL and page aligned
// size > 0 and page aligned
// return value is an error code: an int where 0 is success.
@@ -22,11 +22,12 @@ terms of the MIT license. A copy of the license can be found in the file
// OS memory configuration
typedef struct mi_os_mem_config_s {
- size_t page_size; // 4KiB
- size_t large_page_size; // 2MiB
- size_t alloc_granularity; // smallest allocation size (on Windows 64KiB)
- bool has_overcommit; // can we reserve more memory than can be actually committed?
- bool must_free_whole; // must allocated blocks free as a whole (false for mmap, true for VirtualAlloc)
+ size_t page_size; // 4KiB
+ size_t large_page_size; // 2MiB
+ size_t alloc_granularity; // smallest allocation size (on Windows 64KiB)
+ bool has_overcommit; // can we reserve more memory than can be actually committed?
+ bool must_free_whole; // must allocated blocks be freed as a whole (false for mmap, true for VirtualAlloc)
+ bool has_virtual_reserve; // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory)
} mi_os_mem_config_t;
// Initialize
@@ -37,12 +38,23 @@ int _mi_prim_free(void* addr, size_t size );
// Allocate OS memory. Return NULL on error.
// The `try_alignment` is just a hint and the returned pointer does not have to be aligned.
+// If `commit` is false, the virtual memory range only needs to be reserved (with no access)
+// which will later be committed explicitly using `_mi_prim_commit`.
+// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
// pre: !commit => !allow_large
// try_alignment >= _mi_os_page_size() and a power of 2
-int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, void** addr);
+int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr);
// Commit memory. Returns error code or 0 on success.
-int _mi_prim_commit(void* addr, size_t size, bool commit);
+// For example, on Linux this would make the memory PROT_READ|PROT_WRITE.
+// `is_zero` is set to true if the memory was zero initialized (e.g. on Windows)
+int _mi_prim_commit(void* addr, size_t size, bool* is_zero);
+
+// Decommit memory. Returns error code or 0 on success. The `needs_recommit` result is true
+// if the memory would need to be re-committed. For example, on Windows this is always true,
+// but on Linux we could use MADV_DONTNEED to decommit which does not need a recommit.
+// pre: needs_recommit != NULL
+int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit);
// Reset memory. The range keeps being accessible but the content might be reset.
// Returns error code or 0 on success.
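
A hedged end-to-end sketch of the reserve/commit/decommit protocol these
primitives describe (error handling elided; all names are locals):

    size_t size = 4 * _mi_os_page_size();   // page aligned, as required
    void*  addr = NULL;
    bool   is_large = false, is_zero = false;
    if (_mi_prim_alloc(size, _mi_os_page_size(), /*commit*/ false,
                       /*allow_large*/ false, &is_large, &is_zero, &addr) == 0) {
      bool commit_zero = false;
      _mi_prim_commit(addr, size, &commit_zero);       // make the range accessible
      bool needs_recommit = false;
      _mi_prim_decommit(addr, size, &needs_recommit);  // return pages to the OS
      _mi_prim_free(addr, size);
    }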
@@ -52,10 +64,10 @@ int _mi_prim_reset(void* addr, size_t size);
int _mi_prim_protect(void* addr, size_t size, bool protect);
// Allocate huge (1GiB) pages possibly associated with a NUMA node.
+// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
// pre: size > 0 and a multiple of 1GiB.
-// addr is either NULL or an address hint.
// numa_node is either negative (don't care), or a numa node number.
-int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, void** addr);
+int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr);
// Return the current NUMA node
size_t _mi_prim_numa_node(void);
diff --git a/source/luametatex/source/libraries/mimalloc/include/mimalloc/track.h b/source/luametatex/source/libraries/mimalloc/include/mimalloc/track.h
index f78e8daa7..9545f7507 100644
--- a/source/luametatex/source/libraries/mimalloc/include/mimalloc/track.h
+++ b/source/luametatex/source/libraries/mimalloc/include/mimalloc/track.h
@@ -79,7 +79,7 @@ defined, undefined, or not accessible at all:
// windows event tracing
#define MI_TRACK_ENABLED 1
-#define MI_TRACK_HEAP_DESTROY 0
+#define MI_TRACK_HEAP_DESTROY 1
#define MI_TRACK_TOOL "ETW"
#define WIN32_LEAN_AND_MEAN
diff --git a/source/luametatex/source/libraries/mimalloc/include/mimalloc/types.h b/source/luametatex/source/libraries/mimalloc/include/mimalloc/types.h
index c7ddaaaef..2005238a6 100644
--- a/source/luametatex/source/libraries/mimalloc/include/mimalloc/types.h
+++ b/source/luametatex/source/libraries/mimalloc/include/mimalloc/types.h
@@ -172,7 +172,7 @@ typedef int32_t mi_ssize_t;
// Derived constants
#define MI_SEGMENT_SIZE (MI_ZU(1)<<MI_SEGMENT_SHIFT)
#define MI_SEGMENT_ALIGN MI_SEGMENT_SIZE
-#define MI_SEGMENT_MASK (MI_SEGMENT_ALIGN - 1)
+#define MI_SEGMENT_MASK ((uintptr_t)(MI_SEGMENT_ALIGN - 1))
#define MI_SEGMENT_SLICE_SIZE (MI_ZU(1)<< MI_SEGMENT_SLICE_SHIFT)
#define MI_SLICES_PER_SEGMENT (MI_SEGMENT_SIZE / MI_SEGMENT_SLICE_SIZE) // 1024
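
The uintptr_t-typed mask keeps the pointer masking below well defined on all
targets; a hedged sketch of the usual segment-base recovery (segment_of is an
illustrative name):

    static inline mi_segment_t* segment_of(const void* p) {
      return (mi_segment_t*)((uintptr_t)p & ~MI_SEGMENT_MASK);
    }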
@@ -291,16 +291,15 @@ typedef uintptr_t mi_thread_free_t;
typedef struct mi_page_s {
// "owned" by the segment
uint32_t slice_count; // slices in this page (0 if not a page)
- uint32_t slice_offset; // distance from the actual page data slice (0 if a page)
- uint8_t is_reset : 1; // `true` if the page memory was reset
+ uint32_t slice_offset; // distance from the actual page data slice (0 if a page)
uint8_t is_committed : 1; // `true` if the page virtual memory is committed
- uint8_t is_zero_init : 1; // `true` if the page was zero initialized
+ uint8_t is_zero_init : 1; // `true` if the page was initially zero initialized
// layout like this to optimize access in `mi_malloc` and `mi_free`
uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear`
uint16_t reserved; // number of blocks reserved in memory
mi_page_flags_t flags; // `in_full` and `has_aligned` flags (8 bits)
- uint8_t is_zero : 1; // `true` if the blocks in the free list are zero initialized
+ uint8_t free_is_zero : 1; // `true` if the blocks in the free list are zero initialized
uint8_t retire_expire : 7; // expiration count for retired blocks
mi_block_t* free; // list of available free blocks (`malloc` allocates from this list)
@@ -326,6 +325,10 @@ typedef struct mi_page_s {
+// ------------------------------------------------------
+// Mimalloc segments contain mimalloc pages
+// ------------------------------------------------------
+
typedef enum mi_page_kind_e {
MI_PAGE_SMALL, // small blocks go into 64KiB pages inside a segment
MI_PAGE_MEDIUM, // medium blocks go into medium pages inside a segment
@@ -350,7 +353,7 @@ typedef enum mi_segment_kind_e {
// is still tracked in fine-grained MI_COMMIT_SIZE chunks)
// ------------------------------------------------------
-#define MI_MINIMAL_COMMIT_SIZE (16*MI_SEGMENT_SLICE_SIZE) // 1MiB
+#define MI_MINIMAL_COMMIT_SIZE (1*MI_SEGMENT_SLICE_SIZE)
#define MI_COMMIT_SIZE (MI_SEGMENT_SLICE_SIZE) // 64KiB
#define MI_COMMIT_MASK_BITS (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
#define MI_COMMIT_MASK_FIELD_BITS MI_SIZE_BITS
@@ -368,20 +371,57 @@ typedef mi_page_t mi_slice_t;
typedef int64_t mi_msecs_t;
+// Memory can reside in arenas, be allocated directly from the OS, or be statically allocated. The memid keeps track of this.
+typedef enum mi_memkind_e {
+ MI_MEM_NONE, // not allocated
+ MI_MEM_EXTERNAL, // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example)
+ MI_MEM_STATIC, // allocated in a static area and should not be freed (for arena meta data for example)
+ MI_MEM_OS, // allocated from the OS
+ MI_MEM_OS_HUGE, // allocated as huge os pages
+ MI_MEM_OS_REMAP, // allocated in a remappable area (i.e. using `mremap`)
+ MI_MEM_ARENA // allocated from an arena (the usual case)
+} mi_memkind_t;
+
+static inline bool mi_memkind_is_os(mi_memkind_t memkind) {
+ return (memkind >= MI_MEM_OS && memkind <= MI_MEM_OS_REMAP);
+}
+
+typedef struct mi_memid_os_info {
+ void* base; // actual base address of the block (used for offset aligned allocations)
+ size_t alignment; // alignment at allocation
+} mi_memid_os_info_t;
+
+typedef struct mi_memid_arena_info {
+ size_t block_index; // index in the arena
+ mi_arena_id_t id; // arena id (>= 1)
+ bool is_exclusive; // the arena can only be used for specific arena allocations
+} mi_memid_arena_info_t;
+
+typedef struct mi_memid_s {
+ union {
+ mi_memid_os_info_t os; // only used for MI_MEM_OS
+ mi_memid_arena_info_t arena; // only used for MI_MEM_ARENA
+ } mem;
+ bool is_pinned; // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large OS pages)
+ bool initially_committed;// `true` if the memory was originally allocated as committed
+ bool initially_zero; // `true` if the memory was originally zero initialized
+ mi_memkind_t memkind;
+} mi_memid_t;
+
+
// Segments are large allocated memory blocks (8MiB on 64-bit) from
// the OS. Inside segments we allocate fixed size _pages_ that
// contain blocks.
typedef struct mi_segment_s {
- size_t memid; // memory id for arena allocation
- bool mem_is_pinned; // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
- bool mem_is_large; // in large/huge os pages?
- bool mem_is_committed; // `true` if the whole segment is eagerly committed
- size_t mem_alignment; // page alignment for huge pages (only used for alignment > MI_ALIGNMENT_MAX)
- size_t mem_align_offset; // offset for huge page alignment (only used for alignment > MI_ALIGNMENT_MAX)
-
- bool allow_decommit;
- mi_msecs_t decommit_expire;
- mi_commit_mask_t decommit_mask;
+ // constant fields
+ mi_memid_t memid; // memory id for arena allocation
+ bool allow_decommit;
+ bool allow_purge;
+ size_t segment_size;
+
+ // segment fields
+ mi_msecs_t purge_expire;
+ mi_commit_mask_t purge_mask;
mi_commit_mask_t commit_mask;
_Atomic(struct mi_segment_s*) abandoned_next;
@@ -540,6 +580,7 @@ typedef struct mi_stats_s {
mi_stat_count_t reserved;
mi_stat_count_t committed;
mi_stat_count_t reset;
+ mi_stat_count_t purged;
mi_stat_count_t page_committed;
mi_stat_count_t segments_abandoned;
mi_stat_count_t pages_abandoned;
@@ -552,6 +593,8 @@ typedef struct mi_stats_s {
mi_stat_counter_t pages_extended;
mi_stat_counter_t mmap_calls;
mi_stat_counter_t commit_calls;
+ mi_stat_counter_t reset_calls;
+ mi_stat_counter_t purge_calls;
mi_stat_counter_t page_no_retire;
mi_stat_counter_t searches;
mi_stat_counter_t normal_count;