path: root/source/luametatex/source/libraries/mimalloc/src/segment-cache.c
Diffstat (limited to 'source/luametatex/source/libraries/mimalloc/src/segment-cache.c')
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/segment-cache.c  63
1 file changed, 52 insertions, 11 deletions
diff --git a/source/luametatex/source/libraries/mimalloc/src/segment-cache.c b/source/luametatex/source/libraries/mimalloc/src/segment-cache.c
index da726716a..d93fd6441 100644
--- a/source/luametatex/source/libraries/mimalloc/src/segment-cache.c
+++ b/source/luametatex/source/libraries/mimalloc/src/segment-cache.c
@@ -45,7 +45,11 @@ static bool mi_cdecl mi_segment_cache_is_suitable(mi_bitmap_index_t bitidx, void
return _mi_arena_memid_is_suitable(slot->memid, req_arena_id);
}
-mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t _req_arena_id, size_t* memid, mi_os_tld_t* tld)
+mi_decl_noinline static void* mi_segment_cache_pop_ex(
+ bool all_suitable,
+ size_t size, mi_commit_mask_t* commit_mask,
+ mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero,
+ mi_arena_id_t _req_arena_id, size_t* memid, mi_os_tld_t* tld)
{
#ifdef MI_CACHE_DISABLE
return NULL;
@@ -66,8 +70,8 @@ mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* comm
mi_bitmap_index_t bitidx = 0;
bool claimed = false;
mi_arena_id_t req_arena_id = _req_arena_id;
- mi_bitmap_pred_fun_t pred_fun = &mi_segment_cache_is_suitable; // cannot pass NULL as the arena may be exclusive itself; todo: do not put exclusive arenas in the cache?
-
+ mi_bitmap_pred_fun_t pred_fun = (all_suitable ? NULL : &mi_segment_cache_is_suitable); // cannot pass NULL as the arena may be exclusive itself; todo: do not put exclusive arenas in the cache?
+
if (*large) { // large allowed?
claimed = _mi_bitmap_try_find_from_claim_pred(cache_available_large, MI_CACHE_FIELDS, start_field, 1, pred_fun, &req_arena_id, &bitidx);
if (claimed) *large = true;
@@ -97,6 +101,12 @@ mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* comm
#endif
}
+
+mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t _req_arena_id, size_t* memid, mi_os_tld_t* tld)
+{
+ return mi_segment_cache_pop_ex(false, size, commit_mask, decommit_mask, large, is_pinned, is_zero, _req_arena_id, memid, tld);
+}
+
static mi_decl_noinline void mi_commit_mask_decommit(mi_commit_mask_t* cmask, void* p, size_t total, mi_stats_t* stats)
{
if (mi_commit_mask_is_empty(cmask)) {
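The refactor above follows a common pattern: the public _mi_segment_cache_pop keeps its signature and delegates to an extended internal entry point, mi_segment_cache_pop_ex, whose extra all_suitable flag decides whether the arena-suitability predicate is handed to the bitmap claim (a NULL predicate meaning "accept any slot"). A minimal standalone sketch of that shape, with purely illustrative names and a toy slot array standing in for the cache:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef bool (*pred_fun_t)(size_t idx, void* arg);

static int slots[4] = { 10, 20, 30, 40 };

// Example predicate: only odd-indexed slots are "suitable".
static bool is_odd_slot(size_t idx, void* arg) {
  (void)arg;
  return (idx % 2) == 1;
}

// Extended entry point: a NULL predicate means "any slot is acceptable".
static int* slot_pop_ex(bool all_suitable, void* pred_arg) {
  pred_fun_t pred = (all_suitable ? NULL : &is_odd_slot);
  for (size_t i = 0; i < 4; i++) {
    if (pred == NULL || pred(i, pred_arg)) return &slots[i];
  }
  return NULL;
}

// The public function keeps its original signature and simply delegates.
static int* slot_pop(void* pred_arg) {
  return slot_pop_ex(false /* filter through the predicate */, pred_arg);
}

int main(void) {
  printf("%d\n", *slot_pop(NULL));          // 20: first slot passing the predicate
  printf("%d\n", *slot_pop_ex(true, NULL)); // 10: first slot, predicate skipped
  return 0;
}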
@@ -123,14 +133,14 @@ static mi_decl_noinline void mi_commit_mask_decommit(mi_commit_mask_t* cmask, vo
#define MI_MAX_PURGE_PER_PUSH (4)
-static mi_decl_noinline void mi_segment_cache_purge(bool force, mi_os_tld_t* tld)
+static mi_decl_noinline void mi_segment_cache_purge(bool visit_all, bool force, mi_os_tld_t* tld)
{
MI_UNUSED(tld);
if (!mi_option_is_enabled(mi_option_allow_decommit)) return;
mi_msecs_t now = _mi_clock_now();
size_t purged = 0;
- const size_t max_visits = (force ? MI_CACHE_MAX /* visit all */ : MI_CACHE_FIELDS /* probe at most N (=16) slots */);
- size_t idx = (force ? 0 : _mi_random_shuffle((uintptr_t)now) % MI_CACHE_MAX /* random start */ );
+ const size_t max_visits = (visit_all ? MI_CACHE_MAX /* visit all */ : MI_CACHE_FIELDS /* probe at most N (=16) slots */);
+ size_t idx = (visit_all ? 0 : _mi_random_shuffle((uintptr_t)now) % MI_CACHE_MAX /* random start */ );
for (size_t visited = 0; visited < max_visits; visited++,idx++) { // visit N slots
if (idx >= MI_CACHE_MAX) idx = 0; // wrap
mi_cache_slot_t* slot = &cache[idx];
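The old force flag is split in two here: visit_all selects the scan policy (walk every slot versus probe a small random window), while force continues to control whether unexpired entries are decommitted. A standalone sketch of the bounded, wrapping scan selected by visit_all; CACHE_MAX is a placeholder value, and CACHE_FIELDS mirrors the 16-slot probe mentioned in the comment above:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_MAX    256   /* placeholder; the real bound is MI_CACHE_MAX    */
#define CACHE_FIELDS 16    /* matches the "probe at most N (=16) slots" case */

// Visit either every slot from index 0, or a small window starting at a
// random index, wrapping around at the end of the cache.
static void scan_cache(bool visit_all, void (*visit)(size_t idx)) {
  const size_t max_visits = (visit_all ? CACHE_MAX : CACHE_FIELDS);
  size_t idx = (visit_all ? 0 : (size_t)rand() % CACHE_MAX);
  for (size_t visited = 0; visited < max_visits; visited++, idx++) {
    if (idx >= CACHE_MAX) idx = 0;  // wrap
    visit(idx);
  }
}

static void print_idx(size_t idx) { printf("%zu ", idx); }

int main(void) {
  scan_cache(false, &print_idx);  // probes 16 slots from a random start
  printf("\n");
  return 0;
}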
@@ -154,13 +164,43 @@ static mi_decl_noinline void mi_segment_cache_purge(bool force, mi_os_tld_t* tld
}
_mi_bitmap_unclaim(cache_available, MI_CACHE_FIELDS, 1, bitidx); // make it available again for a pop
}
- if (!force && purged > MI_MAX_PURGE_PER_PUSH) break; // bound to no more than N purge tries per push
+ if (!visit_all && purged > MI_MAX_PURGE_PER_PUSH) break; // bound to no more than N purge tries per push
}
}
}
void _mi_segment_cache_collect(bool force, mi_os_tld_t* tld) {
- mi_segment_cache_purge(force, tld );
+ if (force) {
+ // called on `mi_collect(true)` but not on thread termination
+ _mi_segment_cache_free_all(tld);
+ }
+ else {
+ mi_segment_cache_purge(true /* visit all */, false /* don't force unexpired */, tld);
+ }
+}
+
+void _mi_segment_cache_free_all(mi_os_tld_t* tld) {
+ mi_commit_mask_t commit_mask;
+ mi_commit_mask_t decommit_mask;
+ bool is_pinned;
+ bool is_zero;
+ size_t memid;
+ const size_t size = MI_SEGMENT_SIZE;
+ // iterate twice: first large pages, then regular memory
+ for (int i = 0; i < 2; i++) {
+ void* p;
+ do {
+ // keep popping and freeing the memory
+ bool large = (i == 0);
+ p = mi_segment_cache_pop_ex(true /* all */, size, &commit_mask, &decommit_mask,
+ &large, &is_pinned, &is_zero, _mi_arena_id_none(), &memid, tld);
+ if (p != NULL) {
+ size_t csize = _mi_commit_mask_committed_size(&commit_mask, size);
+ if (csize > 0 && !is_pinned) _mi_stat_decrease(&_mi_stats_main.committed, csize);
+ _mi_arena_free(p, size, MI_SEGMENT_ALIGN, 0, memid, is_pinned /* pretend not committed to not double count decommits */, tld->stats);
+ }
+ } while (p != NULL);
+ }
}
mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld)
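With this change, a forced collection (_mi_segment_cache_collect(true, ...)) drains the cache entirely through _mi_segment_cache_free_all instead of only purging expired slots. The drain itself is a simple "pop until empty, once per category" loop; a self-contained sketch of that pattern, with toy counters standing in for the real cache:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

// Toy stand-ins for the two cache categories (large pages, regular memory).
static int large_left = 2;
static int regular_left = 3;

// Pop one cached entry of the requested category, or NULL when it is empty.
static void* pop_one(bool large) {
  int* left = large ? &large_left : &regular_left;
  if (*left == 0) return NULL;
  (*left)--;
  return left;  // any non-NULL token stands in for a cached segment
}

static void release(void* p) { (void)p; }

// Same shape as _mi_segment_cache_free_all above: large pages first, then
// regular memory, popping each category until it reports empty.
static void free_all(void) {
  for (int i = 0; i < 2; i++) {
    bool large = (i == 0);
    void* p;
    do {
      p = pop_one(large);
      if (p != NULL) release(p);
    } while (p != NULL);
  }
}

int main(void) {
  free_all();
  printf("large=%d regular=%d\n", large_left, regular_left);  // both end at 0
  return 0;
}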
@@ -181,7 +221,7 @@ mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t me
}
// purge expired entries
- mi_segment_cache_purge(false /* force? */, tld);
+ mi_segment_cache_purge(false /* limit purges to a constant N */, false /* don't force unexpired */, tld);
// find an available slot
mi_bitmap_index_t bitidx;
@@ -245,7 +285,7 @@ mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t me
static _Atomic(uintptr_t) mi_segment_map[MI_SEGMENT_MAP_WSIZE + 1]; // 2KiB per TB with 64MiB segments
static size_t mi_segment_map_index_of(const mi_segment_t* segment, size_t* bitidx) {
- mi_assert_internal(_mi_ptr_segment(segment) == segment); // is it aligned on MI_SEGMENT_SIZE?
+ mi_assert_internal(_mi_ptr_segment(segment + 1) == segment); // is it aligned on MI_SEGMENT_SIZE?
if ((uintptr_t)segment >= MI_MAX_ADDRESS) {
*bitidx = 0;
return MI_SEGMENT_MAP_WSIZE;
@@ -285,8 +325,9 @@ void _mi_segment_map_freed_at(const mi_segment_t* segment) {
// Determine the segment belonging to a pointer or NULL if it is not in a valid segment.
static mi_segment_t* _mi_segment_of(const void* p) {
+ if (p == NULL) return NULL;
mi_segment_t* segment = _mi_ptr_segment(p);
- if (segment == NULL) return NULL;
+ mi_assert_internal(segment != NULL);
size_t bitidx;
size_t index = mi_segment_map_index_of(segment, &bitidx);
// fast path: for any pointer to valid small/medium/large object or first MI_SEGMENT_SIZE in huge
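In the hunk above, the NULL test now happens on the raw pointer before the segment is derived, and the derived segment is asserted rather than checked. For context, a segment base is conventionally obtained by rounding the pointer down to the segment alignment, which maps NULL to NULL as well; a generic illustration (SEGMENT_SIZE is a placeholder, and mimalloc's actual _mi_ptr_segment may compute this slightly differently):

#include <stdint.h>
#include <stdio.h>

#define SEGMENT_SIZE ((uintptr_t)1 << 20)  /* placeholder; mimalloc uses MI_SEGMENT_SIZE */

// Round a pointer down to its segment base by masking off the low bits;
// NULL also masks to NULL, which is why the caller above now tests p for
// NULL before deriving the segment.
static void* ptr_segment(const void* p) {
  return (void*)((uintptr_t)p & ~(SEGMENT_SIZE - 1));
}

int main(void) {
  int x = 0;
  printf("%p -> %p\n", (void*)&x, ptr_segment(&x));
  return 0;
}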