Diffstat (limited to 'source/luametatex/source/libraries/mimalloc/src/page.c')
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/page.c  |  84
1 file changed, 54 insertions(+), 30 deletions(-)
diff --git a/source/luametatex/source/libraries/mimalloc/src/page.c b/source/luametatex/source/libraries/mimalloc/src/page.c
index 4b321156c..4250ff358 100644
--- a/source/luametatex/source/libraries/mimalloc/src/page.c
+++ b/source/luametatex/source/libraries/mimalloc/src/page.c
@@ -112,7 +112,10 @@ bool _mi_page_is_valid(mi_page_t* page) {
mi_segment_t* segment = _mi_page_segment(page);
mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id);
- if (segment->kind != MI_SEGMENT_HUGE) {
+ #if MI_HUGE_PAGE_ABANDON
+ if (segment->kind != MI_SEGMENT_HUGE)
+ #endif
+ {
mi_page_queue_t* pq = mi_page_queue_of(page);
mi_assert_internal(mi_page_queue_contains(pq, page));
mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page));
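Note on the hunk above: only the `if` line is wrapped in `#if MI_HUGE_PAGE_ABANDON`, while the brace block stays unconditional, so when the macro is 0 (huge pages kept in a heap queue instead of being abandoned, assumed here to be the default configuration) the queue assertions now also run for huge segments. A minimal standalone sketch of that preprocessor shape, with illustrative names:

    #define MI_HUGE_PAGE_ABANDON 0           /* assumption: the non-abandoning configuration */

    static void validate_page(int is_huge_segment) {
      (void)is_huge_segment;
      #if MI_HUGE_PAGE_ABANDON
      if (!is_huge_segment)                  /* with the macro enabled, huge segments skip the checks */
      #endif
      {
        /* queue checks go here; with the macro at 0 they run for every segment kind */
      }
    }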
@@ -132,7 +135,7 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool overrid
bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) {
mi_thread_free_t tfreex;
mi_delayed_t old_delay;
- mi_thread_free_t tfree;
+ mi_thread_free_t tfree;
size_t yield_count = 0;
do {
tfree = mi_atomic_load_acquire(&page->xthread_free); // note: must acquire as we can break/repeat this loop and not do a CAS;
@@ -245,7 +248,9 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
mi_assert_internal(mi_page_heap(page) == heap);
mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE);
+ #if MI_HUGE_PAGE_ABANDON
mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
+ #endif
mi_assert_internal(!page->is_reset);
// TODO: push on full queue immediately if it is full?
mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page));
@@ -254,17 +259,26 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
}
// allocate a fresh page from a segment
-static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size) {
- mi_assert_internal(pq==NULL||mi_heap_contains_queue(heap, pq));
- mi_page_t* page = _mi_segment_page_alloc(heap, block_size, &heap->tld->segments, &heap->tld->os);
+static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size, size_t page_alignment) {
+ #if !MI_HUGE_PAGE_ABANDON
+ mi_assert_internal(pq != NULL);
+ mi_assert_internal(mi_heap_contains_queue(heap, pq));
+ mi_assert_internal(page_alignment > 0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || block_size == pq->block_size);
+ #endif
+ mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments, &heap->tld->os);
if (page == NULL) {
// this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
return NULL;
}
- mi_assert_internal(pq==NULL || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
- mi_page_init(heap, page, block_size, heap->tld);
+ mi_assert_internal(page_alignment > 0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
+ mi_assert_internal(pq!=NULL || page->xblock_size != 0);
+ mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
+ // a fresh page was found, initialize it
+ const size_t full_block_size = ((pq == NULL || mi_page_queue_is_huge(pq)) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
+ mi_assert_internal(full_block_size >= block_size);
+ mi_page_init(heap, page, full_block_size, heap->tld);
mi_heap_stat_increase(heap, pages, 1);
- if (pq!=NULL) mi_page_queue_push(heap, pq, page); // huge pages use pq==NULL
+ if (pq != NULL) { mi_page_queue_push(heap, pq, page); }
mi_assert_expensive(_mi_page_is_valid(page));
return page;
}
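The main change in `mi_page_fresh_alloc` is that the page is no longer always initialized with the requested `block_size`: for huge pages (no queue, or the huge queue) the single block spans whatever the segment actually provided, which can exceed the request when `page_alignment > 0`. A hedged sketch of that selection, with the mimalloc calls replaced by plain parameters:

    #include <assert.h>
    #include <stddef.h>

    /* is_huge stands for (pq == NULL || mi_page_queue_is_huge(pq)),
       page_bsize for mi_page_block_size(page) */
    static size_t choose_full_block_size(int is_huge, size_t page_bsize, size_t requested_bsize) {
      const size_t full = (is_huge ? page_bsize : requested_bsize);
      assert(full >= requested_bsize);  /* mirrors the mi_assert_internal above */
      return full;
    }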
@@ -272,7 +286,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
// Get a fresh page to use
static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) {
mi_assert_internal(mi_heap_contains_queue(heap, pq));
- mi_page_t* page = mi_page_fresh_alloc(heap, pq, pq->block_size);
+ mi_page_t* page = mi_page_fresh_alloc(heap, pq, pq->block_size, 0);
if (page==NULL) return NULL;
mi_assert_internal(pq->block_size==mi_page_block_size(page));
mi_assert_internal(pq==mi_page_queue(heap, mi_page_block_size(page)));
@@ -402,7 +416,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
}
// Retire parameters
-#define MI_MAX_RETIRE_SIZE MI_MEDIUM_OBJ_SIZE_MAX
+#define MI_MAX_RETIRE_SIZE (MI_MEDIUM_OBJ_SIZE_MAX)
#define MI_RETIRE_CYCLES (8)
// Retire a page with no more used blocks
@@ -425,7 +439,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
// how to check this efficiently though...
// for now, we don't retire if it is the only page left of this size class.
mi_page_queue_t* pq = mi_page_queue_of(page);
- if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page)) {
+ if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_queue_is_special(pq)) { // not too large && not full or huge queue?
if (pq->last==page && pq->first==page) { // the only page in the queue?
mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
page->retire_expire = 1 + (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
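The retire condition changes from `!mi_page_is_in_full(page)` to `!mi_page_queue_is_special(pq)`: because huge pages can now sit in a heap queue, the retire fast path has to skip both the full queue and the huge queue, not only full pages. Under the assumption that "special" means exactly full-or-huge (and that `mi_page_queue_is_full` is the existing counterpart of `mi_page_queue_is_huge`), the test is equivalent to:

    /* sketch, using mimalloc's internal queue predicates */
    static bool page_may_retire(const mi_page_t* page, const mi_page_queue_t* pq) {
      return (page->xblock_size <= MI_MAX_RETIRE_SIZE)
          && !mi_page_queue_is_full(pq)
          && !mi_page_queue_is_huge(pq);
    }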
@@ -573,7 +587,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
#if (MI_SECURE>0)
#define MI_MIN_EXTEND (8*MI_SECURE) // extend at least by this many
#else
-#define MI_MIN_EXTEND (1)
+#define MI_MIN_EXTEND (4)
#endif
// Extend the capacity (up to reserved) by initializing a free list
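Raising `MI_MIN_EXTEND` from 1 to 4 means a page's free list grows by at least four blocks per extension in the non-secure build, trading slightly more committed memory for fewer trips through the slow path. The clamp it feeds into (shown in the next hunk) behaves roughly like this sketch:

    // bsize is the block size, extend the number of fresh blocks wanted
    size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/(uint32_t)bsize);
    if (max_extend < MI_MIN_EXTEND) { max_extend = MI_MIN_EXTEND; }  // never extend by fewer than MI_MIN_EXTEND blocks
    if (extend > max_extend) { extend = max_extend; }                // limit the memory touched per extension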
@@ -603,7 +617,7 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld)
size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/(uint32_t)bsize);
if (max_extend < MI_MIN_EXTEND) { max_extend = MI_MIN_EXTEND; }
mi_assert_internal(max_extend > 0);
-
+
if (extend > max_extend) {
// ensure we don't touch memory beyond the page to reduce page commit.
// the `lean` benchmark tests this. Going from 1 to 8 increases rss by 50%.
@@ -648,6 +662,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE);
mi_assert_internal(page_size / block_size < (1L<<16));
page->reserved = (uint16_t)(page_size / block_size);
+ mi_assert_internal(page->reserved > 0);
#ifdef MI_ENCODE_FREELIST
page->keys[0] = _mi_heap_random_next(heap);
page->keys[1] = _mi_heap_random_next(heap);
@@ -725,7 +740,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
page = mi_page_fresh(heap, pq);
if (page == NULL && first_try) {
// out-of-memory _or_ an abandoned page with free blocks was reclaimed, try once again
- page = mi_page_queue_find_free_ex(heap, pq, false);
+ page = mi_page_queue_find_free_ex(heap, pq, false);
}
}
else {
@@ -743,17 +758,17 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
mi_page_queue_t* pq = mi_page_queue(heap,size);
mi_page_t* page = pq->first;
if (page != NULL) {
- #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness
+ #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness
if (page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) {
mi_page_extend_free(heap, page, heap->tld);
mi_assert_internal(mi_page_immediate_available(page));
}
- else
+ else
#endif
{
_mi_page_free_collect(page,false);
}
-
+
if (mi_page_immediate_available(page)) {
page->retire_expire = 0;
return page; // fast path
@@ -797,21 +812,28 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noex
// Because huge pages contain just one block, and the segment contains
// just that page, we always treat them as abandoned and any thread
// that frees the block can free the whole page and segment directly.
-static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size) {
+// Huge pages are also used if the requested alignment is very large (> MI_ALIGNMENT_MAX).
+static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
size_t block_size = _mi_os_good_alloc_size(size);
- mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE);
- bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX);
+ mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0);
+ bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX || page_alignment > 0);
+ #if MI_HUGE_PAGE_ABANDON
mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));
- mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size);
+ #else
+ mi_page_queue_t* pq = mi_page_queue(heap, is_huge ? MI_HUGE_BLOCK_SIZE : block_size); // not block_size as that can be low if the page_alignment > 0
+ mi_assert_internal(!is_huge || mi_page_queue_is_huge(pq));
+ #endif
+ mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
if (page != NULL) {
mi_assert_internal(mi_page_immediate_available(page));
- if (pq == NULL) {
- // huge pages are directly abandoned
+ if (is_huge) {
mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
mi_assert_internal(_mi_page_segment(page)->used==1);
+ #if MI_HUGE_PAGE_ABANDON
mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
mi_page_set_heap(page, NULL);
+ #endif
}
else {
mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
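With MI_HUGE_PAGE_ABANDON disabled, a huge page is no longer detached from its heap at allocation time; it is pushed on the heap's huge queue, keyed by the MI_HUGE_BLOCK_SIZE sentinel rather than the (possibly small) requested block size when `page_alignment > 0`. That is also why the branch above now tests `is_huge` instead of `pq == NULL`: the queue pointer is non-NULL in both configurations. A condensed sketch of the two variants:

    const bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX || page_alignment > 0);
    #if MI_HUGE_PAGE_ABANDON
    mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));              // huge pages bypass the queues
    #else
    mi_page_queue_t* pq = mi_page_queue(heap, is_huge ? MI_HUGE_BLOCK_SIZE : block_size);  // huge pages use the huge queue
    #endif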
@@ -833,16 +855,16 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size) {
// Allocate a page
// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
-static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment) mi_attr_noexcept {
// huge allocation?
const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
- if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE)) {
+ if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) {
if mi_unlikely(req_size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
_mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
return NULL;
}
else {
- return mi_large_huge_page_alloc(heap,size);
+ return mi_large_huge_page_alloc(heap,size,huge_alignment);
}
}
else {
@@ -854,7 +876,9 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size) mi_attr_noexcept {
// Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
-void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept
+// The `huge_alignment` is normally 0 but is set to a multiple of MI_SEGMENT_SIZE for
+// very large requested alignments in which case we use a huge segment.
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept
{
mi_assert_internal(heap != NULL);
@@ -873,14 +897,14 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexce
_mi_heap_delayed_free_partial(heap);
// find (or allocate) a page of the right size
- mi_page_t* page = mi_find_page(heap, size);
+ mi_page_t* page = mi_find_page(heap, size, huge_alignment);
if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more
mi_heap_collect(heap, true /* force */);
- page = mi_find_page(heap, size);
+ page = mi_find_page(heap, size, huge_alignment);
}
if mi_unlikely(page == NULL) { // out of memory
- const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
+ const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
_mi_error_message(ENOMEM, "unable to allocate memory (%zu bytes)\n", req_size);
return NULL;
}
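For callers, the new `huge_alignment` parameter is what routes over-aligned requests through the huge-segment path. A hypothetical caller sketch (not mimalloc's actual aligned-allocation code; `_mi_align_up`, `MI_ALIGNMENT_MAX` and `MI_SEGMENT_SIZE` are existing mimalloc definitions, the helper itself is illustrative):

    void* alloc_over_aligned(mi_heap_t* heap, size_t size, size_t alignment) {
      size_t huge_alignment = 0;
      if (alignment > MI_ALIGNMENT_MAX) {
        // very large alignment: request a huge segment, aligned to a multiple of MI_SEGMENT_SIZE
        huge_alignment = _mi_align_up(alignment, MI_SEGMENT_SIZE);
      }
      return _mi_malloc_generic(heap, size, false /* zero */, huge_alignment);
    }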