Smarter GC malloc sizing for large allocations

This commit is contained in:
2026-02-06 18:38:23 -06:00
parent e2bc5948c1
commit 160ade2410

View File

@@ -1248,7 +1248,7 @@ void *js_realloc (JSContext *ctx, void *ptr, size_t size) {
============================================================ */
/* Forward declaration for ctx_gc */
static int ctx_gc (JSContext *ctx, int allow_grow);
static int ctx_gc (JSContext *ctx, int allow_grow, size_t alloc_size);
/* JS_MarkValue - mark a value during GC traversal.
With copying GC, this is a no-op as we discover live objects by tracing. */
@@ -2122,28 +2122,19 @@ void *js_malloc (JSContext *ctx, size_t size) {
#ifdef FORCE_GC_AT_MALLOC
/* Force GC on every allocation for testing - but don't grow heap unless needed */
int need_space = (uint8_t *)ctx->heap_free + size > (uint8_t *)ctx->heap_end;
if (ctx_gc(ctx, need_space) < 0) {
if (ctx_gc(ctx, need_space, size) < 0) {
JS_ThrowOutOfMemory(ctx);
return NULL;
}
/* Check if we have space after GC — the GC may have doubled
next_block_size but allocated the old size. Fall through to
the normal grow path so we get a bigger block. */
if ((uint8_t *)ctx->heap_free + size > (uint8_t *)ctx->heap_end) {
if (ctx_gc(ctx, 1) < 0) {
JS_ThrowOutOfMemory(ctx);
return NULL;
}
if ((uint8_t *)ctx->heap_free + size > (uint8_t *)ctx->heap_end) {
JS_ThrowOutOfMemory(ctx);
return NULL;
}
JS_ThrowOutOfMemory(ctx);
return NULL;
}
#else
/* Check if we have space in current block */
if ((uint8_t *)ctx->heap_free + size > (uint8_t *)ctx->heap_end) {
/* Trigger GC to reclaim memory */
if (ctx_gc (ctx, 1) < 0) {
if (ctx_gc (ctx, 1, size) < 0) {
JS_ThrowOutOfMemory (ctx);
return NULL;
}
@@ -2495,7 +2486,7 @@ static void heap_block_free(JSRuntime *rt, void *ptr, size_t size) {
============================================================ */
/* Forward declarations for GC helpers */
static int ctx_gc (JSContext *ctx, int allow_grow);
static int ctx_gc (JSContext *ctx, int allow_grow, size_t alloc_size);
static JSValue gc_copy_value (JSContext *ctx, JSValue v, uint8_t *from_base, uint8_t *from_end, uint8_t *to_base, uint8_t **to_free, uint8_t *to_end);
static void gc_scan_object (JSContext *ctx, void *ptr, uint8_t *from_base, uint8_t *from_end, uint8_t *to_base, uint8_t **to_free, uint8_t *to_end);
static size_t gc_object_size (void *ptr);
@@ -2747,8 +2738,9 @@ static void gc_scan_bytecode_cpool (JSContext *ctx, JSValue v, uint8_t *from_bas
}
/* Cheney copying GC - collect garbage and compact live objects
allow_grow: if true, grow heap when <20% recovered; if false, keep same size */
static int ctx_gc (JSContext *ctx, int allow_grow) {
allow_grow: if true, grow the heap when recovery is poor
alloc_size: the allocation that triggered this GC; used to size the new block 
static int ctx_gc (JSContext *ctx, int allow_grow, size_t alloc_size) {
JSRuntime *rt = ctx->rt;
size_t old_used = ctx->heap_free - ctx->heap_base;
size_t old_heap_size = ctx->current_block_size;
@@ -2763,8 +2755,17 @@ static int ctx_gc (JSContext *ctx, int allow_grow) {
printf("ctx_gc: from_base=%p from_end=%p size=%zu\n", (void*)from_base, (void*)from_end, old_heap_size);
#endif
/* Request new block from runtime */
/* Request new block from runtime.
When allow_grow is set and the pending allocation won't fit in the
current next_block_size, jump straight to a block that can hold
live_data + alloc_size instead of doubling one step at a time. */
size_t new_size = ctx->next_block_size;
if (allow_grow) {
size_t live_est = (size_t)(from_end - from_base); /* upper bound on live data */
size_t need = live_est + alloc_size;
while (new_size < need && new_size < (1ULL << BUDDY_MAX_ORDER))
new_size *= 2;
}
uint8_t *new_block = heap_block_alloc (rt, new_size);
if (!new_block) {
/* Try with same size */