Merge branch 'fix_gc' into runtime_rework
This commit is contained in:
66
source/buddy_debug.c
Normal file
66
source/buddy_debug.c
Normal file
@@ -0,0 +1,66 @@
|
||||
/* buddy_debug.c — ASCII visualization for buddy allocator
|
||||
Included from runtime.c only when DUMP_BUDDY is defined. */
|
||||
|
||||
static void buddy_dump(BuddyPool *pool, const char *op,
|
||||
uint8_t *block, uint8_t order) {
|
||||
if (!pool || !pool->base) return;
|
||||
|
||||
int levels = pool->max_order - BUDDY_MIN_ORDER + 1;
|
||||
|
||||
/* Bitmap: one byte per min-block slot */
|
||||
size_t num_slots = pool->total_size >> BUDDY_MIN_ORDER;
|
||||
/* Dynamic VLA — pool sizes vary now */
|
||||
uint8_t *bitmap = alloca(num_slots);
|
||||
memset(bitmap, 0, num_slots); /* 0 = allocated */
|
||||
|
||||
/* Walk all free lists and mark free slots */
|
||||
for (int i = 0; i < levels; i++) {
|
||||
for (BuddyBlock *p = pool->free_lists[i]; p; p = p->next) {
|
||||
size_t off = (uint8_t *)p - pool->base;
|
||||
size_t slot = off >> BUDDY_MIN_ORDER;
|
||||
size_t count = 1ULL << i; /* number of min-block slots in this block */
|
||||
for (size_t s = 0; s < count && (slot + s) < num_slots; s++)
|
||||
bitmap[slot + s] = 1;
|
||||
}
|
||||
}
|
||||
|
||||
/* Render 64-char ASCII bar */
|
||||
size_t slots_per_char = num_slots / 64;
|
||||
if (slots_per_char == 0) slots_per_char = 1;
|
||||
char bar[65];
|
||||
size_t total_free_slots = 0;
|
||||
for (int c = 0; c < 64; c++) {
|
||||
size_t base_slot = c * slots_per_char;
|
||||
size_t free_count = 0;
|
||||
for (size_t s = 0; s < slots_per_char && (base_slot + s) < num_slots; s++) {
|
||||
if (bitmap[base_slot + s]) free_count++;
|
||||
}
|
||||
total_free_slots += free_count;
|
||||
/* Majority vote: if more than half are free, show free */
|
||||
bar[c] = (free_count > slots_per_char / 2) ? '.' : '#';
|
||||
}
|
||||
bar[64] = '\0';
|
||||
|
||||
size_t blk_offset = block - pool->base;
|
||||
size_t blk_size = 1ULL << order;
|
||||
size_t total_free = total_free_slots << BUDDY_MIN_ORDER;
|
||||
size_t total_alloc = pool->total_size - total_free;
|
||||
|
||||
fprintf(stderr, "buddy %s: pool %zuKB order %u (%zuKB) @ +%zuKB allocs=%u\n",
|
||||
op, pool->total_size / 1024, order, blk_size / 1024,
|
||||
blk_offset / 1024, pool->alloc_count);
|
||||
fprintf(stderr, " [%s]\n", bar);
|
||||
fprintf(stderr, " alloc: %zuKB free: %zuKB total: %zuKB\n",
|
||||
total_alloc / 1024, total_free / 1024, pool->total_size / 1024);
|
||||
|
||||
/* Print free list population */
|
||||
fprintf(stderr, " free lists:");
|
||||
for (int i = 0; i < levels; i++) {
|
||||
int count = 0;
|
||||
for (BuddyBlock *p = pool->free_lists[i]; p; p = p->next)
|
||||
count++;
|
||||
if (count > 0)
|
||||
fprintf(stderr, " o%d:%d", i + BUDDY_MIN_ORDER, count);
|
||||
}
|
||||
fprintf(stderr, "\n");
|
||||
}
|
||||
@@ -95,9 +95,12 @@
|
||||
/* test the GC by forcing it before each object allocation */
|
||||
// #define FORCE_GC_AT_MALLOC
|
||||
|
||||
#define POISON_HEAP
|
||||
/* POISON_HEAP: Use ASan's memory poisoning to detect stale pointer access */
|
||||
#ifdef POISON_HEAP
|
||||
#include <sys/mman.h>
|
||||
#include <unistd.h>
|
||||
|
||||
/* HEAP_CHECK: validate heap pointers at JS_VALUE_GET_* macros */
|
||||
// #define HEAP_CHECK
|
||||
|
||||
#if defined(__has_feature)
|
||||
#if __has_feature(address_sanitizer)
|
||||
#define HAVE_ASAN 1
|
||||
@@ -106,25 +109,6 @@
|
||||
#define HAVE_ASAN 1
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_ASAN
|
||||
#include <sanitizer/asan_interface.h>
|
||||
#define gc_poison_region(addr, size) __asan_poison_memory_region((addr), (size))
|
||||
#define gc_unpoison_region(addr, size) __asan_unpoison_memory_region((addr), (size))
|
||||
#else
|
||||
/* Fallback: no-op when not building with ASan */
|
||||
#define gc_poison_region(addr, size) ((void)0)
|
||||
#define gc_unpoison_region(addr, size) ((void)0)
|
||||
#endif
|
||||
|
||||
#include <sys/mman.h>
|
||||
#include <unistd.h>
|
||||
|
||||
/* Round `size` up to a whole number of VM pages (for mmap/poison sizing).
   Caches the page size after the first query; falls back to 4096 if
   sysconf() fails (it returns -1 on error, which would otherwise corrupt
   the alignment mask). */
static inline size_t poison_page_align(size_t size) {
  static size_t ps;
  if (ps == 0) {
    long r = sysconf(_SC_PAGESIZE);
    ps = (r > 0) ? (size_t)r : 4096;
  }
  return (size + ps - 1) & ~(ps - 1);
}
|
||||
#endif /* POISON_HEAP */
|
||||
|
||||
#ifdef HAVE_ASAN
|
||||
static struct JSContext *__asan_js_ctx;
|
||||
#endif
|
||||
@@ -303,14 +287,27 @@ typedef enum JSErrorEnum {
|
||||
|
||||
/* Forward declaration for bytecode freeing */
|
||||
|
||||
#define JS_VALUE_GET_BLOB(v) ((JSBlob *)JS_VALUE_GET_PTR (v))
|
||||
#define JS_VALUE_GET_CODE(v) (JS_VALUE_GET_PTR (v))
|
||||
|
||||
#ifdef HEAP_CHECK
|
||||
void heap_check_fail(void *ptr, struct JSContext *ctx);
|
||||
#define JS_VALUE_GET_ARRAY(v) ((JSArray *)heap_check_chase(ctx, v))
|
||||
#define JS_VALUE_GET_OBJ(v) ((JSRecord *)heap_check_chase(ctx, v))
|
||||
#define JS_VALUE_GET_TEXT(v) ((JSText *)heap_check_chase(ctx, v))
|
||||
#define JS_VALUE_GET_FUNCTION(v) ((JSFunction *)heap_check_chase(ctx, v))
|
||||
#define JS_VALUE_GET_FRAME(v) ((JSFrame *)heap_check_chase(ctx, v))
|
||||
#define JS_VALUE_GET_STRING(v) ((JSText *)heap_check_chase(ctx, v))
|
||||
#define JS_VALUE_GET_RECORD(v) ((JSRecord *)heap_check_chase(ctx, v))
|
||||
#else
|
||||
#define JS_VALUE_GET_ARRAY(v) ((JSArray *)chase (v))
|
||||
#define JS_VALUE_GET_OBJ(v) ((JSRecord *)chase (v))
|
||||
#define JS_VALUE_GET_TEXT(v) ((JSText *)chase (v))
|
||||
#define JS_VALUE_GET_BLOB(v) ((JSBlob *)JS_VALUE_GET_PTR (v))
|
||||
#define JS_VALUE_GET_FUNCTION(v) ((JSFunction *)chase (v))
|
||||
#define JS_VALUE_GET_FRAME(v) ((JSFrame *)chase (v))
|
||||
#define JS_VALUE_GET_CODE(v) (JS_VALUE_GET_PTR (v))
|
||||
#define JS_VALUE_GET_STRING(v) ((JSText *)chase (v))
|
||||
#define JS_VALUE_GET_RECORD(v) ((JSRecord *)chase (v))
|
||||
#endif
|
||||
|
||||
/* Compatibility: JS_TAG_STRING is an alias for text type checks */
|
||||
#define JS_TAG_STRING JS_TAG_STRING_IMM
|
||||
@@ -333,9 +330,8 @@ static inline objhdr_t objhdr_set_cap56 (objhdr_t h, uint64_t cap) {
|
||||
#else
|
||||
#define BUDDY_MIN_ORDER 9 /* 512B minimum on 32-bit */
|
||||
#endif
|
||||
#define BUDDY_MAX_ORDER 28 /* 256MB maximum */
|
||||
#define BUDDY_LEVELS (BUDDY_MAX_ORDER - BUDDY_MIN_ORDER + 1)
|
||||
#define BUDDY_POOL_SIZE (1ULL << BUDDY_MAX_ORDER)
|
||||
#define BUDDY_MAX_LEVELS 40 /* supports pools up to 2^(BUDDY_MIN_ORDER+39) */
|
||||
#define BUDDY_DEFAULT_POOL (1ULL << 24) /* 16MB initial pool */
|
||||
|
||||
typedef struct BuddyBlock {
|
||||
struct BuddyBlock *next;
|
||||
@@ -344,15 +340,26 @@ typedef struct BuddyBlock {
|
||||
uint8_t is_free;
|
||||
} BuddyBlock;
|
||||
|
||||
/* One mmap'd region managed as an independent buddy heap. */
typedef struct BuddyPool {
  struct BuddyPool *next;   /* next pool in the allocator's list */
  uint8_t *base;            /* start of the mapped region */
  size_t total_size;        /* bytes mapped */
  uint8_t max_order; /* log2(total_size) */
  uint32_t alloc_count; /* outstanding allocations */
  BuddyBlock *free_lists[BUDDY_MAX_LEVELS]; /* per-order free lists; index = order - BUDDY_MIN_ORDER */
} BuddyPool;
|
||||
|
||||
/* Top-level buddy allocator: a growable linked list of BuddyPools.
   NOTE(review): base/total_size/free_lists/initialized look like leftovers
   from the previous fixed-256MB single-region design — the pool-based code
   paths visible here do not use them; confirm and remove. */
typedef struct BuddyAllocator {
  uint8_t *base; /* 256MB base address */
  size_t total_size; /* 256MB */
  BuddyBlock *free_lists[BUDDY_LEVELS];
  uint8_t initialized;
  BuddyPool *pools; /* linked list, newest first */
  size_t next_pool_size; /* next pool doubles from this */
  size_t initial_size; /* starting pool size */
  size_t cap; /* 0 = no cap */
  size_t total_mapped; /* sum of all pool sizes */
} BuddyAllocator;
|
||||
|
||||
/* Forward declarations for buddy allocator functions */
|
||||
static void buddy_destroy (BuddyAllocator *b);
|
||||
static size_t buddy_max_block (BuddyAllocator *b);
|
||||
|
||||
/* controls a host of contexts, handing out memory and scheduling */
|
||||
struct JSRuntime {
|
||||
@@ -1181,6 +1188,17 @@ static inline int is_ct_ptr (JSContext *ctx, void *ptr) {
|
||||
return (uint8_t *)ptr >= ctx->ct_base && (uint8_t *)ptr < ctx->ct_end;
|
||||
}
|
||||
|
||||
#ifdef HEAP_CHECK
|
||||
/* Resolve `v` through chase() and verify the resulting pointer lies in
   either the live heap [heap_base, heap_free) or the compile-time pool
   [ct_base, ct_end).  Diverts to heap_check_fail() (which aborts) when
   the pointer is outside both ranges. */
static inline objhdr_t *heap_check_chase(JSContext *ctx, JSValue v) {
  objhdr_t *hdr = chase(v);
  uint8_t *addr = (uint8_t *)hdr;
  int in_heap = addr >= ctx->heap_base && addr < ctx->heap_free;
  int in_ct = addr >= ctx->ct_base && addr < ctx->ct_end;
  if (!in_heap && !in_ct)
    heap_check_fail(hdr, ctx);
  return hdr;
}
|
||||
#endif
|
||||
|
||||
/* Intern a UTF-32 string as a stone text, returning a JSValue string */
|
||||
|
||||
/* Create a stoned, interned key from a UTF-8 C string.
|
||||
@@ -1214,8 +1232,6 @@ typedef struct JSRegExp {
|
||||
#define obj_is_stone(rec) objhdr_s ((rec)->mist_hdr)
|
||||
#define obj_set_stone(rec) ((rec)->mist_hdr = objhdr_set_s ((rec)->mist_hdr, true))
|
||||
|
||||
#define JS_VALUE_GET_RECORD(v) ((JSRecord *)chase (v))
|
||||
|
||||
/* Get prototype from object (works for both JSRecord and JSRecord since they
|
||||
* share layout) */
|
||||
#define JS_OBJ_GET_PROTO(p) (JS_IsNull(((JSRecord *)(p))->proto) ? NULL : (JSRecord *)JS_VALUE_GET_PTR(((JSRecord *)(p))->proto))
|
||||
|
||||
@@ -305,6 +305,7 @@ typedef JSValue JSCFunctionData (JSContext *ctx, JSValue this_val,
|
||||
JSRuntime *JS_NewRuntime (void);
|
||||
void JS_FreeRuntime (JSRuntime *rt);
|
||||
void JS_SetMemoryLimit (JSRuntime *rt, size_t limit);
|
||||
void JS_SetPoolSize (JSRuntime *rt, size_t initial, size_t cap);
|
||||
|
||||
JSContext *JS_NewContext (JSRuntime *rt);
|
||||
JSContext *JS_NewContextWithHeapSize (JSRuntime *rt, size_t heap_size);
|
||||
|
||||
340
source/runtime.c
340
source/runtime.c
@@ -28,6 +28,55 @@
|
||||
#define WOTA_IMPLEMENTATION
|
||||
#include "quickjs-internal.h"
|
||||
|
||||
// #define DUMP_BUDDY
|
||||
#ifdef DUMP_BUDDY
|
||||
#include "buddy_debug.c"
|
||||
#endif
|
||||
|
||||
#ifdef HEAP_CHECK
/* Report an invalid heap pointer caught by heap_check_chase(): print the
   offending pointer, the valid heap and ct-pool ranges, and a best-effort
   JS stack trace, then abort().  Never returns.
   (Fixed: removed unused local `uint8_t *p`.) */
void heap_check_fail(void *ptr, JSContext *ctx) {
  fprintf(stderr, "\n=== HEAP_CHECK: invalid heap pointer ===\n");
  fprintf(stderr, " pointer: %p\n", ptr);
  fprintf(stderr, " heap: [%p, %p)\n",
          (void *)ctx->heap_base, (void *)ctx->heap_free);
  fprintf(stderr, " ct_pool: [%p, %p)\n",
          (void *)ctx->ct_base, (void *)ctx->ct_end);
  if (!JS_IsNull(ctx->reg_current_frame)) {
    fprintf(stderr, " stack trace:\n");
    JSFrame *frame = (JSFrame *)JS_VALUE_GET_PTR(ctx->reg_current_frame);
    uint32_t pc = ctx->current_register_pc; /* live pc of innermost frame */
    int first = 1;
    while (frame) {
      objhdr_t hdr = *(objhdr_t *)JS_VALUE_GET_PTR(frame->function);
      if (objhdr_type(hdr) != OBJ_FUNCTION) break; /* corrupt frame — stop */
      JSFunction *fn = (JSFunction *)JS_VALUE_GET_PTR(frame->function);
      const char *name = NULL, *file = NULL;
      uint16_t line = 0;
      if (fn->kind == JS_FUNC_KIND_REGISTER && fn->u.reg.code) {
        JSCodeRegister *code = fn->u.reg.code;
        file = code->filename_cstr;
        name = code->name_cstr;
        /* Outer frames: recover the saved pc packed into the frame's
           address (assumes high bits encode pc << 16 — TODO confirm). */
        if (!first)
          pc = (uint32_t)(JS_VALUE_GET_INT(frame->address) >> 16);
        if (code->line_table && pc < code->instr_count)
          line = code->line_table[pc].line;
      }
      fprintf(stderr, " %s (%s:%u)\n",
              name ? name : "<anonymous>",
              file ? file : "<unknown>", line);
      if (JS_IsNull(frame->caller)) break;
      frame = (JSFrame *)JS_VALUE_GET_PTR(frame->caller);
      first = 0;
      pc = 0; /* fallback; overwritten above when next frame has code */
    }
  }
  fprintf(stderr, "=======================================\n");
  fflush(stderr);
  abort();
}
#endif
|
||||
|
||||
static inline JS_BOOL JS_IsInteger (JSValue v) {
|
||||
if (JS_VALUE_GET_TAG(v) == JS_TAG_INT) return true;
|
||||
if (JS_VALUE_GET_TAG(v) != JS_TAG_SHORT_FLOAT) return false;
|
||||
@@ -731,7 +780,7 @@ void *js_malloc (JSContext *ctx, size_t size) {
|
||||
size_t live = (size_t)((uint8_t *)ctx->heap_free - (uint8_t *)ctx->heap_base);
|
||||
size_t need = live + size;
|
||||
size_t ns = ctx->current_block_size;
|
||||
while (ns < need && ns < (1ULL << BUDDY_MAX_ORDER))
|
||||
while (ns < need && ns < buddy_max_block(&ctx->rt->buddy))
|
||||
ns *= 2;
|
||||
#ifdef DUMP_GC
|
||||
printf (" growing %zu -> %zu for %zu byte alloc (live %zu)\n",
|
||||
@@ -926,46 +975,259 @@ static int init_class_range (JSContext *ctx, JSClass const *tab, int start, int
|
||||
}
|
||||
|
||||
|
||||
/* Destroy buddy allocator and free pool */
|
||||
void buddy_destroy (BuddyAllocator *b) {
|
||||
if (!b->initialized) return;
|
||||
/* Create a new buddy pool of the given size */
|
||||
static BuddyPool *buddy_pool_new(size_t pool_size) {
|
||||
BuddyPool *pool = js_mallocz_rt(sizeof(BuddyPool));
|
||||
if (!pool) return NULL;
|
||||
|
||||
free (b->base);
|
||||
b->base = NULL;
|
||||
b->initialized = 0;
|
||||
for (int i = 0; i < BUDDY_LEVELS; i++) {
|
||||
b->free_lists[i] = NULL;
|
||||
pool->base = mmap(NULL, pool_size, PROT_READ | PROT_WRITE,
|
||||
MAP_ANON | MAP_PRIVATE, -1, 0);
|
||||
if (pool->base == MAP_FAILED) {
|
||||
js_free_rt(pool);
|
||||
return NULL;
|
||||
}
|
||||
pool->total_size = pool_size;
|
||||
|
||||
/* Compute max_order = log2(pool_size) */
|
||||
uint8_t order = BUDDY_MIN_ORDER;
|
||||
while ((1ULL << order) < pool_size)
|
||||
order++;
|
||||
pool->max_order = order;
|
||||
pool->alloc_count = 0;
|
||||
|
||||
for (int i = 0; i < BUDDY_MAX_LEVELS; i++)
|
||||
pool->free_lists[i] = NULL;
|
||||
|
||||
/* One free block spanning the entire pool */
|
||||
BuddyBlock *blk = (BuddyBlock *)pool->base;
|
||||
blk->order = pool->max_order;
|
||||
blk->is_free = 1;
|
||||
blk->next = NULL;
|
||||
blk->prev = NULL;
|
||||
int level = pool->max_order - BUDDY_MIN_ORDER;
|
||||
pool->free_lists[level] = blk;
|
||||
|
||||
return pool;
|
||||
}
|
||||
|
||||
/* Try to allocate a block of size 1 << order from a specific pool.
   Returns a pointer to the block, or NULL when the pool has no free
   block of sufficient size.  Larger blocks are split down (classic
   buddy scheme), pushing each right half onto its level's free list. */
static void *buddy_pool_alloc(BuddyPool *pool, uint8_t order) {
  int level = order - BUDDY_MIN_ORDER;
  int levels = pool->max_order - BUDDY_MIN_ORDER + 1;

  /* Search for a free block at this level or higher */
  int found = -1;
  for (int i = level; i < levels; i++) {
    if (pool->free_lists[i]) {
      found = i;
      break;
    }
  }
  if (found < 0)
    return NULL;

  /* Remove the block from its free list (it is the head) */
  BuddyBlock *blk = pool->free_lists[found];
  pool->free_lists[found] = blk->next;
  if (blk->next)
    blk->next->prev = NULL;

  /* Split down to the target level */
  uint8_t cur_order = BUDDY_MIN_ORDER + found;
  while (cur_order > order) {
    cur_order--;
    int split_level = cur_order - BUDDY_MIN_ORDER;
    /* Right half becomes a free buddy */
    uint8_t *right = (uint8_t *)blk + (1ULL << cur_order);
    BuddyBlock *rblk = (BuddyBlock *)right;
    rblk->order = cur_order;
    rblk->is_free = 1;
    rblk->prev = NULL;
    rblk->next = pool->free_lists[split_level];
    if (rblk->next)
      rblk->next->prev = rblk;
    pool->free_lists[split_level] = rblk;
  }

  pool->alloc_count++;
  /* NOTE(review): blk->order / blk->is_free are not updated for the block
     being handed out; buddy_free() recomputes the order from the size, so
     the stale header appears never to be read — confirm nothing else
     relies on headers of live blocks. */
  return (void *)blk;
}
|
||||
|
||||
/* Allocate a block of at least `size` bytes.  Tries every existing pool;
   if none can satisfy the request, maps a new pool (sizes double each
   time) subject to the optional cap.  Returns NULL when the cap is hit
   or a growth pool cannot be mapped; aborts if the very first pool
   cannot be created. */
static void *buddy_alloc(BuddyAllocator *b, size_t size) {
  /* Lazy-init: create first pool on demand */
  if (!b->pools) {
    BuddyPool *pool = buddy_pool_new(b->initial_size);
    if (!pool) {
      fprintf(stderr, "buddy_alloc: initial pool mmap failed\n");
      abort();
    }
    pool->next = NULL;
    b->pools = pool;
    b->total_mapped = pool->total_size;
    b->next_pool_size = b->initial_size * 2;
  }

  /* Compute order from size: smallest power of two >= size */
  uint8_t order = BUDDY_MIN_ORDER;
  while ((1ULL << order) < size)
    order++;

  /* Walk pools, try to allocate from each (newest first) */
  for (BuddyPool *pool = b->pools; pool; pool = pool->next) {
    if (order <= pool->max_order) {
      void *ptr = buddy_pool_alloc(pool, order);
      if (ptr) {
#ifdef DUMP_BUDDY
        buddy_dump(pool, "alloc", (uint8_t *)ptr, order);
#endif
        return ptr;
      }
    }
  }

  /* No pool could satisfy — create a new one */
  size_t new_pool_size = b->next_pool_size;
  size_t needed = 1ULL << order;
  if (new_pool_size < needed)
    new_pool_size = needed;
  /* Check cap */
  if (b->cap && b->total_mapped + new_pool_size > b->cap)
    return NULL;

  BuddyPool *pool = buddy_pool_new(new_pool_size);
  if (!pool)
    return NULL;

  /* Prepend to list so the newest (largest) pool is tried first */
  pool->next = b->pools;
  b->pools = pool;
  b->total_mapped += pool->total_size;
  b->next_pool_size = new_pool_size * 2;

  void *ptr = buddy_pool_alloc(pool, order);
#ifdef DUMP_BUDDY
  if (ptr)
    buddy_dump(pool, "alloc", (uint8_t *)ptr, order);
#endif
  return ptr;
}
|
||||
|
||||
/* Free a block previously returned by buddy_alloc().  `size` must equal
   the size requested at allocation (the order is recomputed from it).
   Coalesces the block with free buddies level by level, then releases
   any pool that becomes completely empty — except when it is the sole
   remaining pool, which stays mapped for reuse. */
static void buddy_free(BuddyAllocator *b, void *ptr, size_t size) {
  /* Recompute the block's order from its size */
  uint8_t order = BUDDY_MIN_ORDER;
  while ((1ULL << order) < size)
    order++;

  /* Find the pool containing ptr; prev_link tracks the link to patch
     if this pool is later unlinked */
  BuddyPool *pool = NULL;
  BuddyPool **prev_link = &b->pools;
  for (BuddyPool *p = b->pools; p; prev_link = &p->next, p = p->next) {
    if ((uint8_t *)ptr >= p->base &&
        (uint8_t *)ptr < p->base + p->total_size) {
      pool = p;
      break;
    }
  }
  if (!pool) {
    /* Foreign or already-unmapped pointer — unrecoverable */
    fprintf(stderr, "buddy_free: ptr %p not in any pool\n", ptr);
    abort();
  }

  uint8_t *block = (uint8_t *)ptr;

#ifdef DUMP_BUDDY
  buddy_dump(pool, "free", block, order);
#endif

  /* Coalesce with buddy: the buddy of a block at `offset` lives at
     offset XOR block_size; merge upward while the buddy is also free */
  while (order < pool->max_order) {
    size_t offset = block - pool->base;
    size_t buddy_offset = offset ^ (1ULL << order);
    uint8_t *buddy_addr = pool->base + buddy_offset;

    /* Check if buddy is on the free list at this level
       (NOTE(review): linear scan — O(list length) per merge step) */
    int level = order - BUDDY_MIN_ORDER;
    BuddyBlock *buddy = NULL;
    for (BuddyBlock *p = pool->free_lists[level]; p; p = p->next) {
      if ((uint8_t *)p == buddy_addr) {
        buddy = p;
        break;
      }
    }
    if (!buddy)
      break;

    /* Remove buddy from free list */
    if (buddy->prev)
      buddy->prev->next = buddy->next;
    else
      pool->free_lists[level] = buddy->next;
    if (buddy->next)
      buddy->next->prev = buddy->prev;

    /* Merge: take the lower address */
    if (buddy_addr < block)
      block = buddy_addr;
    order++;
  }

  /* Add merged block to free list */
  int level = order - BUDDY_MIN_ORDER;
  BuddyBlock *blk = (BuddyBlock *)block;
  blk->order = order;
  blk->is_free = 1;
  blk->prev = NULL;
  blk->next = pool->free_lists[level];
  if (blk->next)
    blk->next->prev = blk;
  pool->free_lists[level] = blk;

  pool->alloc_count--;

  /* Release empty pools (but keep at least one) */
  if (pool->alloc_count == 0 && b->pools != pool) {
    /* Non-head pool: unlink via the saved link and unmap */
    *prev_link = pool->next;
    b->total_mapped -= pool->total_size;
    munmap(pool->base, pool->total_size);
    js_free_rt(pool);
  } else if (pool->alloc_count == 0 && pool->next) {
    /* pool is the head but not the only one — unlink it */
    b->pools = pool->next;
    b->total_mapped -= pool->total_size;
    munmap(pool->base, pool->total_size);
    js_free_rt(pool);
  }
}
|
||||
|
||||
/* Destroy buddy allocator and free all pools */
|
||||
void buddy_destroy (BuddyAllocator *b) {
|
||||
BuddyPool *pool = b->pools;
|
||||
while (pool) {
|
||||
BuddyPool *next = pool->next;
|
||||
munmap(pool->base, pool->total_size);
|
||||
js_free_rt(pool);
|
||||
pool = next;
|
||||
}
|
||||
b->pools = NULL;
|
||||
b->total_mapped = 0;
|
||||
}
|
||||
|
||||
/* Maximum block size the buddy allocator can hand out */
|
||||
static size_t buddy_max_block(BuddyAllocator *b) {
|
||||
return b->cap ? b->cap : SIZE_MAX;
|
||||
}
|
||||
|
||||
/* ============================================================
|
||||
Heap block allocation wrappers
|
||||
In POISON_HEAP mode, use malloc so poisoned memory stays poisoned.
|
||||
Otherwise use buddy allocator for efficiency.
|
||||
============================================================ */
|
||||
|
||||
/* Allocate a heap block of `size` bytes for a context's bump allocator.
   POISON_HEAP builds mmap fresh pages (size rounded to a whole page) so
   freed blocks can stay poisoned at never-reused addresses; otherwise
   the request goes through the buddy allocator.  Returns NULL on failure. */
static void *heap_block_alloc(JSRuntime *rt, size_t size) {
#ifdef POISON_HEAP
  (void)rt;
  size = poison_page_align(size);
  void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_ANON | MAP_PRIVATE, -1, 0);
  return (p == MAP_FAILED) ? NULL : p;
#else
  return buddy_alloc(&rt->buddy, size);
#endif
}
|
||||
|
||||
/* Release a heap block obtained from heap_block_alloc().
   In POISON_HEAP mode the pages are deliberately leaked (never munmap'd)
   so ASan can keep them poisoned; otherwise the block returns to the
   buddy allocator. */
static void heap_block_free(JSRuntime *rt, void *ptr, size_t size) {
#ifdef POISON_HEAP
  (void)rt;
  /* mmap'd memory is intentionally never munmap'd so virtual addresses
     are never reused (preventing stale pointer aliasing). Pages stay
     resident because chase() reads forwarding pointers from old blocks. */
  gc_poison_region(ptr, size);
#else
  buddy_free(&rt->buddy, ptr, size);
#endif
}
|
||||
|
||||
/* ============================================================
|
||||
@@ -1189,19 +1451,13 @@ int ctx_gc (JSContext *ctx, int allow_grow, size_t alloc_size) {
|
||||
if (allow_grow) {
|
||||
if (ctx->next_block_size > new_size)
|
||||
new_size = ctx->next_block_size;
|
||||
while (new_size < alloc_size && new_size < (1ULL << BUDDY_MAX_ORDER))
|
||||
while (new_size < alloc_size && new_size < buddy_max_block(&ctx->rt->buddy))
|
||||
new_size *= 2;
|
||||
}
|
||||
#ifdef POISON_HEAP
|
||||
new_size = poison_page_align(new_size);
|
||||
#endif
|
||||
uint8_t *new_block = heap_block_alloc (rt, new_size);
|
||||
if (!new_block) {
|
||||
/* Try with same size */
|
||||
new_size = ctx->current_block_size;
|
||||
#ifdef POISON_HEAP
|
||||
new_size = poison_page_align(new_size);
|
||||
#endif
|
||||
new_block = heap_block_alloc (rt, new_size);
|
||||
if (!new_block) return -1;
|
||||
}
|
||||
@@ -1329,7 +1585,7 @@ int ctx_gc (JSContext *ctx, int allow_grow, size_t alloc_size) {
|
||||
#endif
|
||||
if (allow_grow && recovered > 0 && old_used > 0 && recovered < old_used / 5) {
|
||||
size_t doubled = new_size * 2;
|
||||
if (doubled <= (1ULL << BUDDY_MAX_ORDER)) {
|
||||
if (doubled <= buddy_max_block(&ctx->rt->buddy)) {
|
||||
ctx->next_block_size = doubled;
|
||||
#ifdef DUMP_GC
|
||||
will_grow = 1;
|
||||
@@ -1386,6 +1642,9 @@ JSRuntime *JS_NewRuntime (void) {
|
||||
if (!rt) return NULL;
|
||||
memset (rt, 0, sizeof (*rt));
|
||||
|
||||
rt->buddy.initial_size = BUDDY_DEFAULT_POOL;
|
||||
rt->buddy.next_pool_size = BUDDY_DEFAULT_POOL;
|
||||
|
||||
return rt;
|
||||
}
|
||||
|
||||
@@ -1393,6 +1652,12 @@ void JS_SetMemoryLimit (JSRuntime *rt, size_t limit) {
|
||||
rt->malloc_limit = limit;
|
||||
}
|
||||
|
||||
/* Configure buddy-allocator pool sizing: `initial` is the first pool's
   size (subsequent pools double), `cap` bounds total mapped memory
   (0 = no cap).  Call before the first allocation.  The initial size is
   rounded up to a power of two of at least the minimum buddy block —
   previously a zero or non-power-of-two value was stored verbatim and
   propagated into pool creation. */
void JS_SetPoolSize (JSRuntime *rt, size_t initial, size_t cap) {
  size_t rounded = (size_t)1 << BUDDY_MIN_ORDER;
  while (rounded < initial)
    rounded <<= 1;
  rt->buddy.initial_size = rounded;
  rt->buddy.next_pool_size = rounded;
  rt->buddy.cap = cap;
}
|
||||
|
||||
/* Helpers to call system memory functions (for memory allocated by external libs) */
|
||||
|
||||
#define malloc(s) malloc_is_forbidden (s)
|
||||
@@ -1443,7 +1708,7 @@ JSContext *JS_NewContextRawWithHeapSize (JSRuntime *rt, size_t heap_size) {
|
||||
|
||||
/* Round up to power of 2 for buddy allocator */
|
||||
size_t actual = min_size;
|
||||
while (actual < heap_size && actual < (1ULL << BUDDY_MAX_ORDER)) {
|
||||
while (actual < heap_size && actual < buddy_max_block(&rt->buddy)) {
|
||||
actual <<= 1;
|
||||
}
|
||||
heap_size = actual;
|
||||
@@ -1486,9 +1751,6 @@ JSContext *JS_NewContextRawWithHeapSize (JSRuntime *rt, size_t heap_size) {
|
||||
}
|
||||
|
||||
/* Allocate initial heap block for bump allocation */
|
||||
#ifdef POISON_HEAP
|
||||
heap_size = poison_page_align(heap_size);
|
||||
#endif
|
||||
ctx->current_block_size = heap_size;
|
||||
ctx->next_block_size = ctx->current_block_size;
|
||||
ctx->heap_base = heap_block_alloc (rt, ctx->current_block_size);
|
||||
|
||||
Reference in New Issue
Block a user