further refactor
This commit is contained in:
@@ -1,86 +0,0 @@
|
||||
// bench_arith.ce — arithmetic and number crunching benchmark
|
||||
// Tests: integer add/mul, float ops, loop counter overhead, conditionals
|
||||
|
||||
var time = use('time')
|
||||
|
||||
def iterations = 2000000
|
||||
|
||||
// 1. Integer sum in tight loop
|
||||
function bench_int_sum() {
|
||||
var i = 0
|
||||
var s = 0
|
||||
for (i = 0; i < iterations; i++) {
|
||||
s = s + i
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// 2. Integer multiply + mod (sieve-like)
|
||||
function bench_int_mul_mod() {
|
||||
var i = 0
|
||||
var s = 0
|
||||
for (i = 1; i < iterations; i++) {
|
||||
s = s + (i * 7 % 1000)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// 3. Float math — accumulate with division
|
||||
function bench_float_arith() {
|
||||
var i = 0
|
||||
var s = 0.5
|
||||
for (i = 1; i < iterations; i++) {
|
||||
s = s + 1.0 / i
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// 4. Nested loop with branch (fizzbuzz-like counter)
|
||||
function bench_branch() {
|
||||
var i = 0
|
||||
var fizz = 0
|
||||
var buzz = 0
|
||||
var fizzbuzz = 0
|
||||
for (i = 1; i <= iterations; i++) {
|
||||
if (i % 15 == 0) {
|
||||
fizzbuzz = fizzbuzz + 1
|
||||
} else if (i % 3 == 0) {
|
||||
fizz = fizz + 1
|
||||
} else if (i % 5 == 0) {
|
||||
buzz = buzz + 1
|
||||
}
|
||||
}
|
||||
return fizz + buzz + fizzbuzz
|
||||
}
|
||||
|
||||
// 5. Nested loop (small inner)
|
||||
function bench_nested() {
|
||||
var i = 0
|
||||
var j = 0
|
||||
var s = 0
|
||||
def outer = 5000
|
||||
def inner = 5000
|
||||
for (i = 0; i < outer; i++) {
|
||||
for (j = 0; j < inner; j++) {
|
||||
s = s + 1
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Run each and print timing
|
||||
function run(name, fn) {
|
||||
var start = time.number()
|
||||
var result = fn()
|
||||
var elapsed = time.number() - start
|
||||
var ms = whole(elapsed * 100000) / 100
|
||||
log.console(` ${name}: ${ms} ms (result: ${result})`)
|
||||
}
|
||||
|
||||
log.console("=== Arithmetic Benchmark ===")
|
||||
log.console(` iterations: ${iterations}`)
|
||||
run("int_sum ", bench_int_sum)
|
||||
run("int_mul_mod ", bench_int_mul_mod)
|
||||
run("float_arith ", bench_float_arith)
|
||||
run("branch ", bench_branch)
|
||||
run("nested_loop ", bench_nested)
|
||||
@@ -1,67 +0,0 @@
|
||||
// bench_arith.js — arithmetic and number crunching benchmark (QuickJS)
|
||||
|
||||
const iterations = 2000000;
|
||||
|
||||
function bench_int_sum() {
|
||||
let s = 0;
|
||||
for (let i = 0; i < iterations; i++) {
|
||||
s = s + i;
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
function bench_int_mul_mod() {
|
||||
let s = 0;
|
||||
for (let i = 1; i < iterations; i++) {
|
||||
s = s + (i * 7 % 1000);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
function bench_float_arith() {
|
||||
let s = 0.5;
|
||||
for (let i = 1; i < iterations; i++) {
|
||||
s = s + 1.0 / i;
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
function bench_branch() {
|
||||
let fizz = 0, buzz = 0, fizzbuzz = 0;
|
||||
for (let i = 1; i <= iterations; i++) {
|
||||
if (i % 15 === 0) {
|
||||
fizzbuzz = fizzbuzz + 1;
|
||||
} else if (i % 3 === 0) {
|
||||
fizz = fizz + 1;
|
||||
} else if (i % 5 === 0) {
|
||||
buzz = buzz + 1;
|
||||
}
|
||||
}
|
||||
return fizz + buzz + fizzbuzz;
|
||||
}
|
||||
|
||||
function bench_nested() {
|
||||
let s = 0;
|
||||
const outer = 5000, inner = 5000;
|
||||
for (let i = 0; i < outer; i++) {
|
||||
for (let j = 0; j < inner; j++) {
|
||||
s = s + 1;
|
||||
}
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
function run(name, fn) {
|
||||
const start = performance.now();
|
||||
const result = fn();
|
||||
const elapsed = performance.now() - start;
|
||||
console.log(` ${name}: ${elapsed.toFixed(2)} ms (result: ${result})`);
|
||||
}
|
||||
|
||||
console.log("=== Arithmetic Benchmark ===");
|
||||
console.log(` iterations: ${iterations}`);
|
||||
run("int_sum ", bench_int_sum);
|
||||
run("int_mul_mod ", bench_int_mul_mod);
|
||||
run("float_arith ", bench_float_arith);
|
||||
run("branch ", bench_branch);
|
||||
run("nested_loop ", bench_nested);
|
||||
@@ -1,68 +0,0 @@
|
||||
-- bench_arith.lua — arithmetic and number crunching benchmark (Lua)
|
||||
|
||||
local iterations = 2000000
|
||||
local clock = os.clock
|
||||
|
||||
--- Tight-loop integer accumulation: sums 0 .. iterations-1.
-- @treturn number the arithmetic-series total
local function bench_int_sum()
    local total = 0
    for n = 0, iterations - 1 do
        total = total + n
    end
    return total
end
|
||||
|
||||
--- Multiply-then-modulo accumulation over 1 .. iterations-1
-- (sieve-like integer workload).
-- @treturn number sum of (n * 7) % 1000 terms
local function bench_int_mul_mod()
    local acc = 0
    for n = 1, iterations - 1 do
        acc = acc + (n * 7) % 1000
    end
    return acc
end
|
||||
|
||||
--- Float division workload: 0.5 plus a harmonic-style series of 1/n.
-- @treturn number the accumulated float
local function bench_float_arith()
    local acc = 0.5
    for n = 1, iterations - 1 do
        acc = acc + 1.0 / n
    end
    return acc
end
|
||||
|
||||
--- FizzBuzz-style branching workload: counts multiples of 15, 3, 5
-- (in that priority order) over 1 .. iterations.
-- @treturn number total number of counted multiples
local function bench_branch()
    local threes, fives, fifteens = 0, 0, 0
    for n = 1, iterations do
        if n % 15 == 0 then
            fifteens = fifteens + 1
        elseif n % 3 == 0 then
            threes = threes + 1
        elseif n % 5 == 0 then
            fives = fives + 1
        end
    end
    return threes + fives + fifteens
end
|
||||
|
||||
--- Nested-loop overhead: 5000 x 5000 unit increments.
-- @treturn number 25,000,000 (outer * inner)
local function bench_nested()
    local outer_n, inner_n = 5000, 5000
    local count = 0
    for _ = 1, outer_n do
        for _ = 1, inner_n do
            count = count + 1
        end
    end
    return count
end
|
||||
|
||||
--- Execute fn once under os.clock timing and print its label,
-- elapsed milliseconds, and result.
-- @tparam string name label printed beside the timing
-- @tparam function fn zero-argument benchmark to invoke
local function run(name, fn)
    local t0 = clock()
    local value = fn()
    local ms = (clock() - t0) * 1000
    print(string.format("  %s: %.2f ms (result: %s)", name, ms, tostring(value)))
end
|
||||
|
||||
print("=== Arithmetic Benchmark ===")
|
||||
print(string.format(" iterations: %d", iterations))
|
||||
run("int_sum ", bench_int_sum)
|
||||
run("int_mul_mod ", bench_int_mul_mod)
|
||||
run("float_arith ", bench_float_arith)
|
||||
run("branch ", bench_branch)
|
||||
run("nested_loop ", bench_nested)
|
||||
113
bench_array.ce
113
bench_array.ce
@@ -1,113 +0,0 @@
|
||||
// bench_array.ce — array operation benchmark
|
||||
// Tests: sequential access, push/build, index write, sum reduction, sort
|
||||
|
||||
var time = use('time')
|
||||
|
||||
def size = 100000
|
||||
|
||||
// 1. Build array with push
|
||||
function bench_push() {
|
||||
var a = []
|
||||
var i = 0
|
||||
for (i = 0; i < size; i++) {
|
||||
a[] = i
|
||||
}
|
||||
return length(a)
|
||||
}
|
||||
|
||||
// 2. Index write into preallocated array
|
||||
function bench_index_write() {
|
||||
var a = array(size, 0)
|
||||
var i = 0
|
||||
for (i = 0; i < size; i++) {
|
||||
a[i] = i
|
||||
}
|
||||
return a[size - 1]
|
||||
}
|
||||
|
||||
// 3. Sequential read and sum
|
||||
function bench_seq_read() {
|
||||
var a = array(size, 0)
|
||||
var i = 0
|
||||
for (i = 0; i < size; i++) {
|
||||
a[i] = i
|
||||
}
|
||||
var s = 0
|
||||
for (i = 0; i < size; i++) {
|
||||
s = s + a[i]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// 4. Reverse array in-place
|
||||
function bench_reverse() {
|
||||
var a = array(size, 0)
|
||||
var i = 0
|
||||
for (i = 0; i < size; i++) {
|
||||
a[i] = i
|
||||
}
|
||||
var lo = 0
|
||||
var hi = size - 1
|
||||
var tmp = 0
|
||||
while (lo < hi) {
|
||||
tmp = a[lo]
|
||||
a[lo] = a[hi]
|
||||
a[hi] = tmp
|
||||
lo = lo + 1
|
||||
hi = hi - 1
|
||||
}
|
||||
return a[0]
|
||||
}
|
||||
|
||||
// 5. Nested array access (matrix-like, 300x300)
|
||||
function bench_matrix() {
|
||||
def n = 300
|
||||
var mat = array(n, null)
|
||||
var i = 0
|
||||
var j = 0
|
||||
for (i = 0; i < n; i++) {
|
||||
mat[i] = array(n, 0)
|
||||
for (j = 0; j < n; j++) {
|
||||
mat[i][j] = i * n + j
|
||||
}
|
||||
}
|
||||
// sum diagonal
|
||||
var s = 0
|
||||
for (i = 0; i < n; i++) {
|
||||
s = s + mat[i][i]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// 6. filter-like: count evens
|
||||
function bench_filter_count() {
|
||||
var a = array(size, 0)
|
||||
var i = 0
|
||||
for (i = 0; i < size; i++) {
|
||||
a[i] = i
|
||||
}
|
||||
var count = 0
|
||||
for (i = 0; i < size; i++) {
|
||||
if (a[i] % 2 == 0) {
|
||||
count = count + 1
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
function run(name, fn) {
|
||||
var start = time.number()
|
||||
var result = fn()
|
||||
var elapsed = time.number() - start
|
||||
var ms = whole(elapsed * 100000) / 100
|
||||
log.console(` ${name}: ${ms} ms (result: ${result})`)
|
||||
}
|
||||
|
||||
log.console("=== Array Benchmark ===")
|
||||
log.console(` size: ${size}`)
|
||||
run("push ", bench_push)
|
||||
run("index_write ", bench_index_write)
|
||||
run("seq_read_sum ", bench_seq_read)
|
||||
run("reverse ", bench_reverse)
|
||||
run("matrix_300 ", bench_matrix)
|
||||
run("filter_count ", bench_filter_count)
|
||||
@@ -1,93 +0,0 @@
|
||||
// bench_array.js — array operation benchmark (QuickJS)
|
||||
|
||||
const size = 100000;
|
||||
|
||||
function bench_push() {
|
||||
let a = [];
|
||||
for (let i = 0; i < size; i++) {
|
||||
a.push(i);
|
||||
}
|
||||
return a.length;
|
||||
}
|
||||
|
||||
function bench_index_write() {
|
||||
let a = new Array(size).fill(0);
|
||||
for (let i = 0; i < size; i++) {
|
||||
a[i] = i;
|
||||
}
|
||||
return a[size - 1];
|
||||
}
|
||||
|
||||
function bench_seq_read() {
|
||||
let a = new Array(size).fill(0);
|
||||
for (let i = 0; i < size; i++) {
|
||||
a[i] = i;
|
||||
}
|
||||
let s = 0;
|
||||
for (let i = 0; i < size; i++) {
|
||||
s = s + a[i];
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
function bench_reverse() {
|
||||
let a = new Array(size).fill(0);
|
||||
for (let i = 0; i < size; i++) {
|
||||
a[i] = i;
|
||||
}
|
||||
let lo = 0, hi = size - 1, tmp;
|
||||
while (lo < hi) {
|
||||
tmp = a[lo];
|
||||
a[lo] = a[hi];
|
||||
a[hi] = tmp;
|
||||
lo = lo + 1;
|
||||
hi = hi - 1;
|
||||
}
|
||||
return a[0];
|
||||
}
|
||||
|
||||
function bench_matrix() {
|
||||
const n = 300;
|
||||
let mat = new Array(n);
|
||||
for (let i = 0; i < n; i++) {
|
||||
mat[i] = new Array(n).fill(0);
|
||||
for (let j = 0; j < n; j++) {
|
||||
mat[i][j] = i * n + j;
|
||||
}
|
||||
}
|
||||
let s = 0;
|
||||
for (let i = 0; i < n; i++) {
|
||||
s = s + mat[i][i];
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
function bench_filter_count() {
|
||||
let a = new Array(size).fill(0);
|
||||
for (let i = 0; i < size; i++) {
|
||||
a[i] = i;
|
||||
}
|
||||
let count = 0;
|
||||
for (let i = 0; i < size; i++) {
|
||||
if (a[i] % 2 === 0) {
|
||||
count = count + 1;
|
||||
}
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
function run(name, fn) {
|
||||
const start = performance.now();
|
||||
const result = fn();
|
||||
const elapsed = performance.now() - start;
|
||||
console.log(` ${name}: ${elapsed.toFixed(2)} ms (result: ${result})`);
|
||||
}
|
||||
|
||||
console.log("=== Array Benchmark ===");
|
||||
console.log(` size: ${size}`);
|
||||
run("push ", bench_push);
|
||||
run("index_write ", bench_index_write);
|
||||
run("seq_read_sum ", bench_seq_read);
|
||||
run("reverse ", bench_reverse);
|
||||
run("matrix_300 ", bench_matrix);
|
||||
run("filter_count ", bench_filter_count);
|
||||
@@ -1,93 +0,0 @@
|
||||
-- bench_array.lua — array operation benchmark (Lua)
|
||||
|
||||
local size = 100000
|
||||
local clock = os.clock
|
||||
|
||||
--- Grow an array one element at a time (push-style append of 0 .. size-1).
-- @treturn number final array length
local function bench_push()
    local list = {}
    local len = 0
    for v = 0, size - 1 do
        len = len + 1
        list[len] = v
    end
    return #list
end
|
||||
|
||||
--- Overwrite every slot of a prefilled size-element array by index.
-- @treturn number value stored in the last slot (size - 1)
local function bench_index_write()
    local buf = {}
    for idx = 1, size do
        buf[idx] = 0
    end
    for idx = 1, size do
        buf[idx] = idx - 1
    end
    return buf[size]
end
|
||||
|
||||
--- Fill a size-element array sequentially, then read it back and sum.
-- @treturn number sum of 0 .. size-1
local function bench_seq_read()
    local buf = {}
    for idx = 1, size do
        buf[idx] = idx - 1
    end
    local total = 0
    for idx = 1, size do
        total = total + buf[idx]
    end
    return total
end
|
||||
|
||||
--- Reverse a size-element array in place with a two-pointer swap.
-- @treturn number new first element (originally the last: size - 1)
local function bench_reverse()
    local buf = {}
    for idx = 1, size do
        buf[idx] = idx - 1
    end
    local left, right = 1, size
    while left < right do
        buf[left], buf[right] = buf[right], buf[left]
        left = left + 1
        right = right - 1
    end
    return buf[1]
end
|
||||
|
||||
--- Build a 300x300 matrix of running indices, then sum its diagonal.
-- Cell (row, col) holds (row-1)*n + (col-1), matching 0-based layouts
-- used by the sibling-language ports of this benchmark.
-- @treturn number the diagonal (trace) sum
local function bench_matrix()
    local n = 300
    local grid = {}
    for row = 1, n do
        local line = {}
        for col = 1, n do
            line[col] = (row - 1) * n + (col - 1)
        end
        grid[row] = line
    end
    local trace = 0
    for row = 1, n do
        trace = trace + grid[row][row]
    end
    return trace
end
|
||||
|
||||
--- Filter-style scan: count the even values in a 0 .. size-1 array.
-- @treturn number count of even elements
local function bench_filter_count()
    local buf = {}
    for idx = 1, size do
        buf[idx] = idx - 1
    end
    local evens = 0
    for idx = 1, size do
        if buf[idx] % 2 == 0 then
            evens = evens + 1
        end
    end
    return evens
end
|
||||
|
||||
--- Execute fn once under os.clock timing and print its label,
-- elapsed milliseconds, and result.
-- @tparam string name label printed beside the timing
-- @tparam function fn zero-argument benchmark to invoke
local function run(name, fn)
    local t0 = clock()
    local value = fn()
    local ms = (clock() - t0) * 1000
    print(string.format("  %s: %.2f ms (result: %s)", name, ms, tostring(value)))
end
|
||||
|
||||
print("=== Array Benchmark ===")
|
||||
print(string.format(" size: %d", size))
|
||||
run("push ", bench_push)
|
||||
run("index_write ", bench_index_write)
|
||||
run("seq_read_sum ", bench_seq_read)
|
||||
run("reverse ", bench_reverse)
|
||||
run("matrix_300 ", bench_matrix)
|
||||
run("filter_count ", bench_filter_count)
|
||||
21
bench_fib.ce
21
bench_fib.ce
@@ -1,21 +0,0 @@
|
||||
var time = use('time')
|
||||
|
||||
function fib(n) {
|
||||
if (n < 2) {
|
||||
return n
|
||||
}
|
||||
return fib(n - 1) + fib(n - 2)
|
||||
}
|
||||
|
||||
function run(name, fn) {
|
||||
var start = time.number()
|
||||
var result = fn()
|
||||
var elapsed = time.number() - start
|
||||
var ms = whole(elapsed * 100000) / 100
|
||||
log.console(` ${name}: ${ms} ms (result: ${result})`)
|
||||
}
|
||||
|
||||
log.console("=== Cell fib ===")
|
||||
run("fib(25)", function() { return fib(25) })
|
||||
run("fib(30)", function() { return fib(30) })
|
||||
run("fib(35)", function() { return fib(35) })
|
||||
194
bench_native.ce
194
bench_native.ce
@@ -1,194 +0,0 @@
|
||||
// bench_native.ce — compare VM vs native execution speed
|
||||
//
|
||||
// Usage:
|
||||
// cell --dev bench_native.ce <module.cm> [iterations]
|
||||
//
|
||||
// Compiles (if needed) and benchmarks a module via both VM and native dylib.
|
||||
// Reports median/mean timing per benchmark + speedup ratio.
|
||||
|
||||
var os = use('internal/os')
|
||||
var fd = use('fd')
|
||||
|
||||
if (length(args) < 1) {
|
||||
log.bench('usage: cell --dev bench_native.ce <module.cm> [iterations]')
|
||||
return
|
||||
}
|
||||
|
||||
var file = args[0]
|
||||
var name = file
|
||||
if (ends_with(name, '.cm')) {
|
||||
name = text(name, 0, length(name) - 3)
|
||||
}
|
||||
|
||||
var iterations = 11
|
||||
if (length(args) > 1) {
|
||||
iterations = number(args[1])
|
||||
}
|
||||
|
||||
def WARMUP = 3
|
||||
|
||||
var safe = replace(replace(name, '/', '_'), '-', '_')
|
||||
var symbol = 'js_' + safe + '_use'
|
||||
var dylib_path = './' + file + '.dylib'
|
||||
|
||||
// --- Statistics ---
|
||||
|
||||
var stat_sort = function(arr) {
|
||||
return sort(arr)
|
||||
}
|
||||
|
||||
var stat_median = function(arr) {
|
||||
if (length(arr) == 0) return 0
|
||||
var sorted = stat_sort(arr)
|
||||
var mid = floor(length(arr) / 2)
|
||||
if (length(arr) % 2 == 0) {
|
||||
return (sorted[mid - 1] + sorted[mid]) / 2
|
||||
}
|
||||
return sorted[mid]
|
||||
}
|
||||
|
||||
var stat_mean = function(arr) {
|
||||
if (length(arr) == 0) return 0
|
||||
var sum = reduce(arr, function(a, b) { return a + b })
|
||||
return sum / length(arr)
|
||||
}
|
||||
|
||||
var format_ns = function(ns) {
|
||||
if (ns < 1000) return text(round(ns)) + 'ns'
|
||||
if (ns < 1000000) return text(round(ns / 1000 * 100) / 100) + 'us'
|
||||
if (ns < 1000000000) return text(round(ns / 1000000 * 100) / 100) + 'ms'
|
||||
return text(round(ns / 1000000000 * 100) / 100) + 's'
|
||||
}
|
||||
|
||||
// --- Collect benchmarks from module ---
|
||||
|
||||
var collect_benches = function(mod) {
|
||||
var benches = []
|
||||
var keys = null
|
||||
var i = 0
|
||||
var k = null
|
||||
if (is_function(mod)) {
|
||||
push(benches, {name: 'main', fn: mod})
|
||||
} else if (is_object(mod)) {
|
||||
keys = array(mod)
|
||||
i = 0
|
||||
while (i < length(keys)) {
|
||||
k = keys[i]
|
||||
if (is_function(mod[k])) {
|
||||
push(benches, {name: k, fn: mod[k]})
|
||||
}
|
||||
i = i + 1
|
||||
}
|
||||
}
|
||||
return benches
|
||||
}
|
||||
|
||||
// --- Run one benchmark function ---
|
||||
|
||||
var run_bench = function(fn, label) {
|
||||
var samples = []
|
||||
var i = 0
|
||||
var t1 = 0
|
||||
var t2 = 0
|
||||
|
||||
// warmup
|
||||
i = 0
|
||||
while (i < WARMUP) {
|
||||
fn(1)
|
||||
i = i + 1
|
||||
}
|
||||
|
||||
// collect samples
|
||||
i = 0
|
||||
while (i < iterations) {
|
||||
t1 = os.now()
|
||||
fn(1)
|
||||
t2 = os.now()
|
||||
push(samples, t2 - t1)
|
||||
i = i + 1
|
||||
}
|
||||
|
||||
return {
|
||||
label: label,
|
||||
median: stat_median(samples),
|
||||
mean: stat_mean(samples)
|
||||
}
|
||||
}
|
||||
|
||||
// --- Load VM module ---
|
||||
|
||||
log.bench('loading VM module: ' + file)
|
||||
var vm_mod = use(name)
|
||||
var vm_benches = collect_benches(vm_mod)
|
||||
|
||||
if (length(vm_benches) == 0) {
|
||||
log.bench('no benchmarkable functions found in ' + file)
|
||||
return
|
||||
}
|
||||
|
||||
// --- Load native module ---
|
||||
|
||||
var native_mod = null
|
||||
var native_benches = []
|
||||
var has_native = fd.is_file(dylib_path)
|
||||
var lib = null
|
||||
|
||||
if (has_native) {
|
||||
log.bench('loading native module: ' + dylib_path)
|
||||
lib = os.dylib_open(dylib_path)
|
||||
native_mod = os.dylib_symbol(lib, symbol)
|
||||
native_benches = collect_benches(native_mod)
|
||||
} else {
|
||||
log.bench('no ' + dylib_path + ' found -- VM-only benchmarking')
|
||||
log.bench(' hint: cell --dev compile.ce ' + file)
|
||||
}
|
||||
|
||||
// --- Run benchmarks ---
|
||||
|
||||
log.bench('')
|
||||
log.bench('samples: ' + text(iterations) + ' (warmup: ' + text(WARMUP) + ')')
|
||||
log.bench('')
|
||||
|
||||
var pad = function(s, n) {
|
||||
var result = s
|
||||
while (length(result) < n) result = result + ' '
|
||||
return result
|
||||
}
|
||||
|
||||
var i = 0
|
||||
var b = null
|
||||
var vm_result = null
|
||||
var j = 0
|
||||
var found = false
|
||||
var nat_result = null
|
||||
var speedup = 0
|
||||
while (i < length(vm_benches)) {
|
||||
b = vm_benches[i]
|
||||
vm_result = run_bench(b.fn, 'vm')
|
||||
|
||||
log.bench(pad(b.name, 20) + ' VM: ' + pad(format_ns(vm_result.median), 12) + ' (median) ' + format_ns(vm_result.mean) + ' (mean)')
|
||||
|
||||
// find matching native bench
|
||||
j = 0
|
||||
found = false
|
||||
while (j < length(native_benches)) {
|
||||
if (native_benches[j].name == b.name) {
|
||||
nat_result = run_bench(native_benches[j].fn, 'native')
|
||||
log.bench(pad('', 20) + ' NT: ' + pad(format_ns(nat_result.median), 12) + ' (median) ' + format_ns(nat_result.mean) + ' (mean)')
|
||||
|
||||
if (nat_result.median > 0) {
|
||||
speedup = vm_result.median / nat_result.median
|
||||
log.bench(pad('', 20) + ' speedup: ' + text(round(speedup * 100) / 100) + 'x')
|
||||
}
|
||||
found = true
|
||||
}
|
||||
j = j + 1
|
||||
}
|
||||
|
||||
if (has_native && !found) {
|
||||
log.bench(pad('', 20) + ' NT: (no matching function)')
|
||||
}
|
||||
|
||||
log.bench('')
|
||||
i = i + 1
|
||||
}
|
||||
118
bench_object.ce
118
bench_object.ce
@@ -1,118 +0,0 @@
|
||||
// bench_object.ce — object/record and string benchmark
|
||||
// Tests: property read/write, string concat, string interpolation, method-like dispatch
|
||||
|
||||
var time = use('time')
|
||||
|
||||
def iterations = 200000
|
||||
|
||||
// 1. Record create + property write
|
||||
function bench_record_create() {
|
||||
var i = 0
|
||||
var r = null
|
||||
for (i = 0; i < iterations; i++) {
|
||||
r = {x: i, y: i + 1, z: i + 2}
|
||||
}
|
||||
return r.z
|
||||
}
|
||||
|
||||
// 2. Property read in loop
|
||||
function bench_prop_read() {
|
||||
var obj = {x: 10, y: 20, z: 30, w: 40}
|
||||
var i = 0
|
||||
var s = 0
|
||||
for (i = 0; i < iterations; i++) {
|
||||
s = s + obj.x + obj.y + obj.z + obj.w
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// 3. Dynamic property access (computed keys)
|
||||
function bench_dynamic_prop() {
|
||||
var obj = {a: 1, b: 2, c: 3, d: 4, e: 5}
|
||||
var keys = ["a", "b", "c", "d", "e"]
|
||||
var i = 0
|
||||
var j = 0
|
||||
var s = 0
|
||||
for (i = 0; i < iterations; i++) {
|
||||
for (j = 0; j < 5; j++) {
|
||||
s = s + obj[keys[j]]
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// 4. String concatenation
|
||||
function bench_string_concat() {
|
||||
var i = 0
|
||||
var s = ""
|
||||
def n = 10000
|
||||
for (i = 0; i < n; i++) {
|
||||
s = s + "x"
|
||||
}
|
||||
return length(s)
|
||||
}
|
||||
|
||||
// 5. String interpolation
|
||||
function bench_interpolation() {
|
||||
var i = 0
|
||||
var s = ""
|
||||
def n = 50000
|
||||
for (i = 0; i < n; i++) {
|
||||
s = `item_${i}`
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// 6. Prototype chain / method-like call
|
||||
function make_point(x, y) {
|
||||
return {
|
||||
x: x,
|
||||
y: y,
|
||||
sum: function(self) {
|
||||
return self.x + self.y
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function bench_method_call() {
|
||||
var p = make_point(3, 4)
|
||||
var i = 0
|
||||
var s = 0
|
||||
for (i = 0; i < iterations; i++) {
|
||||
s = s + p.sum(p)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// 7. Function call overhead (simple recursion depth)
|
||||
function fib(n) {
|
||||
if (n <= 1) return n
|
||||
return fib(n - 1) + fib(n - 2)
|
||||
}
|
||||
|
||||
function bench_fncall() {
|
||||
var i = 0
|
||||
var s = 0
|
||||
for (i = 0; i < 20; i++) {
|
||||
s = s + fib(25)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
function run(name, fn) {
|
||||
var start = time.number()
|
||||
var result = fn()
|
||||
var elapsed = time.number() - start
|
||||
var ms = whole(elapsed * 100000) / 100
|
||||
log.console(` ${name}: ${ms} ms (result: ${result})`)
|
||||
}
|
||||
|
||||
log.console("=== Object / String / Call Benchmark ===")
|
||||
log.console(` iterations: ${iterations}`)
|
||||
run("record_create ", bench_record_create)
|
||||
run("prop_read ", bench_prop_read)
|
||||
run("dynamic_prop ", bench_dynamic_prop)
|
||||
run("string_concat ", bench_string_concat)
|
||||
run("interpolation ", bench_interpolation)
|
||||
run("method_call ", bench_method_call)
|
||||
run("fncall_fib25 ", bench_fncall)
|
||||
@@ -1,99 +0,0 @@
|
||||
// bench_object.js — object/string/call benchmark (QuickJS)
|
||||
|
||||
const iterations = 200000;
|
||||
|
||||
function bench_record_create() {
|
||||
let r;
|
||||
for (let i = 0; i < iterations; i++) {
|
||||
r = {x: i, y: i + 1, z: i + 2};
|
||||
}
|
||||
return r.z;
|
||||
}
|
||||
|
||||
function bench_prop_read() {
|
||||
const obj = {x: 10, y: 20, z: 30, w: 40};
|
||||
let s = 0;
|
||||
for (let i = 0; i < iterations; i++) {
|
||||
s = s + obj.x + obj.y + obj.z + obj.w;
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
function bench_dynamic_prop() {
|
||||
const obj = {a: 1, b: 2, c: 3, d: 4, e: 5};
|
||||
const keys = ["a", "b", "c", "d", "e"];
|
||||
let s = 0;
|
||||
for (let i = 0; i < iterations; i++) {
|
||||
for (let j = 0; j < 5; j++) {
|
||||
s = s + obj[keys[j]];
|
||||
}
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
function bench_string_concat() {
|
||||
let s = "";
|
||||
const n = 10000;
|
||||
for (let i = 0; i < n; i++) {
|
||||
s = s + "x";
|
||||
}
|
||||
return s.length;
|
||||
}
|
||||
|
||||
function bench_interpolation() {
|
||||
let s = "";
|
||||
const n = 50000;
|
||||
for (let i = 0; i < n; i++) {
|
||||
s = `item_${i}`;
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
function make_point(x, y) {
|
||||
return {
|
||||
x: x,
|
||||
y: y,
|
||||
sum: function(self) {
|
||||
return self.x + self.y;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
function bench_method_call() {
|
||||
const p = make_point(3, 4);
|
||||
let s = 0;
|
||||
for (let i = 0; i < iterations; i++) {
|
||||
s = s + p.sum(p);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
function fib(n) {
|
||||
if (n <= 1) return n;
|
||||
return fib(n - 1) + fib(n - 2);
|
||||
}
|
||||
|
||||
function bench_fncall() {
|
||||
let s = 0;
|
||||
for (let i = 0; i < 20; i++) {
|
||||
s = s + fib(25);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
function run(name, fn) {
|
||||
const start = performance.now();
|
||||
const result = fn();
|
||||
const elapsed = performance.now() - start;
|
||||
console.log(` ${name}: ${elapsed.toFixed(2)} ms (result: ${result})`);
|
||||
}
|
||||
|
||||
console.log("=== Object / String / Call Benchmark ===");
|
||||
console.log(` iterations: ${iterations}`);
|
||||
run("record_create ", bench_record_create);
|
||||
run("prop_read ", bench_prop_read);
|
||||
run("dynamic_prop ", bench_dynamic_prop);
|
||||
run("string_concat ", bench_string_concat);
|
||||
run("interpolation ", bench_interpolation);
|
||||
run("method_call ", bench_method_call);
|
||||
run("fncall_fib25 ", bench_fncall);
|
||||
101
bench_object.lua
101
bench_object.lua
@@ -1,101 +0,0 @@
|
||||
-- bench_object.lua — object/string/call benchmark (Lua)
|
||||
|
||||
local iterations = 200000
|
||||
local clock = os.clock
|
||||
|
||||
--- Allocate a fresh 3-field record on every iteration.
-- @treturn number the .z field of the last record created
local function bench_record_create()
    local rec
    for n = 0, iterations - 1 do
        rec = {x = n, y = n + 1, z = n + 2}
    end
    return rec.z
end
|
||||
|
||||
--- Hot-loop fixed-key property reads on a single record.
-- @treturn number accumulated field sum over all iterations
local function bench_prop_read()
    local rec = {x = 10, y = 20, z = 30, w = 40}
    local acc = 0
    for _ = 0, iterations - 1 do
        acc = acc + rec.x + rec.y + rec.z + rec.w
    end
    return acc
end
|
||||
|
||||
--- Dynamic (computed-key) property access through a key array.
-- @treturn number accumulated lookup sum over all iterations
local function bench_dynamic_prop()
    local rec = {a = 1, b = 2, c = 3, d = 4, e = 5}
    local names = {"a", "b", "c", "d", "e"}
    local acc = 0
    for _ = 0, iterations - 1 do
        for k = 1, 5 do
            acc = acc + rec[names[k]]
        end
    end
    return acc
end
|
||||
|
||||
--- Repeated naive string concatenation, one character per step.
-- FIX: the previous version buffered the pieces and joined them with
-- table.concat, which measures buffered joining rather than repeated
-- `..` concatenation — unlike the .ce and .js ports of this benchmark,
-- which concatenate in the loop. The result (#s) is unchanged.
-- @treturn number final string length (10000)
local function bench_string_concat()
    local s = ""
    local n = 10000
    for _ = 1, n do
        s = s .. "x"
    end
    return #s
end
|
||||
|
||||
--- String formatting workload: rebuild an "item_<i>" label each step.
-- Only the last formatted string survives the loop.
-- @treturn string "item_49999"
local function bench_interpolation()
    local out = ""
    local n = 50000
    for v = 0, n - 1 do
        out = string.format("item_%d", v)
    end
    return out
end
|
||||
|
||||
--- Construct a point record carrying its own sum "method".
-- The method takes the receiver explicitly (p.sum(p)) rather than
-- using Lua's `:` sugar, mirroring the sibling-language ports.
-- @tparam number x
-- @tparam number y
-- @treturn table record with fields x, y and function sum(self)
local function make_point(x, y)
    local point = {x = x, y = y}
    point.sum = function(self)
        return self.x + self.y
    end
    return point
end
||||
|
||||
--- Method-dispatch workload: call sum() on one point record per step.
-- Depends on the sibling make_point constructor.
-- @treturn number accumulated sum (7 per iteration)
local function bench_method_call()
    local pt = make_point(3, 4)
    local acc = 0
    for _ = 0, iterations - 1 do
        acc = acc + pt.sum(pt)
    end
    return acc
end
|
||||
|
||||
--- Naive doubly-recursive Fibonacci (pure call-overhead workload).
-- @tparam number n index (n <= 1 returned as-is)
-- @treturn number fib(n)
local function fib(n)
    if n > 1 then
        return fib(n - 1) + fib(n - 2)
    end
    return n
end
|
||||
|
||||
--- Function-call overhead workload: 20 evaluations of fib(25).
-- Depends on the sibling fib function.
-- @treturn number 20 * fib(25)
local function bench_fncall()
    local total = 0
    for _ = 1, 20 do
        total = total + fib(25)
    end
    return total
end
|
||||
|
||||
--- Execute fn once under os.clock timing and print its label,
-- elapsed milliseconds, and result.
-- @tparam string name label printed beside the timing
-- @tparam function fn zero-argument benchmark to invoke
local function run(name, fn)
    local t0 = clock()
    local value = fn()
    local ms = (clock() - t0) * 1000
    print(string.format("  %s: %.2f ms (result: %s)", name, ms, tostring(value)))
end
|
||||
|
||||
print("=== Object / String / Call Benchmark ===")
|
||||
print(string.format(" iterations: %d", iterations))
|
||||
run("record_create ", bench_record_create)
|
||||
run("prop_read ", bench_prop_read)
|
||||
run("dynamic_prop ", bench_dynamic_prop)
|
||||
run("string_concat ", bench_string_concat)
|
||||
run("interpolation ", bench_interpolation)
|
||||
run("method_call ", bench_method_call)
|
||||
run("fncall_fib25 ", bench_fncall)
|
||||
@@ -1,20 +0,0 @@
|
||||
#!/bin/bash

# Benchmark the wota / nota / json serializers across a set of scenarios.
# hyperfine expands the two --parameter-list flags into the full
# cross-product of library x scenario, runs each combination 20 times
# (after 3 warmup runs), and exports the results in three formats.
# -i: keep going even if a benchmarked command exits non-zero.
hyperfine \
  --warmup 3 \
  --runs 20 \
  -i \
  --export-csv wota_vs_nota_vs_json.csv \
  --export-json wota_vs_nota_vs_json.json \
  --export-markdown wota_vs_nota_vs_json.md \
  --parameter-list lib wota,nota,json \
  --parameter-list scen empty,integers,floats,strings,objects,nested,large_array \
  'cell benchmarks/wota_nota_json {lib} {scen}'


echo "Benchmark complete! Results saved to:"
echo "  - wota_vs_nota_vs_json.csv"
echo "  - wota_vs_nota_vs_json.json"
echo "  - wota_vs_nota_vs_json.md"
|
||||
2132
benchmarks/nota.json
2132
benchmarks/nota.json
File diff suppressed because it is too large
Load Diff
@@ -193,5 +193,3 @@ cell_exe = executable('cell',
|
||||
|
||||
# Install headers for building dynamic libraries using Cell
|
||||
install_headers('source/cell.h')
|
||||
install_headers('source/quickjs.h')
|
||||
install_headers('source/wota.h')
|
||||
|
||||
@@ -45,6 +45,7 @@
|
||||
#endif
|
||||
|
||||
#include "cutils.h"
|
||||
|
||||
#include "dtoa.h"
|
||||
#include "libregexp.h"
|
||||
#include "libunicode.h"
|
||||
|
||||
107
todo/jit.md
107
todo/jit.md
@@ -1,107 +0,0 @@
|
||||
Yep — what you’re describing is *exactly* how fast JS engines make “normal-looking arrays” fast: **an array carries an internal “elements kind”**, e.g. “all int32”, “all doubles”, “boxed values”, and it *transitions* to a more general representation the moment you store something that doesn’t fit. V8 literally documents this “elements kinds” + transitions model (packed smi → packed double → packed elements, plus “holey” variants). ([V8][1])
|
||||
|
||||
### 1) “Numbers-only until polluted” arrays: do it — but keep it brutally simple
|
||||
|
||||
For CellScript, you can get most of the win with just **three** internal kinds:
|
||||
|
||||
* **PACKED_I32** (or “fit int” if you want)
|
||||
* **PACKED_F64**
|
||||
* **PACKED_VALUE** (boxed JSValue)
|
||||
|
||||
Rules:
|
||||
|
||||
* On write, if kind is I32 and value is i32 → store unboxed.
|
||||
* If value is non-i32 number → upgrade whole backing store to F64.
|
||||
* If value is non-number → upgrade to VALUE.
|
||||
* Never downgrade (keep it one-way like V8 does). ([V8][1])
|
||||
|
||||
Extra credit that matters more than it sounds: **forbid sparse arrays** (or define them away). If an out-of-range write extends the array, *fill with null* so the storage remains dense. That keeps iteration tight and avoids “holey” variants (which are a real perf cliff in engines). ([V8][1])
|
||||
Your standard library already nudges toward dense arrays (constructors like `array(n)` fill with null).
|
||||
|
||||
### 2) Your “fast property op assuming data properties only” is the biggest bang
|
||||
|
||||
Since you’ve banned Proxy and all descriptor/accessor machinery , you can add VM ops that assume the world is sane:
|
||||
|
||||
* `GET_PROP_PLAIN`
|
||||
* `SET_PROP_PLAIN`
|
||||
|
||||
Then slap an **inline cache** (IC) on them: cache `(shape_id, slot_offset)` for a given property name/key. On hit → direct load/store by offset. On miss → slow path resolves, updates cache.
|
||||
|
||||
This is not hypothetical: QuickJS forks have pursued this; QuickJS-NG had discussion of polymorphic inline caches (PolyIC) with reported big wins in some forks. ([GitHub][2])
|
||||
|
||||
Even if you keep “objects are fully dynamic”, ICs still work great because most call sites are monomorphic in practice.
|
||||
|
||||
### 3) “What else should I remove to make JITing easier + faster?”
|
||||
|
||||
The best deletions are the ones that eliminate **invalidation** (stuff that forces “anything could change”):
|
||||
|
||||
1. **Prototype mutation** (you already forbid it; `meme` creates, “prototypes cannot be changed”).
|
||||
2. **Accessors / defineProperty / descriptors** (you already forbid it).
|
||||
3. **Proxy / Reflect** (already gone).
|
||||
4. **Property enumeration order guarantees** — and you already *don’t* guarantee key order for `array(object)`.
|
||||
That’s secretly huge: you can store properties in whatever layout is fastest (hash + compact slots) without “insertion order” bookkeeping.
|
||||
5. **Sparse arrays / hole semantics** (if you delete this, your array JIT story becomes *way* easier).
|
||||
|
||||
Stuff that’s *less* important than people think:
|
||||
|
||||
* Keeping `delete` as a keyword is fine *if* you implement it in a JIT-friendly way (next point).
|
||||
|
||||
### 4) You can keep `delete` without wrecking shapes: make it a “logical delete”
|
||||
|
||||
If you want “`obj[k] = null` deletes it”, you can implement deletion as:
|
||||
|
||||
* keep the slot/offset **stable**
|
||||
* store **null** and mark the property as “absent for enumeration / membership”
|
||||
|
||||
So the shape doesn’t thrash and cached offsets stay valid. `delete obj[k]` becomes the same thing.
|
||||
|
||||
That’s the trick: you keep the *semantics* of deletion, but avoid the worst-case performance behavior (shape churn) that makes JITs sad.
|
||||
|
||||
### 5) What assumptions do “meme + immutable prototypes” unlock?
|
||||
|
||||
Two big ones:
|
||||
|
||||
* **Prototype chain links never change**, so once you’ve specialized a load, you don’t need “prototype changed” invalidation machinery.
|
||||
* If your prototypes are usually **stone** (module exports from `use()` are stone) , then prototype *contents* don’t change either. That means caching “property X lives on prototype P at offset Y” is stable forever.
|
||||
|
||||
In a JIT or even in an interpreter with ICs, you can:
|
||||
|
||||
* guard receiver shape once per loop (or hoist it)
|
||||
* do direct loads either from receiver or a known prototype object
|
||||
|
||||
### 6) What do `stone` and `def` buy you, concretely?
|
||||
|
||||
**stone(value)** is a promise: “no more mutations, deep.”
|
||||
That unlocks:
|
||||
|
||||
* hoisting shape checks out of loops (because the receiver won’t change shape mid-loop)
|
||||
* for stone arrays: no push/pop → stable length + stable element kind
|
||||
* for stone objects: stable slot layout; you can treat them like read-only structs *when the key is known*
|
||||
|
||||
But: **stone doesn’t magically remove the need to identify which layout you’re looking at.** If the receiver is not a compile-time constant, you still need *some* guard (shape id or pointer class id). The win is you can often make that guard **once**, then blast through the loop.
|
||||
|
||||
**def** is about *bindings*, not object mutability:
|
||||
|
||||
* a `def` global / module binding can be constant-folded and inlined
|
||||
* a `def` that holds a `key()` capability makes `obj[that_key]` an excellent JIT target: the key identity is constant, so the lookup can be cached very aggressively.
|
||||
|
||||
### 7) LuaJIT comparison: what it’s doing, and where you could beat it
|
||||
|
||||
LuaJIT is fast largely because it’s a **tracing JIT**: it records a hot path, emits IR, and inserts **guards** that bail out if assumptions break. ([GitHub][3])
|
||||
Tables also have a split **array part + hash part** representation, which is why “array-ish” use is fast. ([Percona Community][4])
|
||||
|
||||
Could CellScript beat LuaJIT? Not as an interpreter. But with:
|
||||
|
||||
* unboxed dense arrays (like above),
|
||||
* plain-data-property ICs,
|
||||
* immutable prototypes,
|
||||
* plus either a trace JIT or a simple baseline JIT…
|
||||
|
||||
…you can absolutely be in “LuaJIT-ish” territory for the patterns you care about (actors + data + tight array loops). The big JS engines are still monsters in general-purpose optimization, but your *constraints* are real leverage if you cash them in at the VM/JIT level.
|
||||
|
||||
If you implement only two performance features this year: **(1) dense unboxed arrays with one-way kind transitions, and (2) inline-cached GET/SET for plain properties.** Everything else is garnish.
|
||||
|
||||
[1]: https://v8.dev/blog/elements-kinds?utm_source=chatgpt.com "Elements kinds in V8"
|
||||
[2]: https://github.com/quickjs-ng/quickjs/issues/116?utm_source=chatgpt.com "Optimization: Add support for Poly IC · Issue #116 · quickjs- ..."
|
||||
[3]: https://github.com/tarantool/tarantool/wiki/LuaJIT-SSA-IR?utm_source=chatgpt.com "LuaJIT SSA IR"
|
||||
[4]: https://percona.community/blog/2020/04/29/the-anatomy-of-luajit-tables-and-whats-special-about-them/?utm_source=chatgpt.com "The Anatomy of LuaJIT Tables and What's Special About Them"
|
||||
@@ -1,207 +0,0 @@
|
||||
Yep — here’s the concrete picture, with the “no-Proxy” trampoline approach, and what it can/can’t do.
|
||||
|
||||
## A concrete hot-reload example (with trampolines)
|
||||
|
||||
### `sprite.cell` v1
|
||||
|
||||
```js
|
||||
// sprite.cell
|
||||
var X = key('x')
|
||||
var Y = key('y')
|
||||
|
||||
def proto = {
|
||||
move: function(dx, dy) {
|
||||
this[X] += dx
|
||||
this[Y] += dy
|
||||
}
|
||||
}
|
||||
|
||||
var make = function(x, y) {
|
||||
var s = meme(proto)
|
||||
s[X] = x
|
||||
s[Y] = y
|
||||
return s
|
||||
}
|
||||
|
||||
return {
|
||||
proto: proto,
|
||||
make: make
|
||||
}
|
||||
```
|
||||
|
||||
### What the runtime stores on first load
|
||||
|
||||
Internally (not visible to Cell), you keep a per-module record:
|
||||
|
||||
```js
|
||||
module = {
|
||||
scope: { X, Y, proto, make }, // bindings created by var/def in module
|
||||
export_current: { proto, make }, // what the module returned
|
||||
export_handle: null // stable thing returned by use() in hot mode
|
||||
}
|
||||
```
|
||||
|
||||
Now, **in hot-reload mode**, `use('sprite')` returns `export_handle` instead of `export_current`. Since you don’t have Proxy/getters, the handle can only be “dynamic” for **functions** (because functions are called). So you generate trampolines for exported functions:
|
||||
|
||||
```js
|
||||
// export_handle is a plain object
|
||||
export_handle = stone({
|
||||
// stable reference (proto is identity-critical and will be patched in place)
|
||||
proto: module.scope.proto,
|
||||
|
||||
// trampoline (always calls the latest implementation)
|
||||
make: function(...args) {
|
||||
return module.scope.make.apply(null, args)
|
||||
}
|
||||
})
|
||||
```
|
||||
|
||||
Note what this buys you:
|
||||
|
||||
* Anyone who cached `var sprite = use('sprite')` keeps the same `sprite` object forever.
|
||||
* Calling `sprite.make(...)` always goes through the trampoline and hits the *current* `module.scope.make`.
|
||||
|
||||
### Reload to `sprite.cell` v2
|
||||
|
||||
Say v2 changes `move` and `make`:
|
||||
|
||||
```js
|
||||
def proto = {
|
||||
move: function(dx, dy) {
|
||||
// new behavior
|
||||
this[X] = this[X] + dx * 2
|
||||
this[Y] = this[Y] + dy * 2
|
||||
}
|
||||
}
|
||||
|
||||
var make = function(x, y) { ... } // changed too
|
||||
return { proto, make }
|
||||
```
|
||||
|
||||
Runtime reload sequence (safe point: between actor turns):
|
||||
|
||||
1. Evaluate the new module to produce `new_scope` and `new_export`.
|
||||
2. Reconcile into the old module record:
|
||||
|
||||
* **`var` bindings:** rebind
|
||||
|
||||
* `old.scope.make = new.scope.make`
|
||||
* (and any other `var`s)
|
||||
|
||||
* **`def` bindings:** keep the binding identity, but if it’s an object you want hot-updatable, **patch in place**
|
||||
|
||||
* `old.scope.proto.move = new.scope.proto.move`
|
||||
* (and other fields on proto)
|
||||
|
||||
Now the magic happens:
|
||||
|
||||
* Existing instances `s` have prototype `old.scope.proto` (stable identity).
|
||||
* You patched `old.scope.proto.move` to point at the new function.
|
||||
* So `s.move(...)` immediately uses the new behavior.
|
||||
* And `sprite.make(...)` goes through the trampoline to `old.scope.make`, which you rebound to the new `make`.
|
||||
|
||||
That’s “real” hot reload without Proxy.
|
||||
|
||||
---
|
||||
|
||||
## “Module exports just a function” — yes, and it’s actually the easiest
|
||||
|
||||
If a module returns a function:
|
||||
|
||||
```js
|
||||
// returns a function directly
|
||||
return function(x) { ... }
|
||||
```
|
||||
|
||||
Hot-reload mode can return a **trampoline function**:
|
||||
|
||||
```js
|
||||
handle = stone(function(...args) {
|
||||
return module.scope.export_function.apply(this, args)
|
||||
})
|
||||
```
|
||||
|
||||
On reload, you rebind `module.scope.export_function` to the new function, and all cached references keep working.
|
||||
|
||||
---
|
||||
|
||||
## “Module exports just a string” — possible, but not hot-swappable by reference (without changing semantics)
|
||||
|
||||
If the export is a primitive (text/number/logical/null), there’s no call boundary to hang a trampoline on. If you do:
|
||||
|
||||
```js
|
||||
return "hello"
|
||||
```
|
||||
|
||||
Then anyone who did:
|
||||
|
||||
```js
|
||||
def msg = use('msg') // msg is a text value
|
||||
```
|
||||
|
||||
…is holding the text itself. You can’t “update” that value in place without either:
|
||||
|
||||
### Option 1: Accept the limitation (recommended)
|
||||
|
||||
* Hot reload still reloads the module.
|
||||
* But **previously returned primitive exports don’t change**; callers must call `use()` again to see the new value.
|
||||
|
||||
This keeps your semantics clean.
|
||||
|
||||
### Option 2: Dev-mode wrapping (changes semantics)
|
||||
|
||||
In hot-reload mode only, return a box/thunk instead:
|
||||
|
||||
* box: `{ get: function(){...} }`
|
||||
* thunk: `function(){ return current_text }`
|
||||
|
||||
But then code that expects a text breaks unless it’s written to handle the box/thunk. Usually not worth it unless you explicitly want “dev mode has different types”.
|
||||
|
||||
**Best convention:** if you want a reloadable “string export”, export a function:
|
||||
|
||||
```js
|
||||
var value = "hello"
|
||||
return { get: function() { return value } }
|
||||
```
|
||||
|
||||
Now `get()` is trampoline-able.
|
||||
|
||||
---
|
||||
|
||||
## About `var` vs `def` on reload
|
||||
|
||||
You’re very close, just phrase it precisely:
|
||||
|
||||
* **`var`**: binding is hot-rebindable
|
||||
On reload, `old.scope[name] = new.scope[name]`.
|
||||
|
||||
* **`def`**: binding identity is stable (const binding)
|
||||
On reload, you do **not** rebind the slot.
|
||||
|
||||
But: for `def` that points to **mutable objects that must preserve identity** (like prototypes), you *can still patch the object’s fields in place*:
|
||||
|
||||
* binding stays the same object
|
||||
* the object’s contents update
|
||||
|
||||
That’s not “setting new defs to old defs”; it’s “keeping old defs, optionally copying new content into them”.
|
||||
|
||||
If you want to avoid surprises, make one explicit rule:
|
||||
|
||||
* “def objects may be patched in place during hot reload; def primitives are never replaced.”
|
||||
|
||||
---
|
||||
|
||||
## One important consequence of “no Proxy / no getters”
|
||||
|
||||
Your trampoline trick only guarantees hot-reload for:
|
||||
|
||||
* exported **functions** (via trampolines)
|
||||
* exported **objects whose identity never changes** (like `proto`), because the handle can point at the stable old object
|
||||
|
||||
It **does not** guarantee hot-reload for exported scalars that you expect to change (because the handle can’t dynamically compute a property value).
|
||||
|
||||
That’s fine! It just becomes a convention: “export state through functions, export identity anchors as objects.”
|
||||
|
||||
---
|
||||
|
||||
If you keep those rules crisp in the doc, your hot reload story becomes genuinely robust *and* lightweight: most work is “rebind vars” + “patch proto tables” + “trampoline exported functions.” The rest is just conventions that make distributed actor code sane.
|
||||
Reference in New Issue
Block a user