// diff.ce — differential testing: run tests optimized vs unoptimized, compare results
//
// Usage:
//   cell diff            - diff all test files in current package
//   cell diff suite      - diff a specific test file (tests/suite.cm)
//   cell diff tests/foo  - diff a specific test file by path
var shop = use('internal/shop')
|
|
var pkg = use('package')
|
|
var fd = use('fd')
|
|
var time = use('time')
|
|
var testlib = use('internal/testlib')
|
|
|
|
var _args = args == null ? [] : args
|
|
|
|
var analyze = use('internal/os').analyze
|
|
var run_ast_fn = use('internal/os').run_ast_fn
|
|
var run_ast_noopt_fn = use('internal/os').run_ast_noopt_fn
|
|
|
|
if (!run_ast_noopt_fn) {
|
|
log.console("error: run_ast_noopt_fn not available (rebuild bootstrap)")
|
|
$stop()
|
|
return
|
|
}
|
|
|
|
// Parse arguments: diff [test_path]
|
|
var target_test = null
|
|
if (length(_args) > 0) {
|
|
target_test = _args[0]
|
|
}
|
|
|
|
var is_valid_package = testlib.is_valid_package
|
|
|
|
if (!is_valid_package('.')) {
|
|
log.console('No cell.toml found in current directory')
|
|
$stop()
|
|
return
|
|
}
|
|
|
|
// Collect test files
|
|
function collect_tests(specific_test) {
|
|
var files = pkg.list_files(null)
|
|
var test_files = []
|
|
var i = 0
|
|
var f = null
|
|
var test_name = null
|
|
var match_name = null
|
|
var match_base = null
|
|
for (i = 0; i < length(files); i++) {
|
|
f = files[i]
|
|
if (starts_with(f, "tests/") && ends_with(f, ".cm")) {
|
|
if (specific_test) {
|
|
test_name = text(f, 0, -3)
|
|
match_name = specific_test
|
|
if (!starts_with(match_name, 'tests/')) match_name = 'tests/' + match_name
|
|
match_base = ends_with(match_name, '.cm') ? text(match_name, 0, -3) : match_name
|
|
if (test_name != match_base) continue
|
|
}
|
|
push(test_files, f)
|
|
}
|
|
}
|
|
return test_files
|
|
}
|
|
|
|
var values_equal = testlib.values_equal
|
|
var describe = testlib.describe
|
|
|
|
// Run a single test file through both paths
|
|
function diff_test_file(file_path) {
|
|
var mod_path = text(file_path, 0, -3)
|
|
var src_path = fd.realpath('.') + '/' + file_path
|
|
var src = null
|
|
var ast = null
|
|
var mod_opt = null
|
|
var mod_noopt = null
|
|
var results = {file: file_path, tests: [], passed: 0, failed: 0, errors: []}
|
|
var use_pkg = fd.realpath('.')
|
|
var opt_error = null
|
|
var noopt_error = null
|
|
var keys = null
|
|
var i = 0
|
|
var k = null
|
|
var opt_result = null
|
|
var noopt_result = null
|
|
var opt_err = null
|
|
var noopt_err = null
|
|
var _run_one_opt = null
|
|
var _run_one_noopt = null
|
|
|
|
// Build env for module loading
|
|
var make_env = function() {
|
|
return stone({
|
|
use: function(path) {
|
|
return shop.use(path, use_pkg)
|
|
}
|
|
})
|
|
}
|
|
|
|
// Read and parse
|
|
var _read = function() {
|
|
src = text(fd.slurp(src_path))
|
|
ast = analyze(src, src_path)
|
|
} disruption {
|
|
push(results.errors, `failed to parse ${file_path}`)
|
|
return results
|
|
}
|
|
_read()
|
|
if (length(results.errors) > 0) return results
|
|
|
|
// Run optimized
|
|
var _run_opt = function() {
|
|
mod_opt = run_ast_fn(mod_path, ast, make_env())
|
|
} disruption {
|
|
opt_error = "disrupted"
|
|
}
|
|
_run_opt()
|
|
|
|
// Run unoptimized
|
|
var _run_noopt = function() {
|
|
mod_noopt = run_ast_noopt_fn(mod_path, ast, make_env())
|
|
} disruption {
|
|
noopt_error = "disrupted"
|
|
}
|
|
_run_noopt()
|
|
|
|
// Compare module-level behavior
|
|
if (opt_error != noopt_error) {
|
|
push(results.errors, `module load mismatch: opt=${opt_error != null ? opt_error : "ok"} noopt=${noopt_error != null ? noopt_error : "ok"}`)
|
|
results.failed = results.failed + 1
|
|
return results
|
|
}
|
|
if (opt_error != null) {
|
|
// Both disrupted during load — that's consistent
|
|
results.passed = results.passed + 1
|
|
push(results.tests, {name: "<module>", status: "passed"})
|
|
return results
|
|
}
|
|
|
|
// If module returns a record of functions, test each one
|
|
if (is_object(mod_opt) && is_object(mod_noopt)) {
|
|
keys = array(mod_opt)
|
|
while (i < length(keys)) {
|
|
k = keys[i]
|
|
if (is_function(mod_opt[k]) && is_function(mod_noopt[k])) {
|
|
opt_result = null
|
|
noopt_result = null
|
|
opt_err = null
|
|
noopt_err = null
|
|
|
|
_run_one_opt = function() {
|
|
opt_result = mod_opt[k]()
|
|
} disruption {
|
|
opt_err = "disrupted"
|
|
}
|
|
_run_one_opt()
|
|
|
|
_run_one_noopt = function() {
|
|
noopt_result = mod_noopt[k]()
|
|
} disruption {
|
|
noopt_err = "disrupted"
|
|
}
|
|
_run_one_noopt()
|
|
|
|
if (opt_err != noopt_err) {
|
|
push(results.tests, {name: k, status: "failed"})
|
|
push(results.errors, `${k}: disruption mismatch opt=${opt_err != null ? opt_err : "ok"} noopt=${noopt_err != null ? noopt_err : "ok"}`)
|
|
results.failed = results.failed + 1
|
|
} else if (!values_equal(opt_result, noopt_result)) {
|
|
push(results.tests, {name: k, status: "failed"})
|
|
push(results.errors, `${k}: result mismatch opt=${describe(opt_result)} noopt=${describe(noopt_result)}`)
|
|
results.failed = results.failed + 1
|
|
} else {
|
|
push(results.tests, {name: k, status: "passed"})
|
|
results.passed = results.passed + 1
|
|
}
|
|
}
|
|
i = i + 1
|
|
}
|
|
} else {
|
|
// Compare direct return values
|
|
if (!values_equal(mod_opt, mod_noopt)) {
|
|
push(results.tests, {name: "<return>", status: "failed"})
|
|
push(results.errors, `return value mismatch: opt=${describe(mod_opt)} noopt=${describe(mod_noopt)}`)
|
|
results.failed = results.failed + 1
|
|
} else {
|
|
push(results.tests, {name: "<return>", status: "passed"})
|
|
results.passed = results.passed + 1
|
|
}
|
|
}
|
|
|
|
return results
|
|
}
|
|
|
|
// Main
|
|
var test_files = collect_tests(target_test)
|
|
log.console(`Differential testing: ${text(length(test_files))} file(s)`)
|
|
|
|
var total_passed = 0
|
|
var total_failed = 0
|
|
var i = 0
|
|
var result = null
|
|
var j = 0
|
|
|
|
while (i < length(test_files)) {
|
|
result = diff_test_file(test_files[i])
|
|
log.console(` ${result.file}: ${text(result.passed)} passed, ${text(result.failed)} failed`)
|
|
j = 0
|
|
while (j < length(result.errors)) {
|
|
log.console(` MISMATCH: ${result.errors[j]}`)
|
|
j = j + 1
|
|
}
|
|
total_passed = total_passed + result.passed
|
|
total_failed = total_failed + result.failed
|
|
i = i + 1
|
|
}
|
|
|
|
log.console(`----------------------------------------`)
|
|
log.console(`Diff: ${text(total_passed)} passed, ${text(total_failed)} failed, ${text(total_passed + total_failed)} total`)
|
|
|
|
if (total_failed > 0) {
|
|
log.console(`DIFFERENTIAL FAILURES DETECTED`)
|
|
}
|
|
|
|
$stop()
|