// regen.ce — regenerate .mach bytecode files
// Run with: ./cell --core . regen
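// fd and json are runtime helpers; the remaining modules are the compiler
// stages, applied to each file in order: tokenize -> parse -> fold ->
// mcode -> streamline, with the result written out as compacted JSON.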
var fd = use("fd")
var json = use("json")
var tokenize = use("tokenize")
var parse = use("parse")
var fold = use("fold")
var mcode = use("mcode")
var streamline = use("streamline")
var files = [
    {src: "tokenize.cm", name: "tokenize", out: "tokenize.cm.mcode"},
    {src: "parse.cm", name: "parse", out: "parse.cm.mcode"},
    {src: "fold.cm", name: "fold", out: "fold.cm.mcode"},
    {src: "mcode.cm", name: "mcode", out: "mcode.cm.mcode"},
    {src: "streamline.cm", name: "streamline", out: "streamline.cm.mcode"},
    {src: "qbe.cm", name: "qbe", out: "qbe.cm.mcode"},
    {src: "qbe_emit.cm", name: "qbe_emit", out: "qbe_emit.cm.mcode"},
    {src: "internal/bootstrap.cm", name: "bootstrap", out: "internal/bootstrap.cm.mcode"},
    {src: "internal/engine.cm", name: "engine", out: "internal/engine.cm.mcode"}
]
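// Loop state and per-file scratch variables, declared once and reused.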
var i = 0
var entry = null
var src = null
var tok_result = null
var ast = null
var folded = null
var compiled = null
var optimized = null
var mcode_text = null
var f = null
var errs = null
var ei = 0
var e = null
var had_errors = false
// Collapse leaf arrays (instruction arrays) onto single lines
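// json.encode with an indent of 2 puts every element on its own line; this
// pass buffers each bare "[" ... "]" run and, when the run contains no
// nested arrays or objects, joins its elements back onto a single line.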
var compact_arrays = function(json_text) {
    var lines = array(json_text, "\n")
    var result = []
    var i = 0
    var line = null
    var trimmed = null
    var collecting = false
    var collected = null
    var indent = null
    var is_leaf = null
    var j = 0
    var inner = null
    var parts = null
    var trailing = null
    var chars = null
    var k = 0
    while (i < length(lines)) {
        line = lines[i]
        trimmed = trim(line)
        // A line that is exactly "[" opens a candidate leaf array
        if (collecting == false && trimmed == "[") {
            collecting = true
            // Remember the leading whitespace so the collapsed line keeps it
            chars = array(line)
            k = 0
            while (k < length(chars) && chars[k] == " ") {
                k = k + 1
            }
            indent = text(line, 0, k)
            collected = []
            i = i + 1
            continue
        }
        if (collecting) {
            if (trimmed == "]" || trimmed == "],") {
                // Closing bracket: the run is a leaf only if no buffered
                // line opens a nested array or object
                is_leaf = true
                j = 0
                while (j < length(collected)) {
                    inner = trim(collected[j])
                    if (starts_with(inner, "[") || starts_with(inner, "{")) {
                        is_leaf = false
                    }
                    j = j + 1
                }
                if (is_leaf && length(collected) > 0) {
                    // Strip each element's trailing comma, then join them
                    parts = []
                    j = 0
                    while (j < length(collected)) {
                        inner = trim(collected[j])
                        if (ends_with(inner, ",")) {
                            inner = text(inner, 0, length(inner) - 1)
                        }
                        parts[] = inner
                        j = j + 1
                    }
                    // Keep the comma that followed the closing bracket, if any
                    trailing = ""
                    if (ends_with(trimmed, ",")) {
                        trailing = ","
                    }
                    result[] = `${indent}[${text(parts, ", ")}]${trailing}`
                } else {
                    // Not a leaf: emit the buffered lines unchanged
                    result[] = `${indent}[`
                    j = 0
                    while (j < length(collected)) {
                        result[] = collected[j]
                        j = j + 1
                    }
                    result[] = line
                }
                collecting = false
            } else {
                // Still inside the candidate array; keep buffering
                collected[] = line
            }
            i = i + 1
            continue
        }
        result[] = line
        i = i + 1
    }
    return text(result, "\n")
}
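// For example, a leaf run such as
//   [
//     "op",
//     1
//   ]
// becomes ["op", 1], while arrays containing nested arrays or objects keep
// their multi-line layout (only their leaf children are collapsed).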
while (i < length(files)) {
    entry = files[i]
    src = text(fd.slurp(entry.src))
    tok_result = tokenize(src, entry.src)
    ast = parse(tok_result.tokens, src, entry.src, tokenize)
    // Check for parse/semantic errors before compiling
    errs = ast.errors
    if (errs != null && length(errs) > 0) {
        // Report every error for this file, then move on to the next one
        ei = 0
        while (ei < length(errs)) {
            e = errs[ei]
            if (e.line != null) {
                print(`${entry.src}:${text(e.line)}:${text(e.column)}: error: ${e.message}`)
            } else {
                print(`${entry.src}: error: ${e.message}`)
            }
            ei = ei + 1
        }
        had_errors = true
        i = i + 1
        continue
    }
    // Fold the AST, compile it to mcode, then run the streamline pass
    folded = fold(ast)
    compiled = mcode(folded)
    optimized = streamline(compiled)
    // Pretty-print with 2-space indent, then collapse leaf arrays
    mcode_text = compact_arrays(json.encode(optimized, null, 2))
    f = fd.open(entry.out, "w")
    fd.write(f, mcode_text)
    fd.close(f)
    print(`wrote ${entry.out}`)
    i = i + 1
}
if (had_errors) {
    print("regen aborted: fix errors above")
}